code
stringlengths
1
1.49M
vector
listlengths
0
7.38k
snippet
listlengths
0
7.38k
"""
Implementation of JSONEncoder
"""
# NOTE(review): this module is Python 2 code (``basestring``, ``long``,
# ``dict.iteritems``, ``str.decode``) — it will not run unmodified on Python 3.
import re

# Optional C speedup; silently fall back to the pure-Python implementation
# below when the extension module is not built.
try:
    from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
    pass

# Characters that must be escaped inside a JSON string.
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
# For ensure_ascii output: backslash, quote, and anything outside printable ASCII.
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
# Detects non-ASCII bytes in a str (used to trigger utf-8 decoding).
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Single-character escape table; control chars 0x00-0x1f fall back to \uXXXX.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
for i in range(0x20):
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))

# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
FLOAT_REPR = repr


def floatstr(o, allow_nan=True):
    # Check for specials.  Note that this type of test is processor- and/or
    # platform-specific, so do tests which don't depend on the internals.
    # (o != o) is only true for NaN.
    if o != o:
        text = 'NaN'
    elif o == INFINITY:
        text = 'Infinity'
    elif o == -INFINITY:
        text = '-Infinity'
    else:
        # Ordinary finite float: use repr for round-trippable output.
        return FLOAT_REPR(o)

    if not allow_nan:
        raise ValueError("Out of range float values are not JSON compliant: %r" % (o,))

    return text


def encode_basestring(s):
    """
    Return a JSON representation of a Python string
    """
    def replace(match):
        return ESCAPE_DCT[match.group(0)]
    return '"' + ESCAPE.sub(replace, s) + '"'


def py_encode_basestring_ascii(s):
    # Pure-Python fallback for the C speedup: return an ASCII-only JSON
    # string representation of ``s`` (non-ASCII becomes \uXXXX escapes).
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        # Python 2: promote a utf-8 encoded byte string to unicode first.
        s = s.decode('utf-8')
    def replace(match):
        s = match.group(0)
        try:
            return ESCAPE_DCT[s]
        except KeyError:
            n = ord(s)
            if n < 0x10000:
                return '\\u%04x' % (n,)
            else:
                # surrogate pair
                n -= 0x10000
                s1 = 0xd800 | ((n >> 10) & 0x3ff)
                s2 = 0xdc00 | (n & 0x3ff)
                return '\\u%04x\\u%04x' % (s1, s2)
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'


# Use the C implementation when it imported successfully above; the NameError
# branch fires when the try/except ImportError left the name undefined.
try:
    encode_basestring_ascii = c_encode_basestring_ascii
except NameError:
    encode_basestring_ascii = py_encode_basestring_ascii


class JSONEncoder(object):
    """
    Extensible JSON <http://json.org> encoder for Python data structures.

    Supports the following objects and types by default:

    +-------------------+---------------+
    | Python            | JSON          |
    +===================+===============+
    | dict              | object        |
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
    | str, unicode      | string        |
    +-------------------+---------------+
    | int, long, float  | number        |
    +-------------------+---------------+
    | True              | true          |
    +-------------------+---------------+
    | False             | false         |
    +-------------------+---------------+
    | None              | null          |
    +-------------------+---------------+

    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    __all__ = ['__init__', 'default', 'encode', 'iterencode']
    item_separator = ', '
    key_separator = ': '

    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8', default=None):
        """
        Constructor for JSONEncoder, with sensible defaults.

        If skipkeys is False, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None.  If
        skipkeys is True, such items are simply skipped.

        If ensure_ascii is True, the output is guaranteed to be str
        objects with all incoming unicode characters escaped.  If
        ensure_ascii is false, the output will be unicode object.

        If check_circular is True, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.

        If allow_nan is True, then NaN, Infinity, and -Infinity will be
        encoded as such.  This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.

        If sort_keys is True, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.

        If indent is a non-negative integer, then JSON array
        elements and object members will be pretty-printed with that
        indent level.  An indent level of 0 will only insert newlines.
        None is the most compact representation.

        If specified, separators should be a (item_separator, key_separator)
        tuple.  The default is (', ', ': ').  To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.

        If specified, default is a function that gets called for objects
        that can't otherwise be serialized.  It should return a JSON encodable
        version of the object or raise a ``TypeError``.

        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.indent = indent
        # Mutated in place by the _iterencode_* generators while pretty-printing.
        self.current_indent_level = 0
        if separators is not None:
            self.item_separator, self.key_separator = separators
        if default is not None:
            self.default = default
        self.encoding = encoding

    def _newline_indent(self):
        # Newline plus the indentation for the current nesting depth.
        return '\n' + (' ' * (self.indent * self.current_indent_level))

    def _iterencode_list(self, lst, markers=None):
        # Generator yielding the JSON chunks for a list/tuple.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            # Circular-reference detection keyed by object identity.
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        yield '['
        if self.indent is not None:
            self.current_indent_level += 1
            newline_indent = self._newline_indent()
            separator = self.item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            separator = self.item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                yield separator
            for chunk in self._iterencode(value, markers):
                yield chunk
        if newline_indent is not None:
            self.current_indent_level -= 1
            yield self._newline_indent()
        yield ']'
        if markers is not None:
            del markers[markerid]

    def _iterencode_dict(self, dct, markers=None):
        # Generator yielding the JSON chunks for a dict.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        key_separator = self.key_separator
        if self.indent is not None:
            self.current_indent_level += 1
            newline_indent = self._newline_indent()
            item_separator = self.item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = self.item_separator
        first = True
        if self.ensure_ascii:
            encoder = encode_basestring_ascii
        else:
            encoder = encode_basestring
        allow_nan = self.allow_nan
        if self.sort_keys:
            # Python 2 idiom: keys() returns a list that can be sorted in place.
            keys = dct.keys()
            keys.sort()
            items = [(k, dct[k]) for k in keys]
        else:
            items = dct.iteritems()
        _encoding = self.encoding
        _do_decode = (_encoding is not None
            and not (_encoding == 'utf-8'))
        for key, value in items:
            if isinstance(key, str):
                if _do_decode:
                    key = key.decode(_encoding)
            elif isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them.  Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = floatstr(key, allow_nan)
            elif isinstance(key, (int, long)):
                key = str(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif self.skipkeys:
                # Non-string key and skipkeys enabled: drop the item silently.
                continue
            else:
                raise TypeError("key %r is not a string" % (key,))
            if first:
                first = False
            else:
                yield item_separator
            yield encoder(key)
            yield key_separator
            for chunk in self._iterencode(value, markers):
                yield chunk
        if newline_indent is not None:
            self.current_indent_level -= 1
            yield self._newline_indent()
        yield '}'
        if markers is not None:
            del markers[markerid]

    def _iterencode(self, o, markers=None):
        # Dispatch on the Python type of ``o`` and yield JSON chunks.
        if isinstance(o, basestring):
            if self.ensure_ascii:
                encoder = encode_basestring_ascii
            else:
                encoder = encode_basestring
            _encoding = self.encoding
            if (_encoding is not None and isinstance(o, str)
                    and not (_encoding == 'utf-8')):
                o = o.decode(_encoding)
            yield encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield floatstr(o, self.allow_nan)
        elif isinstance(o, (list, tuple)):
            for chunk in self._iterencode_list(o, markers):
                yield chunk
        elif isinstance(o, dict):
            for chunk in self._iterencode_dict(o, markers):
                yield chunk
        else:
            # Unknown type: route through .default(), guarding against an
            # object whose default() representation contains itself.
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            for chunk in self._iterencode_default(o, markers):
                yield chunk
            if markers is not None:
                del markers[markerid]

    def _iterencode_default(self, o, markers=None):
        newobj = self.default(o)
        return self._iterencode(newobj, markers)

    def default(self, o):
        """
        Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).

        For example, to support arbitrary iterators, you could
        implement default like this::

            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        raise TypeError("%r is not JSON serializable" % (o,))

    def encode(self, o):
        """
        Return a JSON string representation of a Python data structure.

        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, basestring):
            if isinstance(o, str):
                _encoding = self.encoding
                if (_encoding is not None
                        and not (_encoding == 'utf-8')):
                    o = o.decode(_encoding)
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed.  The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = list(self.iterencode(o))
        return ''.join(chunks)

    def iterencode(self, o):
        """
        Encode the given object and yield each string
        representation as available.

        For example::

            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        if self.check_circular:
            markers = {}
        else:
            markers = None
        return self._iterencode(o, markers)

__all__ = ['JSONEncoder']
[ [ 8, 0, 0.0052, 0.0078, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0104, 0.0026, 0, 0.66, 0.0667, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 7, 0, 0.0195, 0.0104, 0, 0.66...
[ "\"\"\"\nImplementation of JSONEncoder\n\"\"\"", "import re", "try:\n from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii\nexcept ImportError:\n pass", " from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii", "ESCAPE = re.compile(r'...
"""
Implementation of JSONDecoder
"""
# NOTE(review): this module is Python 2 code (``unicode``, ``unichr``,
# ``.next()``, ``str.decode('hex')``) — it will not run unmodified on Python 3.
import re
import sys

from simplejson.scanner import Scanner, pattern
# Optional C speedup; fall back to py_scanstring below when unavailable.
try:
    from simplejson._speedups import scanstring as c_scanstring
except ImportError:
    pass

FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL

def _floatconstants():
    # Build NaN and +/-Infinity from their IEEE-754 big-endian byte patterns,
    # byte-swapping each double on little-endian machines.
    import struct
    import sys
    _BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
    if sys.byteorder != 'big':
        _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
    nan, inf = struct.unpack('dd', _BYTES)
    return nan, inf, -inf

NaN, PosInf, NegInf = _floatconstants()

def linecol(doc, pos):
    # Translate a character offset into a 1-based (line, column) pair.
    lineno = doc.count('\n', 0, pos) + 1
    if lineno == 1:
        colno = pos
    else:
        colno = pos - doc.rindex('\n', 0, pos)
    return lineno, colno

def errmsg(msg, doc, pos, end=None):
    # Format a parse-error message with line/column context.
    lineno, colno = linecol(doc, pos)
    if end is None:
        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    return '%s: line %d column %d - line %d column %d (char %d - %d)' % (
        msg, lineno, colno, endlineno, endcolno, pos, end)

_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
    'true': True,
    'false': False,
    'null': None,
}

def JSONConstant(match, context, c=_CONSTANTS):
    # Token action: literal constants, overridable via context.parse_constant.
    s = match.group(0)
    fn = getattr(context, 'parse_constant', None)
    if fn is None:
        rval = c[s]
    else:
        rval = fn(s)
    return rval, None
pattern('(-?Infinity|NaN|true|false|null)')(JSONConstant)

def JSONNumber(match, context):
    # Re-match to split into integer/fraction/exponent groups, then choose
    # float vs int parsing (both overridable via the decoder context).
    match = JSONNumber.regex.match(match.string, *match.span())
    integer, frac, exp = match.groups()
    if frac or exp:
        fn = getattr(context, 'parse_float', None) or float
        res = fn(integer + (frac or '') + (exp or ''))
    else:
        fn = getattr(context, 'parse_int', None) or int
        res = fn(integer)
    return res, None
pattern(r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?')(JSONNumber)

# Matches a run of plain characters followed by a quote, backslash, or
# control character (the three things that end a plain chunk).
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
    '"': u'"', '\\': u'\\', '/': u'/',
    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}

DEFAULT_ENCODING = "utf-8"

def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
    # Scan a JSON string starting just after the opening quote at end - 1.
    # Returns (unicode value, index one past the closing quote).
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        end = chunk.end()
        content, terminator = chunk.groups()
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        if terminator == '"':
            break
        elif terminator != '\\':
            # Raw control character inside the string.
            if strict:
                raise ValueError(errmsg("Invalid control character %r at", s, end))
            else:
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        if esc != 'u':
            # Simple one-character escape.
            try:
                m = _b[esc]
            except KeyError:
                raise ValueError(
                    errmsg("Invalid \\escape: %r" % (esc,), s, end))
            end += 1
        else:
            # \uXXXX escape, possibly the first half of a surrogate pair
            # on narrow (UCS-2) builds... sys.maxunicode check below.
            esc = s[end + 1:end + 5]
            next_end = end + 5
            msg = "Invalid \\uXXXX escape"
            try:
                if len(esc) != 4:
                    raise ValueError
                uni = int(esc, 16)
                if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                    msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                    if not s[end + 5:end + 7] == '\\u':
                        raise ValueError
                    esc2 = s[end + 7:end + 11]
                    if len(esc2) != 4:
                        raise ValueError
                    uni2 = int(esc2, 16)
                    uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                    next_end += 6
                m = unichr(uni)
            except ValueError:
                raise ValueError(errmsg(msg, s, end))
            end = next_end
        _append(m)
    return u''.join(chunks), end

# Use speedup
try:
    scanstring = c_scanstring
except NameError:
    scanstring = py_scanstring

def JSONString(match, context):
    encoding = getattr(context, 'encoding', None)
    strict = getattr(context, 'strict', True)
    return scanstring(match.string, match.end(), encoding, strict)
pattern(r'"')(JSONString)

WHITESPACE = re.compile(r'\s*', FLAGS)

def JSONObject(match, context, _w=WHITESPACE.match):
    # Parse a JSON object starting just after the opening brace.
    pairs = {}
    s = match.string
    end = _w(s, match.end()).end()
    nextchar = s[end:end + 1]
    # Trivial empty object
    if nextchar == '}':
        return pairs, end + 1
    if nextchar != '"':
        raise ValueError(errmsg("Expecting property name", s, end))
    end += 1
    encoding = getattr(context, 'encoding', None)
    strict = getattr(context, 'strict', True)
    iterscan = JSONScanner.iterscan
    while True:
        key, end = scanstring(s, end, encoding, strict)
        end = _w(s, end).end()
        if s[end:end + 1] != ':':
            raise ValueError(errmsg("Expecting : delimiter", s, end))
        end = _w(s, end + 1).end()
        try:
            value, end = iterscan(s, idx=end, context=context).next()
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        pairs[key] = value
        end = _w(s, end).end()
        nextchar = s[end:end + 1]
        end += 1
        if nextchar == '}':
            break
        if nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
        end = _w(s, end).end()
        nextchar = s[end:end + 1]
        end += 1
        if nextchar != '"':
            raise ValueError(errmsg("Expecting property name", s, end - 1))
    object_hook = getattr(context, 'object_hook', None)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
pattern(r'{')(JSONObject)

def JSONArray(match, context, _w=WHITESPACE.match):
    # Parse a JSON array starting just after the opening bracket.
    values = []
    s = match.string
    end = _w(s, match.end()).end()
    # Look-ahead for trivial empty array
    nextchar = s[end:end + 1]
    if nextchar == ']':
        return values, end + 1
    iterscan = JSONScanner.iterscan
    while True:
        try:
            value, end = iterscan(s, idx=end, context=context).next()
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        values.append(value)
        end = _w(s, end).end()
        nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        if nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end))
        end = _w(s, end).end()
    return values, end
pattern(r'\[')(JSONArray)

# Token actions tried in order by the Scanner.
ANYTHING = [
    JSONObject,
    JSONArray,
    JSONString,
    JSONConstant,
    JSONNumber,
]

JSONScanner = Scanner(ANYTHING)

class JSONDecoder(object):
    """
    Simple JSON <http://json.org> decoder

    Performs the following translations in decoding by default:

    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+

    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.
    """

    _scanner = Scanner(ANYTHING)
    __all__ = ['__init__', 'decode', 'raw_decode']

    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True):
        """
        ``encoding`` determines the encoding used to interpret any ``str``
        objects decoded by this instance (utf-8 by default).  It has no
        effect when decoding ``unicode`` objects.

        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as ``unicode``.

        ``object_hook``, if specified, will be called with the result
        of every JSON object decoded and its return value will be used in
        place of the given ``dict``.  This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).

        ``parse_float``, if specified, will be called with the string
        of every JSON float to be decoded.  By default this is equivalent to
        float(num_str).  This can be used to use another datatype or parser
        for JSON floats (e.g. decimal.Decimal).

        ``parse_int``, if specified, will be called with the string
        of every JSON int to be decoded.  By default this is equivalent to
        int(num_str).  This can be used to use another datatype or parser
        for JSON integers (e.g. float).

        ``parse_constant``, if specified, will be called with one of the
        following strings: -Infinity, Infinity, NaN, null, true, false.
        This can be used to raise an exception if invalid JSON numbers
        are encountered.
        """
        self.encoding = encoding
        self.object_hook = object_hook
        self.parse_float = parse_float
        self.parse_int = parse_int
        self.parse_constant = parse_constant
        self.strict = strict

    def decode(self, s, _w=WHITESPACE.match):
        """
        Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)
        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        end = _w(s, end).end()
        if end != len(s):
            # Anything left over after the document is an error.
            raise ValueError(errmsg("Extra data", s, end, len(s)))
        return obj

    def raw_decode(self, s, **kw):
        """
        Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
        with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.

        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.
        """
        kw.setdefault('context', self)
        try:
            obj, end = self._scanner.iterscan(s, **kw).next()
        except StopIteration:
            raise ValueError("No JSON object could be decoded")
        return obj, end

__all__ = ['JSONDecoder']
[ [ 8, 0, 0.0058, 0.0087, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0117, 0.0029, 0, 0.66, 0.0333, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 1, 0, 0.0146, 0.0029, 0, 0.66...
[ "\"\"\"\nImplementation of JSONDecoder\n\"\"\"", "import re", "import sys", "from simplejson.scanner import Scanner, pattern", "try:\n from simplejson._speedups import scanstring as c_scanstring\nexcept ImportError:\n pass", " from simplejson._speedups import scanstring as c_scanstring", "FLAGS...
r"""
A simple, fast, extensible JSON encoder and decoder

JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.

simplejson exposes an API familiar to users of the standard library
marshal and pickle modules.

Encoding basic Python object hierarchies::

    >>> import simplejson
    >>> simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
    '["foo", {"bar": ["baz", null, 1.0, 2]}]'
    >>> print simplejson.dumps("\"foo\bar")
    "\"foo\bar"
    >>> print simplejson.dumps(u'\u1234')
    "\u1234"
    >>> print simplejson.dumps('\\')
    "\\"
    >>> print simplejson.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
    {"a": 0, "b": 0, "c": 0}
    >>> from StringIO import StringIO
    >>> io = StringIO()
    >>> simplejson.dump(['streaming API'], io)
    >>> io.getvalue()
    '["streaming API"]'

Compact encoding::

    >>> import simplejson
    >>> simplejson.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
    '[1,2,3,{"4":5,"6":7}]'

Pretty printing::

    >>> import simplejson
    >>> print simplejson.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
    {
        "4": 5,
        "6": 7
    }

Decoding JSON::

    >>> import simplejson
    >>> simplejson.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
    [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
    >>> simplejson.loads('"\\"foo\\bar"')
    u'"foo\x08ar'
    >>> from StringIO import StringIO
    >>> io = StringIO('["streaming API"]')
    >>> simplejson.load(io)
    [u'streaming API']

Specializing JSON object decoding::

    >>> import simplejson
    >>> def as_complex(dct):
    ...     if '__complex__' in dct:
    ...         return complex(dct['real'], dct['imag'])
    ...     return dct
    ...
    >>> simplejson.loads('{"__complex__": true, "real": 1, "imag": 2}',
    ...     object_hook=as_complex)
    (1+2j)
    >>> import decimal
    >>> simplejson.loads('1.1', parse_float=decimal.Decimal)
    Decimal("1.1")

Extending JSONEncoder::

    >>> import simplejson
    >>> class ComplexEncoder(simplejson.JSONEncoder):
    ...     def default(self, obj):
    ...         if isinstance(obj, complex):
    ...             return [obj.real, obj.imag]
    ...         return simplejson.JSONEncoder.default(self, obj)
    ...
    >>> simplejson.dumps(2 + 1j, cls=ComplexEncoder)
    '[2.0, 1.0]'
    >>> ComplexEncoder().encode(2 + 1j)
    '[2.0, 1.0]'
    >>> list(ComplexEncoder().iterencode(2 + 1j))
    ['[', '2.0', ', ', '1.0', ']']

Using simplejson from the shell to validate and pretty-print::

    $ echo '{"json":"obj"}' | python -msimplejson.tool
    {
        "json": "obj"
    }
    $ echo '{ 1.2:3.4}' | python -msimplejson.tool
    Expecting property name: line 1 column 2 (char 2)

Note that the JSON produced by this module's default settings is a subset of
YAML, so it may be used as a serializer for that as well.
"""
__version__ = '1.9.2'
__all__ = [
    'dump', 'dumps', 'load', 'loads',
    'JSONDecoder', 'JSONEncoder',
]

if __name__ == '__main__':
    import warnings
    # Fixed typo in the suggested command: was 'python -msiplejson.tool'.
    warnings.warn('python -msimplejson is deprecated, use python -msimplejson.tool',
                  DeprecationWarning)
    from simplejson.decoder import JSONDecoder
    from simplejson.encoder import JSONEncoder
else:
    from decoder import JSONDecoder
    from encoder import JSONEncoder

# Module-level encoder reused by dump()/dumps() when all arguments are the
# documented defaults (the overwhelmingly common case).
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8',
    default=None,
)

def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, **kw):
    """
    Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is ``False``, then the some chunks written to ``fp``
    may be ``unicode`` instances, subject to normal Python ``str`` to
    ``unicode`` coercion rules.  Unless ``fp.write()`` explicitly
    understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
    to cause an error.

    If ``check_circular`` is ``False``, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a non-negative integer, then JSON array elements and
    object members will be pretty-printed with that indent level.  An indent
    level of 0 will only insert newlines.  ``None`` is the most compact
    representation.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default is
    UTF-8.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError.  The default simply raises TypeError.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.
    """
    # cached encoder
    if (skipkeys is False and ensure_ascii is True and
        check_circular is True and allow_nan is True and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not kw):
        iterable = _default_encoder.iterencode(obj)
    else:
        if cls is None:
            cls = JSONEncoder
        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators, encoding=encoding,
            default=default, **kw).iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in iterable:
        fp.write(chunk)


def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, **kw):
    """
    Serialize ``obj`` to a JSON formatted ``str``.

    If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is ``False``, then the return value will be a
    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
    coercion rules instead of being escaped to an ASCII ``str``.

    If ``check_circular`` is ``False``, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a non-negative integer, then JSON array elements and
    object members will be pretty-printed with that indent level.  An indent
    level of 0 will only insert newlines.  ``None`` is the most compact
    representation.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default is
    UTF-8.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError.  The default simply raises TypeError.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.
    """
    # cached encoder
    if (skipkeys is False and ensure_ascii is True and
        check_circular is True and allow_nan is True and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not kw):
        return _default_encoder.encode(obj)
    if cls is None:
        cls = JSONEncoder
    return cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        **kw).encode(obj)


# Module-level decoder reused by loads() when all arguments are defaults.
_default_decoder = JSONDecoder(encoding=None, object_hook=None)

def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, **kw):
    """
    Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
    a JSON document) to a Python object.

    If the contents of ``fp`` is encoded with an ASCII based encoding other
    than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
    be specified.  Encodings that are not ASCII based (such as UCS-2) are
    not allowed, and should be wrapped with
    ``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode``
    object and passed to ``loads()``

    ``object_hook`` is an optional function that will be called with the
    result of any object literal decode (a ``dict``).  The return value of
    ``object_hook`` will be used instead of the ``dict``.  This feature
    can be used to implement custom decoders (e.g. JSON-RPC class hinting).

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.
    """
    return loads(fp.read(),
        encoding=encoding, cls=cls, object_hook=object_hook,
        parse_float=parse_float, parse_int=parse_int,
        parse_constant=parse_constant, **kw)


def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, **kw):
    """
    Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    If ``s`` is a ``str`` instance and is encoded with an ASCII based
    encoding other than utf-8 (e.g. latin-1) then an appropriate
    ``encoding`` name must be specified.  Encodings that are not ASCII
    based (such as UCS-2) are not allowed and should be decoded to
    ``unicode`` first.

    ``object_hook`` is an optional function that will be called with the
    result of any object literal decode (a ``dict``).  The return value of
    ``object_hook`` will be used instead of the ``dict``.  This feature
    can be used to implement custom decoders (e.g. JSON-RPC class hinting).

    ``parse_float``, if specified, will be called with the string of every
    JSON float to be decoded.  By default this is equivalent to
    float(num_str).  This can be used to use another datatype or parser
    for JSON floats (e.g. decimal.Decimal).

    ``parse_int``, if specified, will be called with the string of every
    JSON int to be decoded.  By default this is equivalent to int(num_str).
    This can be used to use another datatype or parser for JSON integers
    (e.g. float).

    ``parse_constant``, if specified, will be called with one of the
    following strings: -Infinity, Infinity, NaN, null, true, false.  This
    can be used to raise an exception if invalid JSON numbers are
    encountered.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.
    """
    if (cls is None and encoding is None and object_hook is None and
            parse_int is None and parse_float is None and
            parse_constant is None and not kw):
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    if object_hook is not None:
        kw['object_hook'] = object_hook
    if parse_float is not None:
        kw['parse_float'] = parse_float
    if parse_int is not None:
        kw['parse_int'] = parse_int
    if parse_constant is not None:
        kw['parse_constant'] = parse_constant
    return cls(encoding=encoding, **kw).decode(s)


#
# Compatibility cruft from other libraries
#

def decode(s):
    """
    demjson, python-cjson API compatibility hook.  Use loads(s) instead.
    """
    import warnings
    warnings.warn("simplejson.loads(s) should be used instead of decode(s)",
        DeprecationWarning)
    return loads(s)


def encode(obj):
    """
    demjson, python-cjson compatibility hook.  Use dumps(s) instead.
    """
    import warnings
    warnings.warn("simplejson.dumps(s) should be used instead of encode(s)",
        DeprecationWarning)
    return dumps(obj)


def read(s):
    """
    jsonlib, JsonUtils, python-json, json-py API compatibility hook.
    Use loads(s) instead.
    """
    import warnings
    warnings.warn("simplejson.loads(s) should be used instead of read(s)",
        DeprecationWarning)
    return loads(s)


def write(obj):
    """
    jsonlib, JsonUtils, python-json, json-py API compatibility hook.
    Use dumps(s) instead.
    """
    import warnings
    warnings.warn("simplejson.dumps(s) should be used instead of write(s)",
        DeprecationWarning)
    return dumps(obj)


if __name__ == '__main__':
    import simplejson.tool
    simplejson.tool.main()
[ [ 8, 0, 0.1356, 0.2686, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.2713, 0.0027, 0, 0.66, 0.0714, 162, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.2779, 0.0106, 0, 0.66...
[ "r\"\"\"\nA simple, fast, extensible JSON encoder and decoder\n\nJSON (JavaScript Object Notation) <http://json.org> is a subset of\nJavaScript syntax (ECMA-262 3rd edition) used as a lightweight data\ninterchange format.\n\nsimplejson exposes an API familiar to uses of the standard library", "__version__ = '1.9....
""" Iterator based sre token scanner """ import re from re import VERBOSE, MULTILINE, DOTALL import sre_parse import sre_compile import sre_constants from sre_constants import BRANCH, SUBPATTERN __all__ = ['Scanner', 'pattern'] FLAGS = (VERBOSE | MULTILINE | DOTALL) class Scanner(object): def __init__(self, lexicon, flags=FLAGS): self.actions = [None] # Combine phrases into a compound pattern s = sre_parse.Pattern() s.flags = flags p = [] for idx, token in enumerate(lexicon): phrase = token.pattern try: subpattern = sre_parse.SubPattern(s, [(SUBPATTERN, (idx + 1, sre_parse.parse(phrase, flags)))]) except sre_constants.error: raise p.append(subpattern) self.actions.append(token) s.groups = len(p) + 1 # NOTE(guido): Added to make SRE validation work p = sre_parse.SubPattern(s, [(BRANCH, (None, p))]) self.scanner = sre_compile.compile(p) def iterscan(self, string, idx=0, context=None): """ Yield match, end_idx for each match """ match = self.scanner.scanner(string, idx).match actions = self.actions lastend = idx end = len(string) while True: m = match() if m is None: break matchbegin, matchend = m.span() if lastend == matchend: break action = actions[m.lastindex] if action is not None: rval, next_pos = action(m, context) if next_pos is not None and next_pos != matchend: # "fast forward" the scanner matchend = next_pos match = self.scanner.scanner(string, matchend).match yield rval, matchend lastend = matchend def pattern(pattern, flags=FLAGS): def decorator(fn): fn.pattern = pattern fn.regex = re.compile(pattern, flags) return fn return decorator
[ [ 8, 0, 0.0299, 0.0448, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0597, 0.0149, 0, 0.66, 0.1, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 1, 0, 0.0746, 0.0149, 0, 0.66, ...
[ "\"\"\"\nIterator based sre token scanner\n\"\"\"", "import re", "from re import VERBOSE, MULTILINE, DOTALL", "import sre_parse", "import sre_compile", "import sre_constants", "from sre_constants import BRANCH, SUBPATTERN", "__all__ = ['Scanner', 'pattern']", "FLAGS = (VERBOSE | MULTILINE | DOTALL)"...
# Much the below modified from the gae-json-rest project import plistlib import logging import re import touchengineutil def id_of(entity): """ Make a {'id': <string-of-digits>} dict for an entity. Args: entity: an entity Returns: a jobj corresponding to the entity """ return dict(id=touchengineutil.id_of(entity)) def send_plist(response_obj, pdata): """ Send data in Plist form to an HTTP-response object. Args: response_obj: an HTTP response object jdata: a dict or list in correct 'plistable' form Side effects: sends the JSON form of jdata on response.out """ response_obj.content_type = 'application/xml' #logging.info("send_plist pdata = %s" %(pdata,)) response_obj.out.write(plistlib.writePlistToString(pdata)) def entity_to_dict(entity): """ Make a plistable dict (a dictObj) given an entity. Args: entity: an entity Returns: the JSONable-form dict (dictObj) for the entity """ model = type(entity) dictObj = id_of(entity) dictObj["key"] = str(entity.key()) props = touchengineutil.allProperties(model) for property_name, property_value in props: value_in_entity = getattr(entity, property_name, None) if value_in_entity is not None: to_string = getattr(model, property_name + '_to_string') #logging.info("type(value_in_entity) = %s" %(type(value_in_entity),)) #logging.info("value_in_entity = %s" %(value_in_entity,)) dictObj[property_name] = to_string(value_in_entity) return dictObj
[ [ 1, 0, 0.0755, 0.0189, 0, 0.66, 0, 592, 0, 1, 0, 0, 592, 0, 0 ], [ 1, 0, 0.0943, 0.0189, 0, 0.66, 0.1667, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.1132, 0.0189, 0, ...
[ "import plistlib", "import logging", "import re", "import touchengineutil", "def id_of(entity):\n \"\"\" Make a {'id': <string-of-digits>} dict for an entity.\n\n Args:\n entity: an entity\n Returns:\n a jobj corresponding to the entity\n \"\"\"", " \"\"\" Make a {'id': <string-of-digits>} dict...
#!/usr/bin/env python
import logging
import time
import re
import wsgiref.handlers

from google.appengine.ext import webapp
from google.appengine.ext.db import Key

import cookutil
import touchengineutil
import plistutil

# RE to match: optional /, classname, optional /, ID of 0+ numeric digits
CLASSNAME_ID_RE = re.compile(r'^/?(\w+)/?(\d*)$')

# TODO: queries, methods, schemas, and MUCH better error-handling!-)


def path_to_classname_and_id(path):
    """ Get a (classname, id) pair from a path.

    Args:
      path: a path string to analyze
    Returns:
      a 2-item tuple:
        (None, '') if the path does not match CLASSNAME_ID_RE
        (classname, idstring) if the path does match
        [idstring may be '', or else a string of digits]
    """
    mo = CLASSNAME_ID_RE.match(path)
    if mo:
        return mo.groups()
    else:
        return (None, '')


class PlistHandler(webapp.RequestHandler, cookutil.CookieMixin):
    """Serves GAE db-model data as plists over REST-shaped URLs."""

    # Literal prefix removed from the front of incoming request paths.
    stripFromURL = ''

    def _serve(self, data):
        """Bump the 'counter' cookie and send `data` as a plist response."""
        counter = self.get_cookie('counter')
        if counter:
            self.set_cookie('counter', str(int(counter) + 1))
        else:
            self.set_cookie('counter', '0')
        return plistutil.send_plist(self.response, data)

    def _get_model_and_entity(self, need_model, need_id):
        """ Analyze self.request.path to get model and entity.

        Args:
          need_model: bool: if True, fail if classname is missing
          need_id: bool: if True, fail if ID is missing
        Returns 3-item tuple:
          failed: bool: True iff has failed
          model: class object or None
          entity: instance of model or None
        """
        # BUGFIX: str.lstrip strips a *character set*, not a prefix; remove
        # stripFromURL as a literal prefix instead.  (With the default '' the
        # old code happened to be a no-op, so default behavior is unchanged.)
        path = self.request.path
        if self.stripFromURL and path.startswith(self.stripFromURL):
            path = path[len(self.stripFromURL):]
        logging.info(u'path = %s (stripFromURL = %s)' % (path, self.stripFromURL))
        classname, strid = path_to_classname_and_id(path)
        self._classname = classname
        if not classname:
            if need_model:
                self.response.set_status(400, 'Cannot do it without a model.')
                logging.info(u'_get_model_and_entity 400, Cannot do it without a model.')
            return need_model, None, None
        model = touchengineutil.modelClassFromName(classname)
        if model is None:
            self.response.set_status(400, 'Model %r not found' % classname)
            logging.info(u'_get_model_and_entity 400, Model %r not found' % classname)
            return True, None, None
        if not strid:
            if need_id:
                self.response.set_status(400, 'Cannot do it without an ID.')
                logging.info(u'_get_model_and_entity 400, Cannot do it without an ID.')
            return need_id, model, None
        try:
            numid = int(strid)
        except (TypeError, ValueError):
            # BUGFIX: int() raises ValueError (not TypeError) on a
            # non-numeric string; catch both to be safe.
            self.response.set_status(400, 'ID %r is not numeric.' % strid)
            logging.info(u'_get_model_and_entity 400, ID %r is not numeric.' % strid)
            return True, model, None
        else:
            entity = model.get_by_id(numid)
            if entity is None:
                self.response.set_status(404, "Entity %s not found" % self.request.path)
                # BUGFIX: this log line used to say 400 for the 404 case.
                logging.info(u'_get_model_and_entity 404, Entity %s not found' % self.request.path)
                return True, model, None
        logging.info(u'_get_model_and_entity model:%s entity:%s', model, entity)
        return False, model, entity

    def _fetch_models(self, model, limit, afterKey, orderBy):
        """Build and run the datastore query for a model listing.

        Args:
          model: the Model class to query
          limit: max number of results (string or int), or falsy for all
          afterKey: encoded key to resume after, or falsy  # TODO: handle
            the case where the key doesn't exist
          orderBy: property name to sort on, or falsy  # TODO: sanitize
            against model.properties()
        Returns:
          an iterable of entities (a fetched list when limit is given,
          otherwise a lazy query object)
        """
        if afterKey:
            if orderBy:
                query = model.gql("Where __key__ > :1 order by %s" % (orderBy,),
                                  Key(encoded=afterKey))
            else:
                query = model.gql("Where __key__ > :1", Key(encoded=afterKey))
        else:
            if orderBy:
                query = model.gql("order by %s" % (orderBy,))
            else:
                query = model.all()
        if limit:
            return query.fetch(int(limit))
        return query

    def _get(self, short=True, limit=None, afterKey=None, orderBy=None):
        """ Get Plist data for model names, entity IDs of a model, or an entity.

        Depending on the request path, serve to the response object:
        - for a path of /classname/id, a plist for that entity
        - for a path of /classname, a list of id-only strings for that model
          or a list of all entities (if short is False)
        - for a path of /, a list of all model class names, which allows
          the API to be introspected

        TODO: this needs some sanitization to keep from throwing exceptions
        all over the place when the user sends a string that makes us puke.
        """
        # BUGFIX: the counter cookie was read as 'coon' but written back as
        # 'count', so it never incremented; use 'count' for both.
        count = str(1 + int(self.get_cookie('count', '0')))
        self.set_cookie('count', count)
        self.set_cookie('ts', str(int(time.time())))
        failed, model, entity = self._get_model_and_entity(False, False)
        if failed:
            # TODO: surface the specific errors set upstream here as well
            dictObj = {"error": "See response code"}
        elif model is None:
            # Bare / path: introspect, returning all model class names.
            dictObj = {"allModelClassNames": touchengineutil.allModelClassNames()}
        elif entity is None:
            # /classname path: list the model's entities (ids or full dicts).
            models = self._fetch_models(model, limit, afterKey, orderBy)
            classSetname = model.__name__ + "_set"
            if short:
                dictObj = {classSetname:
                    [touchengineutil.classAndIdFromModelInstance(m)
                     for m in models]}
            else:
                dictObj = {classSetname:
                    [plistutil.entity_to_dict(m) for m in models]}
        else:
            # /classname/id path: the dict representation of that entity.
            dictObj = plistutil.entity_to_dict(entity)
        return self._serve(dictObj)

    def get(self):
        """Handle HTTP GET: parse query-string options, delegate to _get."""
        limit = self.request.get("limit")
        afterKey = self.request.get("afterKey")
        short = self.request.get("short")
        orderBy = self.request.get("orderBy")
        # Any non-empty value for 'short' means True.
        return self._get(short=bool(short), limit=limit,
                         afterKey=afterKey, orderBy=orderBy)
[ [ 1, 0, 0.0186, 0.0062, 0, 0.66, 0, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.0248, 0.0062, 0, 0.66, 0.0909, 654, 0, 1, 0, 0, 654, 0, 0 ], [ 1, 0, 0.0311, 0.0062, 0, ...
[ "import logging", "import time", "import re", "import wsgiref.handlers", "from google.appengine.ext import webapp", "from google.appengine.ext.db import Key", "import cookutil", "import touchengineutil", "import plistutil", "CLASSNAME_ID_RE = re.compile(r'^/?(\\w+)/?(\\d*)$')", "def path_to_clas...
""" Utilities for JSON REST CRUD support for GAE db models. Terminology: a subclass of db.Model is known as "a Model"; an instance of such a subclass is known as "an entity". Data is said to be in JSONed or JSONable form if it contains only dicts, lists and scalars (strings, numbers) in a form that is correctly serializable into a JSON-format string. In particular, a "jobj" is a JSONed dict with a key 'id' mapping the string format of the numeric value of an entity; each other key must be the name of a property of that entity's Model, and the corresponding value must be a string that can be deserialized into a value of that property's type. """ import re import touchengineutil from django.utils import simplejson def id_of(entity): """ Make a {'id': <string-of-digits>} dict for an entity. Args: entity: an entity Returns: a jobj corresponding to the entity """ return dict(id=touchengineutil.id_of(entity)) # RE to match: optional /, classname, optional /, ID of 0+ numeric digits CLASSNAME_ID_RE = re.compile(r'^/?(\w+)/?(\d*)$') def path_to_classname_and_id(path): """ Get a (classname, id) pair from a path. Args: path: a path string to anaylyze Returns: a 2-item tuple: (None, '') if the path does not match CLASSNAME_ID_RE (classname, idstring) if the path does match [idstring may be '', or else a string of digits] """ mo = CLASSNAME_ID_RE.match(path) if mo: return mo.groups() else: return (None, '') def send_json(response_obj, jdata): """ Send data in JSON form to an HTTP-response object. Args: response_obj: an HTTP response object jdata: a dict or list in correct 'JSONable' form Side effects: sends the JSON form of jdata on response.out """ response_obj.content_type = 'application/json' simplejson.dump(jdata, response_obj.out) def receive_json(request_obj): """ Receive data in JSON form from an HTTP-request object. 
Args: request_obj: an HTTP request object (with body in JSONed form) Returns: the JSONable-form result of loading the request's body """ return simplejson.loads(request_obj.body) def make_jobj(entity): """ Make a JSONable dict (a jobj) given an entity. Args: entity: an entity Returns: the JSONable-form dict (jobj) for the entity """ model = type(entity) jobj = id_of(entity) props = touchengineutil.allProperties(model) for property_name, property_value in props: value_in_entity = getattr(entity, property_name, None) if value_in_entity is not None: to_string = getattr(model, property_name + '_to_string') jobj[property_name] = to_string(value_in_entity) return jobj def parse_jobj(model, jobj): """ Make dict suitable for instantiating model, given a jobj. Args: model: a Model jobj: a jobj Returns: a dict d such that calling model(**d) properly makes an entity """ result = dict() for property_name, property_value in jobj.iteritems(): # ensure we have an ASCII string, not a Unicode one property_name = str(property_name) from_string = getattr(model, property_name + '_from_string') property_value = from_string(property_value) if property_value is not None: result[property_name] = property_value return result def make_entity(model, jobj): """ Makes an entity whose type is model with the state given by jobj. Args: model: a Model jobj: a jobj Side effects: creates and puts an entity of type model, w/state per jobj Returns: a jobj representing the newly created entity """ entity_dict = parse_jobj(model, jobj) entity = model(**entity_dict) entity.put() jobj = make_jobj(entity) jobj.update(id_of(entity)) return jobj def update_entity(entity, jobj): """ Updates an entity's state as per properties given in jobj. 
Args: entity: an entity jobj: a jobj Side effects: updates the entity with properties as given by jobj Returns: a jobj representing the whole new state of the entity """ new_entity_data = parse_jobj(type(entity), jobj) for property_name, property_value in new_entity_data.iteritems(): setattr(entity, property_name, property_value) entity.put() return make_jobj(entity)
[ [ 8, 0, 0.051, 0.0952, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.102, 0.0068, 0, 0.66, 0.0833, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 1, 0, 0.1156, 0.0068, 0, 0.66, ...
[ "\"\"\" Utilities for JSON REST CRUD support for GAE db models.\n\nTerminology: a subclass of db.Model is known as \"a Model\"; an instance of\nsuch a subclass is known as \"an entity\".\n\nData is said to be in JSONed or JSONable form if it contains only dicts, lists\nand scalars (strings, numbers) in a form that ...
"""A toy-level example of a data model in Google Appengine DB terms. """ import logging from touchengine import touchengineutil from Doctor import Doctor from Pager import Pager touchengineutil.decorateModuleNamed(__name__) logging.info('touchengine Models in %r decorated', __name__)
[ [ 8, 0, 0.15, 0.2, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.3, 0.1, 0, 0.66, 0.1667, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.4, 0.1, 0, 0.66, 0.3333, ...
[ "\"\"\"A toy-level example of a data model in Google Appengine DB terms.\n\"\"\"", "import logging", "from touchengine import touchengineutil", "from Doctor import Doctor", "from Pager import Pager", "touchengineutil.decorateModuleNamed(__name__)", "logging.info('touchengine Models in %r decorated', __n...
"""A toy-level example of a RESTful app running on Google Appengine. """ import logging import time import wsgiref.handlers from google.appengine.ext import webapp import models from touchengine import cookutil from touchengine import touchengineutil from touchengine import plistutil from touchengine.plistHandler import PlistHandler # TODO: queries, methods, schemas, and MUCH better error-handling!-) def main(): logging.info('main.py main()') application = webapp.WSGIApplication([('/.*', PlistHandler)], debug=True) wsgiref.handlers.CGIHandler().run(application) if __name__ == '__main__': main()
[ [ 8, 0, 0.0682, 0.0909, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1364, 0.0455, 0, 0.66, 0.0909, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.1818, 0.0455, 0, 0.66...
[ "\"\"\"A toy-level example of a RESTful app running on Google Appengine.\n\"\"\"", "import logging", "import time", "import wsgiref.handlers", "from google.appengine.ext import webapp", "import models", "from touchengine import cookutil", "from touchengine import touchengineutil", "from touchengine ...
import cgi
import doctest
import logging
import os
import re

logger = logging.getLogger()
logger.setLevel(getattr(logging, os.environ.get('LOGLEVEL', 'WARNING')))


class UrlParser(object):
    """ Parse a URL path and perform an appropriate callback on regex-matching.

    Instantiate h with a prefix (to be matched, but ignored if it matches),
    followed by as many (regex, callback) pairs as needed.  Then, call
    h.process(path): if the path matches the prefix, then each regex is
    tried *IN ORDER* on the rest of the path, and, upon the first match if
    any, the corresponding callback gets called (and its results returned).

    If the prefix does not match, or none of the regexes does, then method
    call h.process(path) returns None.

    The callback is passed *NAMED* arguments (only!) corresponding to the
    positional groups matched in the prefix, augmented or overridden by
    those matched in the specific regex that matched after that.  So for
    example:

    >>> def show(**k): print(sorted(k.items()))
    >>> h = UrlParser(r'/(?P<foo>\w+)/',
    ...     (r'(?P<bar>\d+)', show),
    ...     (r'(?P<foo>[^/]*)', show),
    ...     )
    >>> h.process('/zipzop/23/whatever')
    [('bar', '23'), ('foo', 'zipzop')]
    >>> h.process('/zipzop/whoo/whatever')
    [('foo', 'whoo')]

    You can also change the prefix by passing a prefix to .process(...)
    [the new prefix-to-ignore is then remembered in lieu of the previous
    one].

    >>> h.prefix.pattern
    '/(?P<foo>\\\\w+)/'
    >>> h.process('/zipzop/whoo/whatever', prefix='/')
    [('foo', 'zipzop')]
    >>> h.process('/zipzop/whoo/whatever')
    [('foo', 'zipzop')]
    >>> h.prefix.pattern
    '/'

    The h.prefix attribute is exposed, and it's a RE object.
    """

    def __init__(self, prefix, *args):
        """ Takes a prefix to be ignored and 0+ (regex, callback) pair args.

        Args:
          prefix: a string regex pattern
          args: 0+ pairs (regex_pattern, callback) [each a string + a callable]
        """
        self.prefix = re.compile(prefix or '')
        logging.debug('prefix: %r', prefix)
        self.callbacks = []
        for pattern, callback in args:
            logging.debug('%r -> %r', pattern, callback)
            self.callbacks.append((re.compile(pattern), callback))

    def process(self, path, prefix=None):
        """ Match the path to one of the regexs and call the appropriate callback.

        Args:
          path: a string URL (complete path) to parse
          prefix: if not None, a RE pattern string to change self.prefix
            from now on
        Returns:
          the result of the appropriate callback, or None if no match
        """
        if prefix is not None and prefix != self.prefix.pattern:
            self.prefix = re.compile(prefix)
        prefix_mo = self.prefix.match(path)
        if prefix_mo is None:
            logging.debug('No prefix match for %r (%r)', path, self.prefix)
            return None
        pathrest = path[prefix_mo.end():]
        logging.debug('Matching %r...', pathrest)
        for regex, callback in self.callbacks:
            mo = regex.match(pathrest)
            if mo:
                logging.debug('Matched %r, calling %r', regex, callback)
                # Prefix groups come first; the specific regex's groups
                # override any same-named prefix group.
                named_args = prefix_mo.groupdict()
                named_args.update(mo.groupdict())
                return callback(**named_args)
        logging.debug('No match for %r', pathrest)
        return None


class RestUrlParser(UrlParser):
    """ Specifically dispatches on the REs associated with REST-shaped URLs.

    Note that h.process only takes an URL *path*, NOT the rest of the URL
    (no protocol, no host, no query).

    >>> h = RestUrlParser('')
    >>> h.process('/$foobar')
    ('special', '$foobar')
    >>> h.process('/foobar')
    ('model', 'foobar')
    >>> h.process('/$foobar/zak/')
    ('special_method', '$foobar', 'zak')
    >>> h.process('/foobar/zak/')
    ('model_method', 'foobar', 'zak')
    >>> h.process('/foobar/23/')
    ('model_strid', 'foobar', '23')
    >>> h.process('/foobar/23/blop')
    ('model_strid_method', 'foobar', '23', 'blop')
    >>> h.process('')
    >>> h.process('////////')
    >>>
    """

    @staticmethod
    def _doprefix(prefix):
        """Normalize a prefix: None stays None, else '/name/' or just '/'."""
        if prefix is None:
            return None
        prefix = prefix.strip('/')
        if prefix:
            return '/%s/' % prefix
        else:
            return '/'

    def process(self, path, prefix=None):
        return UrlParser.process(self, path, self._doprefix(prefix))

    def __init__(self, prefix=None, **overrides):
        """ Set the prefix-to-ignore, optionally override methods.

        Args:
          prefix: a string regex pattern (or None, default)
          overrides: 0+ named arguments; values are callables to override
            the methods RestUrlParser provides (which just return tuples
            of strings), and each such callable must be
            signature-compatible with the corresponding named method.
            The methods & signatures are:
              do_special(special)
              do_model(model)
              do_special_method(special, method)
              do_model_method(model, method)
              do_model_strid(model, strid)
              do_model_strid_method(model, strid, method)
            The *names* (not necessarily the *order*) of the arguments
            matter.  The values of all arguments are strings (the
            substrings of the incoming path that match the respective
            items of the REST URL): strid is always 1+ digits; special is
            '$' + a valid identifier; model and method are identifiers.
        """
        # let each method be overridden (in the instance) by caller at ctor-time
        self.__dict__.update(overrides)
        # prefix must always absorb leading and trailing /
        prefix = self._doprefix(prefix)
        # build URL regexes with corresponding names
        urls = []
        def addurl(name, regex):
            urls.append((regex, getattr(self, 'do_' + name)))
        sr_method = r'/(?P<method>\w+)'
        sr_strid = r'/(?P<strid>\d+)'
        # special_method must be before special (ie. special_method > special)
        re_special = r'(?P<special>\$\w+)/?'
        re_special_method = re_special + sr_method
        addurl('special_method', re_special_method)
        addurl('special', re_special)
        # model_strid_method > model_strid > model_method > model
        re_model = r'(?P<model>\w+)/?'
        re_model_method = re_model + sr_method
        re_model_strid = re_model + sr_strid
        re_model_strid_method = re_model_strid + sr_method
        addurl('model_strid_method', re_model_strid_method)
        addurl('model_strid', re_model_strid)
        addurl('model_method', re_model_method)
        addurl('model', re_model)
        UrlParser.__init__(self, prefix, *urls)

    def do_special(self, special):
        return 'special', special

    def do_model(self, model):
        return 'model', model

    def do_special_method(self, special, method):
        return 'special_method', special, method

    def do_model_method(self, model, method):
        return 'model_method', model, method

    def do_model_strid(self, model, strid):
        return 'model_strid', model, strid

    def do_model_strid_method(self, model, strid, method):
        return 'model_strid_method', model, strid, method


def _test():
    # doctest is already imported at module level; the old local re-import
    # was redundant.  print() calls (single argument) behave identically
    # under Python 2's print statement, and make the module loadable on 3.
    numfailures, numtests = doctest.testmod()
    if numfailures == 0:
        print('%d tests passed successfully' % numtests)
    # if there are any failures, doctest does its own reporting!-)


if __name__ == "__main__":
    _test()
[ [ 1, 0, 0.005, 0.005, 0, 0.66, 0, 934, 0, 1, 0, 0, 934, 0, 0 ], [ 1, 0, 0.0099, 0.005, 0, 0.66, 0.1, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0149, 0.005, 0, 0.66, ...
[ "import cgi", "import doctest", "import logging", "import os", "import re", "logger = logging.getLogger()", "logger.setLevel(getattr(logging, os.environ.get('LOGLEVEL', 'WARNING')))", "class UrlParser(object):\n \"\"\" Parse a URL path and perform appropriate an callback on regex-matching.\n\n I...
#!/usr/bin/env python #Python sonnet maker import wsgiref.handlers from google.appengine.ext import webapp from google.appengine.ext.webapp.util import run_wsgi_app #external imports import sonnet import plistlib class MainHandler(webapp.RequestHandler): """Returns sonnets dictionary as a converted plist""" def get(self): plist = plistlib.writePlistToString(sonnet.verses) self.response.out.write(plist) class FrontPage(webapp.RequestHandler): """Displays front page""" def get(self): self.response.out.write(""" <html> <title>iSonnet Application</title> <body> <p>This is a simple web service.</p> <p> A plist is served out here: <a href="http://isonnet.appspot.com/plists/sonnets">isonnet</a> </p> <p> The Touch Engine Open Source Project is here: <a href="http://code.google.com/p/touchengine/">touchengine</a> </p> </body> </html> """) def main(): application = webapp.WSGIApplication([('/plists/sonnets', MainHandler), ('/', FrontPage), ], debug=True) run_wsgi_app(application) if __name__ == '__main__': main()
[ [ 1, 0, 0.087, 0.0217, 0, 0.66, 0, 709, 0, 1, 0, 0, 709, 0, 0 ], [ 1, 0, 0.1087, 0.0217, 0, 0.66, 0.125, 167, 0, 1, 0, 0, 167, 0, 0 ], [ 1, 0, 0.1304, 0.0217, 0, 0....
[ "import wsgiref.handlers", "from google.appengine.ext import webapp", "from google.appengine.ext.webapp.util import run_wsgi_app", "import sonnet", "import plistlib", "class MainHandler(webapp.RequestHandler):\n \"\"\"Returns sonnets dictionary as a converted plist\"\"\"\n def get(self):\n pl...
verses={"verses":[["I","""FROM fairest creatures we desire increase, That thereby beauty's rose might never die, But as the riper should by time decease, His tender heir might bear his memory: But thou, contracted to thine own bright eyes, Feed'st thy light'st flame with self-substantial fuel, Making a famine where abundance lies, Thyself thy foe, to thy sweet self too cruel. Thou that art now the world's fresh ornament And only herald to the gaudy spring, Within thine own bud buriest thy content And, tender churl, makest waste in niggarding. Pity the world, or else this glutton be, To eat the world's due, by the grave and thee."""], ["II","""When forty winters shall beseige thy brow, And dig deep trenches in thy beauty's field, Thy youth's proud livery, so gazed on now, Will be a tatter'd weed, of small worth held: Then being ask'd where all thy beauty lies, Where all the treasure of thy lusty days, To say, within thine own deep-sunken eyes, Were an all-eating shame and thriftless praise. How much more praise deserved thy beauty's use, If thou couldst answer 'This fair child of mine Shall sum my count and make my old excuse,' Proving his beauty by succession thine! This were to be new made when thou art old, And see thy blood warm when thou feel'st it cold."""], ["III","""Look in thy glass, and tell the face thou viewest Now is the time that face should form another; Whose fresh repair if now thou not renewest, Thou dost beguile the world, unbless some mother. For where is she so fair whose unear'd womb Disdains the tillage of thy husbandry? Or who is he so fond will be the tomb Of his self-love, to stop posterity? Thou art thy mother's glass, and she in thee Calls back the lovely April of her prime: So thou through windows of thine age shall see Despite of wrinkles this thy golden time. But if thou live, remember'd not to be, Die single, and thine image dies with thee."""], ["IV","""Unthrifty loveliness, why dost thou spend Upon thyself thy beauty's legacy? 
Nature's bequest gives nothing but doth lend, And being frank she lends to those are free. Then, beauteous niggard, why dost thou abuse The bounteous largess given thee to give? Profitless usurer, why dost thou use So great a sum of sums, yet canst not live? For having traffic with thyself alone, Thou of thyself thy sweet self dost deceive. Then how, when nature calls thee to be gone, What acceptable audit canst thou leave? Thy unused beauty must be tomb'd with thee, Which, used, lives th' executor to be."""], ["V","""Those hours, that with gentle work did frame The lovely gaze where every eye doth dwell, Will play the tyrants to the very same And that unfair which fairly doth excel: For never-resting time leads summer on To hideous winter and confounds him there; Sap cheque'd with frost and lusty leaves quite gone, Beauty o'ersnow'd and bareness every where: Then, were not summer's distillation left, A liquid prisoner pent in walls of glass, Beauty's effect with beauty were bereft, Nor it nor no remembrance what it was: But flowers distill'd though they with winter meet, Leese but their show; their substance still lives sweet."""], ["VI","""Then let not winter's ragged hand deface In thee thy summer, ere thou be distill'd: Make sweet some vial; treasure thou some place With beauty's treasure, ere it be self-kill'd. That use is not forbidden usury, Which happies those that pay the willing loan; That's for thyself to breed another thee, Or ten times happier, be it ten for one; Ten times thyself were happier than thou art, If ten of thine ten times refigured thee: Then what could death do, if thou shouldst depart, Leaving thee living in posterity? Be not self-will'd, for thou art much too fair To be death's conquest and make worms thine heir."""], ["VII","""Lo! 
in the orient when the gracious light Lifts up his burning head, each under eye Doth homage to his new-appearing sight, Serving with looks his sacred majesty; And having climb'd the steep-up heavenly hill, Resembling strong youth in his middle age, yet mortal looks adore his beauty still, Attending on his golden pilgrimage; But when from highmost pitch, with weary car, Like feeble age, he reeleth from the day, The eyes, 'fore duteous, now converted are From his low tract and look another way: So thou, thyself out-going in thy noon, Unlook'd on diest, unless thou get a son."""], ["VIII","""Music to hear, why hear'st thou music sadly? Sweets with sweets war not, joy delights in joy. Why lovest thou that which thou receivest not gladly, Or else receivest with pleasure thine annoy? If the true concord of well-tuned sounds, By unions married, do offend thine ear, They do but sweetly chide thee, who confounds In singleness the parts that thou shouldst bear. Mark how one string, sweet husband to another, Strikes each in each by mutual ordering, Resembling sire and child and happy mother Who all in one, one pleasing note do sing: Whose speechless song, being many, seeming one, Sings this to thee: 'thou single wilt prove none.'"""], ["IX","""Is it for fear to wet a widow's eye That thou consumest thyself in single life? Ah! if thou issueless shalt hap to die. The world will wail thee, like a makeless wife; The world will be thy widow and still weep That thou no form of thee hast left behind, When every private widow well may keep By children's eyes her husband's shape in mind. Look, what an unthrift in the world doth spend Shifts but his place, for still the world enjoys it; But beauty's waste hath in the world an end, And kept unused, the user so destroys it. No love toward others in that bosom sits That on himself such murderous shame commits."""], ["X","""For shame! deny that thou bear'st love to any, Who for thyself art so unprovident. 
Grant, if thou wilt, thou art beloved of many, But that thou none lovest is most evident; For thou art so possess'd with murderous hate That 'gainst thyself thou stick'st not to conspire. Seeking that beauteous roof to ruinate Which to repair should be thy chief desire. O, change thy thought, that I may change my mind! Shall hate be fairer lodged than gentle love? Be, as thy presence is, gracious and kind, Or to thyself at least kind-hearted prove: Make thee another self, for love of me, That beauty still may live in thine or thee."""], ["XI","""As fast as thou shalt wane, so fast thou growest In one of thine, from that which thou departest; And that fresh blood which youngly thou bestowest Thou mayst call thine when thou from youth convertest. Herein lives wisdom, beauty and increase: Without this, folly, age and cold decay: If all were minded so, the times should cease And threescore year would make the world away. Let those whom Nature hath not made for store, Harsh featureless and rude, barrenly perish: Look, whom she best endow'd she gave the more; Which bounteous gift thou shouldst in bounty cherish: She carved thee for her seal, and meant thereby Thou shouldst print more, not let that copy die."""], ["XII","""When I do count the clock that tells the time, And see the brave day sunk in hideous night; When I behold the violet past prime, And sable curls all silver'd o'er with white; When lofty trees I see barren of leaves Which erst from heat did canopy the herd, And summer's green all girded up in sheaves Borne on the bier with white and bristly beard, Then of thy beauty do I question make, That thou among the wastes of time must go, Since sweets and beauties do themselves forsake And die as fast as they see others grow; And nothing 'gainst Time's scythe can make defence Save breed, to brave him when he takes thee hence."""], ["XIII","""O, that you were yourself! 
but, love, you are No longer yours than you yourself here live: Against this coming end you should prepare, And your sweet semblance to some other give. So should that beauty which you hold in lease Find no determination: then you were Yourself again after yourself's decease, When your sweet issue your sweet form should bear. Who lets so fair a house fall to decay, Which husbandry in honour might uphold Against the stormy gusts of winter's day And barren rage of death's eternal cold? O, none but unthrifts! Dear my love, you know You had a father: let your son say so."""], ["XIV","""Not from the stars do I my judgment pluck; And yet methinks I have astronomy, But not to tell of good or evil luck, Of plagues, of dearths, or seasons' quality; Nor can I fortune to brief minutes tell, Pointing to each his thunder, rain and wind, Or say with princes if it shall go well, By oft predict that I in heaven find: But from thine eyes my knowledge I derive, And, constant stars, in them I read such art As truth and beauty shall together thrive, If from thyself to store thou wouldst convert; Or else of thee this I prognosticate: Thy end is truth's and beauty's doom and date."""], ["XV","""When I consider every thing that grows Holds in perfection but a little moment, That this huge stage presenteth nought but shows Whereon the stars in secret influence comment; When I perceive that men as plants increase, Cheered and cheque'd even by the self-same sky, Vaunt in their youthful sap, at height decrease, And wear their brave state out of memory; Then the conceit of this inconstant stay Sets you most rich in youth before my sight, Where wasteful Time debateth with Decay, To change your day of youth to sullied night; And all in war with Time for love of you, As he takes from you, I engraft you new."""], ["XVI","""But wherefore do not you a mightier way Make war upon this bloody tyrant, Time? And fortify yourself in your decay With means more blessed than my barren rhyme? 
Now stand you on the top of happy hours, And many maiden gardens yet unset With virtuous wish would bear your living flowers, Much liker than your painted counterfeit: So should the lines of life that life repair, Which this, Time's pencil, or my pupil pen, Neither in inward worth nor outward fair, Can make you live yourself in eyes of men. To give away yourself keeps yourself still, And you must live, drawn by your own sweet skill."""], ["XVII","""Who will believe my verse in time to come, If it were fill'd with your most high deserts? Though yet, heaven knows, it is but as a tomb Which hides your life and shows not half your parts. If I could write the beauty of your eyes And in fresh numbers number all your graces, The age to come would say 'This poet lies: Such heavenly touches ne'er touch'd earthly faces.' So should my papers yellow'd with their age Be scorn'd like old men of less truth than tongue, And your true rights be term'd a poet's rage And stretched metre of an antique song: But were some child of yours alive that time, You should live twice; in it and in my rhyme."""], ["XVIII","""Shall I compare thee to a summer's day? 
Thou art more lovely and more temperate: Rough winds do shake the darling buds of May, And summer's lease hath all too short a date: Sometime too hot the eye of heaven shines, And often is his gold complexion dimm'd; And every fair from fair sometime declines, By chance or nature's changing course untrimm'd; But thy eternal summer shall not fade Nor lose possession of that fair thou owest; Nor shall Death brag thou wander'st in his shade, When in eternal lines to time thou growest: So long as men can breathe or eyes can see, So long lives this and this gives life to thee."""], ["XIX","""Devouring Time, blunt thou the lion's paws, And make the earth devour her own sweet brood; Pluck the keen teeth from the fierce tiger's jaws, And burn the long-lived phoenix in her blood; Make glad and sorry seasons as thou fleets, And do whate'er thou wilt, swift-footed Time, To the wide world and all her fading sweets; But I forbid thee one most heinous crime: O, carve not with thy hours my love's fair brow, Nor draw no lines there with thine antique pen; Him in thy course untainted do allow For beauty's pattern to succeeding men. Yet, do thy worst, old Time: despite thy wrong, My love shall in my verse ever live young."""], ["XX","""A woman's face with Nature's own hand painted Hast thou, the master-mistress of my passion; A woman's gentle heart, but not acquainted With shifting change, as is false women's fashion; An eye more bright than theirs, less false in rolling, Gilding the object whereupon it gazeth; A man in hue, all 'hues' in his controlling, Much steals men's eyes and women's souls amazeth. And for a woman wert thou first created; Till Nature, as she wrought thee, fell a-doting, And by addition me of thee defeated, By adding one thing to my purpose nothing. 
But since she prick'd thee out for women's pleasure, Mine be thy love and thy love's use their treasure."""], ["XXI","""So is it not with me as with that Muse Stirr'd by a painted beauty to his verse, Who heaven itself for ornament doth use And every fair with his fair doth rehearse Making a couplement of proud compare, With sun and moon, with earth and sea's rich gems, With April's first-born flowers, and all things rare That heaven's air in this huge rondure hems. O' let me, true in love, but truly write, And then believe me, my love is as fair As any mother's child, though not so bright As those gold candles fix'd in heaven's air: Let them say more that like of hearsay well; I will not praise that purpose not to sell."""], ["XXII","""My glass shall not persuade me I am old, So long as youth and thou are of one date; But when in thee time's furrows I behold, Then look I death my days should expiate. For all that beauty that doth cover thee Is but the seemly raiment of my heart, Which in thy breast doth live, as thine in me: How can I then be elder than thou art? O, therefore, love, be of thyself so wary As I, not for myself, but for thee will; Bearing thy heart, which I will keep so chary As tender nurse her babe from faring ill. Presume not on thy heart when mine is slain; Thou gavest me thine, not to give back again."""], ["XXIII","""As an unperfect actor on the stage Who with his fear is put besides his part, Or some fierce thing replete with too much rage, Whose strength's abundance weakens his own heart. So I, for fear of trust, forget to say The perfect ceremony of love's rite, And in mine own love's strength seem to decay, O'ercharged with burden of mine own love's might. O, let my books be then the eloquence And dumb presagers of my speaking breast, Who plead for love and look for recompense More than that tongue that more hath more express'd. 
O, learn to read what silent love hath writ: To hear with eyes belongs to love's fine wit."""], ["XXIV","""Mine eye hath play'd the painter and hath stell'd Thy beauty's form in table of my heart; My body is the frame wherein 'tis held, And perspective it is the painter's art. For through the painter must you see his skill, To find where your true image pictured lies; Which in my bosom's shop is hanging still, That hath his windows glazed with thine eyes. Now see what good turns eyes for eyes have done: Mine eyes have drawn thy shape, and thine for me Are windows to my breast, where-through the sun Delights to peep, to gaze therein on thee; Yet eyes this cunning want to grace their art; They draw but what they see, know not the heart."""], ["XXV","""Let those who are in favour with their stars Of public honour and proud titles boast, Whilst I, whom fortune of such triumph bars, Unlook'd for joy in that I honour most. Great princes' favourites their fair leaves spread But as the marigold at the sun's eye, And in themselves their pride lies buried, For at a frown they in their glory die. 
The painful warrior famoused for fight, After a thousand victories once foil'd, Is from the book of honour razed quite, And all the rest forgot for which he toil'd: Then happy I, that love and am beloved Where I may not remove nor be removed."""], ["XXVI","""Lord of my love, to whom in vassalage Thy merit hath my duty strongly knit, To thee I send this written embassage, To witness duty, not to show my wit: Duty so great, which wit so poor as mine May make seem bare, in wanting words to show it, But that I hope some good conceit of thine In thy soul's thought, all naked, will bestow it; Till whatsoever star that guides my moving Points on me graciously with fair aspect And puts apparel on my tatter'd loving, To show me worthy of thy sweet respect: Then may I dare to boast how I do love thee; Till then not show my head where thou mayst prove me."""], ["XXVII","""Weary with toil, I haste me to my bed, The dear repose for limbs with travel tired; But then begins a journey in my head, To work my mind, when body's work's expired: For then my thoughts, from far where I abide, Intend a zealous pilgrimage to thee, And keep my drooping eyelids open wide, Looking on darkness which the blind do see Save that my soul's imaginary sight Presents thy shadow to my sightless view, Which, like a jewel hung in ghastly night, Makes black night beauteous and her old face new. Lo! thus, by day my limbs, by night my mind, For thee and for myself no quiet find."""], ["XXVIII","""How can I then return in happy plight, That am debarr'd the benefit of rest? When day's oppression is not eased by night, But day by night, and night by day, oppress'd? And each, though enemies to either's reign, Do in consent shake hands to torture me; The one by toil, the other to complain How far I toil, still farther off from thee. 
I tell the day, to please them thou art bright And dost him grace when clouds do blot the heaven: So flatter I the swart-complexion'd night, When sparkling stars twire not thou gild'st the even. But day doth daily draw my sorrows longer And night doth nightly make grief's strength seem stronger."""], ["XXIX","""When, in disgrace with fortune and men's eyes, I all alone beweep my outcast state And trouble deaf heaven with my bootless cries And look upon myself and curse my fate, Wishing me like to one more rich in hope, Featured like him, like him with friends possess'd, Desiring this man's art and that man's scope, With what I most enjoy contented least; Yet in these thoughts myself almost despising, Haply I think on thee, and then my state, Like to the lark at break of day arising From sullen earth, sings hymns at heaven's gate; For thy sweet love remember'd such wealth brings That then I scorn to change my state with kings."""], ["XXX","""When to the sessions of sweet silent thought I summon up remembrance of things past, I sigh the lack of many a thing I sought, And with old woes new wail my dear time's waste: Then can I drown an eye, unused to flow, For precious friends hid in death's dateless night, And weep afresh love's long since cancell'd woe, And moan the expense of many a vanish'd sight: Then can I grieve at grievances foregone, And heavily from woe to woe tell o'er The sad account of fore-bemoaned moan, Which I new pay as if not paid before. But if the while I think on thee, dear friend, All losses are restored and sorrows end."""], ["XXXI","""Thy bosom is endeared with all hearts, Which I by lacking have supposed dead, And there reigns love and all love's loving parts, And all those friends which I thought buried. How many a holy and obsequious tear Hath dear religious love stol'n from mine eye As interest of the dead, which now appear But things removed that hidden in thee lie! 
Thou art the grave where buried love doth live, Hung with the trophies of my lovers gone, Who all their parts of me to thee did give; That due of many now is thine alone: Their images I loved I view in thee, And thou, all they, hast all the all of me."""], ["XXXII","""If thou survive my well-contented day, When that churl Death my bones with dust shall cover, And shalt by fortune once more re-survey These poor rude lines of thy deceased lover, Compare them with the bettering of the time, And though they be outstripp'd by every pen, Reserve them for my love, not for their rhyme, Exceeded by the height of happier men. O, then vouchsafe me but this loving thought: 'Had my friend's Muse grown with this growing age, A dearer birth than this his love had brought, To march in ranks of better equipage: But since he died and poets better prove, Theirs for their style I'll read, his for his love.'"""], ["XXXIII","""Full many a glorious morning have I seen Flatter the mountain-tops with sovereign eye, Kissing with golden face the meadows green, Gilding pale streams with heavenly alchemy; Anon permit the basest clouds to ride With ugly rack on his celestial face, And from the forlorn world his visage hide, Stealing unseen to west with this disgrace: Even so my sun one early morn did shine With all triumphant splendor on my brow; But out, alack! he was but one hour mine; The region cloud hath mask'd him from me now. Yet him for this my love no whit disdaineth; Suns of the world may stain when heaven's sun staineth."""], ["XXXIV","""Why didst thou promise such a beauteous day, And make me travel forth without my cloak, To let base clouds o'ertake me in my way, Hiding thy bravery in their rotten smoke? 
'Tis not enough that through the cloud thou break, To dry the rain on my storm-beaten face, For no man well of such a salve can speak That heals the wound and cures not the disgrace: Nor can thy shame give physic to my grief; Though thou repent, yet I have still the loss: The offender's sorrow lends but weak relief To him that bears the strong offence's cross. Ah! but those tears are pearl which thy love sheds, And they are rich and ransom all ill deeds."""], ["XXXV","""No more be grieved at that which thou hast done: Roses have thorns, and silver fountains mud; Clouds and eclipses stain both moon and sun, And loathsome canker lives in sweetest bud. All men make faults, and even I in this, Authorizing thy trespass with compare, Myself corrupting, salving thy amiss, Excusing thy sins more than thy sins are; For to thy sensual fault I bring in sense-- Thy adverse party is thy advocate-- And 'gainst myself a lawful plea commence: Such civil war is in my love and hate That I an accessary needs must be To that sweet thief which sourly robs from me."""], ["XXXVI","""Let me confess that we two must be twain, Although our undivided loves are one: So shall those blots that do with me remain Without thy help by me be borne alone. In our two loves there is but one respect, Though in our lives a separable spite, Which though it alter not love's sole effect, Yet doth it steal sweet hours from love's delight. I may not evermore acknowledge thee, Lest my bewailed guilt should do thee shame, Nor thou with public kindness honour me, Unless thou take that honour from thy name: But do not so; I love thee in such sort As, thou being mine, mine is thy good report."""], ["XXXVII","""As a decrepit father takes delight To see his active child do deeds of youth, So I, made lame by fortune's dearest spite, Take all my comfort of thy worth and truth. 
For whether beauty, birth, or wealth, or wit, Or any of these all, or all, or more, Entitled in thy parts do crowned sit, I make my love engrafted to this store: So then I am not lame, poor, nor despised, Whilst that this shadow doth such substance give That I in thy abundance am sufficed And by a part of all thy glory live. Look, what is best, that best I wish in thee: This wish I have; then ten times happy me!"""], ["XXXVIII","""How can my Muse want subject to invent, While thou dost breathe, that pour'st into my verse Thine own sweet argument, too excellent For every vulgar paper to rehearse? O, give thyself the thanks, if aught in me Worthy perusal stand against thy sight; For who's so dumb that cannot write to thee, When thou thyself dost give invention light? Be thou the tenth Muse, ten times more in worth Than those old nine which rhymers invocate; And he that calls on thee, let him bring forth Eternal numbers to outlive long date. If my slight Muse do please these curious days, The pain be mine, but thine shall be the praise."""], ["XXXIX","""O, how thy worth with manners may I sing, When thou art all the better part of me? What can mine own praise to mine own self bring? And what is 't but mine own when I praise thee? Even for this let us divided live, And our dear love lose name of single one, That by this separation I may give That due to thee which thou deservest alone. O absence, what a torment wouldst thou prove, Were it not thy sour leisure gave sweet leave To entertain the time with thoughts of love, Which time and thoughts so sweetly doth deceive, And that thou teachest how to make one twain, By praising him here who doth hence remain!"""], ["XL","""Take all my loves, my love, yea, take them all; What hast thou then more than thou hadst before? No love, my love, that thou mayst true love call; All mine was thine before thou hadst this more. 
Then if for my love thou my love receivest, I cannot blame thee for my love thou usest; But yet be blamed, if thou thyself deceivest By wilful taste of what thyself refusest. I do forgive thy robbery, gentle thief, Although thou steal thee all my poverty; And yet, love knows, it is a greater grief To bear love's wrong than hate's known injury. Lascivious grace, in whom all ill well shows, Kill me with spites; yet we must not be foes."""], ["XLI","""Those petty wrongs that liberty commits, When I am sometime absent from thy heart, Thy beauty and thy years full well befits, For still temptation follows where thou art. Gentle thou art and therefore to be won, Beauteous thou art, therefore to be assailed; And when a woman woos, what woman's son Will sourly leave her till she have prevailed? Ay me! but yet thou mightest my seat forbear, And chide thy beauty and thy straying youth, Who lead thee in their riot even there Where thou art forced to break a twofold truth, Hers by thy beauty tempting her to thee, Thine, by thy beauty being false to me."""], ["XLII","""That thou hast her, it is not all my grief, And yet it may be said I loved her dearly; That she hath thee, is of my wailing chief, A loss in love that touches me more nearly. Loving offenders, thus I will excuse ye: Thou dost love her, because thou knowst I love her; And for my sake even so doth she abuse me, Suffering my friend for my sake to approve her. If I lose thee, my loss is my love's gain, And losing her, my friend hath found that loss; Both find each other, and I lose both twain, And both for my sake lay on me this cross: But here's the joy; my friend and I are one; Sweet flattery! then she loves but me alone."""], ["XLIII","""When most I wink, then do mine eyes best see, For all the day they view things unrespected; But when I sleep, in dreams they look on thee, And darkly bright are bright in dark directed. 
Then thou, whose shadow shadows doth make bright, How would thy shadow's form form happy show To the clear day with thy much clearer light, When to unseeing eyes thy shade shines so! How would, I say, mine eyes be blessed made By looking on thee in the living day, When in dead night thy fair imperfect shade Through heavy sleep on sightless eyes doth stay! All days are nights to see till I see thee, And nights bright days when dreams do show thee me."""], ["XLIV","""If the dull substance of my flesh were thought, Injurious distance should not stop my way; For then despite of space I would be brought, From limits far remote where thou dost stay. No matter then although my foot did stand Upon the farthest earth removed from thee; For nimble thought can jump both sea and land As soon as think the place where he would be. But ah! thought kills me that I am not thought, To leap large lengths of miles when thou art gone, But that so much of earth and water wrought I must attend time's leisure with my moan, Receiving nought by elements so slow But heavy tears, badges of either's woe."""], ["XLV","""The other two, slight air and purging fire, Are both with thee, wherever I abide; The first my thought, the other my desire, These present-absent with swift motion slide. For when these quicker elements are gone In tender embassy of love to thee, My life, being made of four, with two alone Sinks down to death, oppress'd with melancholy; Until life's composition be recured By those swift messengers return'd from thee, Who even but now come back again, assured Of thy fair health, recounting it to me: This told, I joy; but then no longer glad, I send them back again and straight grow sad."""], ["XLVI","""Mine eye and heart are at a mortal war How to divide the conquest of thy sight; Mine eye my heart thy picture's sight would bar, My heart mine eye the freedom of that right. 
My heart doth plead that thou in him dost lie-- A closet never pierced with crystal eyes-- But the defendant doth that plea deny And says in him thy fair appearance lies. To 'cide this title is impanneled A quest of thoughts, all tenants to the heart, And by their verdict is determined The clear eye's moiety and the dear heart's part: As thus; mine eye's due is thy outward part, And my heart's right thy inward love of heart."""], ["XLVII","""Betwixt mine eye and heart a league is took, And each doth good turns now unto the other: When that mine eye is famish'd for a look, Or heart in love with sighs himself doth smother, With my love's picture then my eye doth feast And to the painted banquet bids my heart; Another time mine eye is my heart's guest And in his thoughts of love doth share a part: So, either by thy picture or my love, Thyself away art present still with me; For thou not farther than my thoughts canst move, And I am still with them and they with thee; Or, if they sleep, thy picture in my sight Awakes my heart to heart's and eye's delight."""], ["XLVIII","""How careful was I, when I took my way, Each trifle under truest bars to thrust, That to my use it might unused stay From hands of falsehood, in sure wards of trust! But thou, to whom my jewels trifles are, Most worthy of comfort, now my greatest grief, Thou, best of dearest and mine only care, Art left the prey of every vulgar thief. 
Thee have I not lock'd up in any chest, Save where thou art not, though I feel thou art, Within the gentle closure of my breast, From whence at pleasure thou mayst come and part; And even thence thou wilt be stol'n, I fear, For truth proves thievish for a prize so dear."""], ["XLIX","""Against that time, if ever that time come, When I shall see thee frown on my defects, When as thy love hath cast his utmost sum, Call'd to that audit by advised respects; Against that time when thou shalt strangely pass And scarcely greet me with that sun thine eye, When love, converted from the thing it was, Shall reasons find of settled gravity,-- Against that time do I ensconce me here Within the knowledge of mine own desert, And this my hand against myself uprear, To guard the lawful reasons on thy part: To leave poor me thou hast the strength of laws, Since why to love I can allege no cause."""], ["L","""How heavy do I journey on the way, When what I seek, my weary travel's end, Doth teach that ease and that repose to say 'Thus far the miles are measured from thy friend!' The beast that bears me, tired with my woe, Plods dully on, to bear that weight in me, As if by some instinct the wretch did know His rider loved not speed, being made from thee: The bloody spur cannot provoke him on That sometimes anger thrusts into his hide; Which heavily he answers with a groan, More sharp to me than spurring to his side; For that same groan doth put this in my mind; My grief lies onward and my joy behind."""], ["LI","""Thus can my love excuse the slow offence Of my dull bearer when from thee I speed: From where thou art why should I haste me thence? Till I return, of posting is no need. O, what excuse will my poor beast then find, When swift extremity can seem but slow? 
Then should I spur, though mounted on the wind; In winged speed no motion shall I know: Then can no horse with my desire keep pace; Therefore desire of perfect'st love being made, Shall neigh--no dull flesh--in his fiery race; But love, for love, thus shall excuse my jade; Since from thee going he went wilful-slow, Towards thee I'll run, and give him leave to go."""], ["LII","""So am I as the rich, whose blessed key Can bring him to his sweet up-locked treasure, The which he will not every hour survey, For blunting the fine point of seldom pleasure. Therefore are feasts so solemn and so rare, Since, seldom coming, in the long year set, Like stones of worth they thinly placed are, Or captain jewels in the carcanet. So is the time that keeps you as my chest, Or as the wardrobe which the robe doth hide, To make some special instant special blest, By new unfolding his imprison'd pride. Blessed are you, whose worthiness gives scope, Being had, to triumph, being lack'd, to hope."""], ["LIII","""What is your substance, whereof are you made, That millions of strange shadows on you tend? Since every one hath, every one, one shade, And you, but one, can every shadow lend. Describe Adonis, and the counterfeit Is poorly imitated after you; On Helen's cheek all art of beauty set, And you in Grecian tires are painted new: Speak of the spring and foison of the year; The one doth shadow of your beauty show, The other as your bounty doth appear; And you in every blessed shape we know. In all external grace you have some part, But you like none, none you, for constant heart."""], ["LIV","""O, how much more doth beauty beauteous seem By that sweet ornament which truth doth give! The rose looks fair, but fairer we it deem For that sweet odour which doth in it live. 
The canker-blooms have full as deep a dye As the perfumed tincture of the roses, Hang on such thorns and play as wantonly When summer's breath their masked buds discloses: But, for their virtue only is their show, They live unwoo'd and unrespected fade, Die to themselves. Sweet roses do not so; Of their sweet deaths are sweetest odours made: And so of you, beauteous and lovely youth, When that shall fade, my verse distills your truth."""], ["LV","""Not marble, nor the gilded monuments Of princes, shall outlive this powerful rhyme; But you shall shine more bright in these contents Than unswept stone besmear'd with sluttish time. When wasteful war shall statues overturn, And broils root out the work of masonry, Nor Mars his sword nor war's quick fire shall burn The living record of your memory. 'Gainst death and all-oblivious enmity Shall you pace forth; your praise shall still find room Even in the eyes of all posterity That wear this world out to the ending doom. So, till the judgment that yourself arise, You live in this, and dwell in lover's eyes."""], ["LVI","""Sweet love, renew thy force; be it not said Thy edge should blunter be than appetite, Which but to-day by feeding is allay'd, To-morrow sharpen'd in his former might: So, love, be thou; although to-day thou fill Thy hungry eyes even till they wink with fullness, To-morrow see again, and do not kill The spirit of love with a perpetual dullness. Let this sad interim like the ocean be Which parts the shore, where two contracted new Come daily to the banks, that, when they see Return of love, more blest may be the view; Else call it winter, which being full of care Makes summer's welcome thrice more wish'd, more rare."""], ["LVII","""Being your slave, what should I do but tend Upon the hours and times of your desire? I have no precious time at all to spend, Nor services to do, till you require. 
Nor dare I chide the world-without-end hour Whilst I, my sovereign, watch the clock for you, Nor think the bitterness of absence sour When you have bid your servant once adieu; Nor dare I question with my jealous thought Where you may be, or your affairs suppose, But, like a sad slave, stay and think of nought Save, where you are how happy you make those. So true a fool is love that in your will, Though you do any thing, he thinks no ill."""], ["LVIII","""That god forbid that made me first your slave, I should in thought control your times of pleasure, Or at your hand the account of hours to crave, Being your vassal, bound to stay your leisure! O, let me suffer, being at your beck, The imprison'd absence of your liberty; And patience, tame to sufferance, bide each cheque, Without accusing you of injury. Be where you list, your charter is so strong That you yourself may privilege your time To what you will; to you it doth belong Yourself to pardon of self-doing crime. I am to wait, though waiting so be hell; Not blame your pleasure, be it ill or well."""], ["LIX","""If there be nothing new, but that which is Hath been before, how are our brains beguiled, Which, labouring for invention, bear amiss The second burden of a former child! O, that record could with a backward look, Even of five hundred courses of the sun, Show me your image in some antique book, Since mind at first in character was done! That I might see what the old world could say To this composed wonder of your frame; Whether we are mended, or whether better they, Or whether revolution be the same. O, sure I am, the wits of former days To subjects worse have given admiring praise."""], ["LX","""Like as the waves make towards the pebbled shore, So do our minutes hasten to their end; Each changing place with that which goes before, In sequent toil all forwards do contend. 
Nativity, once in the main of light, Crawls to maturity, wherewith being crown'd, Crooked eclipses 'gainst his glory fight, And Time that gave doth now his gift confound. Time doth transfix the flourish set on youth And delves the parallels in beauty's brow, Feeds on the rarities of nature's truth, And nothing stands but for his scythe to mow: And yet to times in hope my verse shall stand, Praising thy worth, despite his cruel hand."""], ["LXI","""Is it thy will thy image should keep open My heavy eyelids to the weary night? Dost thou desire my slumbers should be broken, While shadows like to thee do mock my sight? Is it thy spirit that thou send'st from thee So far from home into my deeds to pry, To find out shames and idle hours in me, The scope and tenor of thy jealousy? O, no! thy love, though much, is not so great: It is my love that keeps mine eye awake; Mine own true love that doth my rest defeat, To play the watchman ever for thy sake: For thee watch I whilst thou dost wake elsewhere, From me far off, with others all too near."""], ["LXII","""Sin of self-love possesseth all mine eye And all my soul and all my every part; And for this sin there is no remedy, It is so grounded inward in my heart. Methinks no face so gracious is as mine, No shape so true, no truth of such account; And for myself mine own worth do define, As I all other in all worths surmount. But when my glass shows me myself indeed, Beated and chopp'd with tann'd antiquity, Mine own self-love quite contrary I read; Self so self-loving were iniquity. 
'Tis thee, myself, that for myself I praise, Painting my age with beauty of thy days."""], ["LXIII","""Against my love shall be, as I am now, With Time's injurious hand crush'd and o'er-worn; When hours have drain'd his blood and fill'd his brow With lines and wrinkles; when his youthful morn Hath travell'd on to age's steepy night, And all those beauties whereof now he's king Are vanishing or vanish'd out of sight, Stealing away the treasure of his spring; For such a time do I now fortify Against confounding age's cruel knife, That he shall never cut from memory My sweet love's beauty, though my lover's life: His beauty shall in these black lines be seen, And they shall live, and he in them still green."""], ["LXIV","""When I have seen by Time's fell hand defaced The rich proud cost of outworn buried age; When sometime lofty towers I see down-razed And brass eternal slave to mortal rage; When I have seen the hungry ocean gain Advantage on the kingdom of the shore, And the firm soil win of the watery main, Increasing store with loss and loss with store; When I have seen such interchange of state, Or state itself confounded to decay; Ruin hath taught me thus to ruminate, That Time will come and take my love away. This thought is as a death, which cannot choose But weep to have that which it fears to lose."""], ["LXV","""Since brass, nor stone, nor earth, nor boundless sea, But sad mortality o'er-sways their power, How with this rage shall beauty hold a plea, Whose action is no stronger than a flower? O, how shall summer's honey breath hold out Against the wreckful siege of battering days, When rocks impregnable are not so stout, Nor gates of steel so strong, but Time decays? O fearful meditation! where, alack, Shall Time's best jewel from Time's chest lie hid? Or what strong hand can hold his swift foot back? Or who his spoil of beauty can forbid? 
O, none, unless this miracle have might, That in black ink my love may still shine bright."""], ["LXVI","""Tired with all these, for restful death I cry, As, to behold desert a beggar born, And needy nothing trimm'd in jollity, And purest faith unhappily forsworn, And guilded honour shamefully misplaced, And maiden virtue rudely strumpeted, And right perfection wrongfully disgraced, And strength by limping sway disabled, And art made tongue-tied by authority, And folly doctor-like controlling skill, And simple truth miscall'd simplicity, And captive good attending captain ill: Tired with all these, from these would I be gone, Save that, to die, I leave my love alone."""], ["LXVII","""Ah! wherefore with infection should he live, And with his presence grace impiety, That sin by him advantage should achieve And lace itself with his society? Why should false painting imitate his cheek And steal dead seeing of his living hue? Why should poor beauty indirectly seek Roses of shadow, since his rose is true? Why should he live, now Nature bankrupt is, Beggar'd of blood to blush through lively veins? For she hath no exchequer now but his, And, proud of many, lives upon his gains. 
O, him she stores, to show what wealth she had In days long since, before these last so bad."""], ["LXVIII","""Thus is his cheek the map of days outworn, When beauty lived and died as flowers do now, Before the bastard signs of fair were born, Or durst inhabit on a living brow; Before the golden tresses of the dead, The right of sepulchres, were shorn away, To live a second life on second head; Ere beauty's dead fleece made another gay: In him those holy antique hours are seen, Without all ornament, itself and true, Making no summer of another's green, Robbing no old to dress his beauty new; And him as for a map doth Nature store, To show false Art what beauty was of yore."""], ["LXIX","""Those parts of thee that the world's eye doth view Want nothing that the thought of hearts can mend; All tongues, the voice of souls, give thee that due, Uttering bare truth, even so as foes commend. Thy outward thus with outward praise is crown'd; But those same tongues that give thee so thine own In other accents do this praise confound By seeing farther than the eye hath shown. They look into the beauty of thy mind, And that, in guess, they measure by thy deeds; Then, churls, their thoughts, although their eyes were kind, To thy fair flower add the rank smell of weeds: But why thy odour matcheth not thy show, The solve is this, that thou dost common grow."""], ["LXX","""That thou art blamed shall not be thy defect, For slander's mark was ever yet the fair; The ornament of beauty is suspect, A crow that flies in heaven's sweetest air. So thou be good, slander doth but approve Thy worth the greater, being woo'd of time; For canker vice the sweetest buds doth love, And thou present'st a pure unstained prime. 
Thou hast pass'd by the ambush of young days, Either not assail'd or victor being charged; Yet this thy praise cannot be so thy praise, To tie up envy evermore enlarged: If some suspect of ill mask'd not thy show, Then thou alone kingdoms of hearts shouldst owe."""], ["LXXI","""No longer mourn for me when I am dead Then you shall hear the surly sullen bell Give warning to the world that I am fled From this vile world, with vilest worms to dwell: Nay, if you read this line, remember not The hand that writ it; for I love you so That I in your sweet thoughts would be forgot If thinking on me then should make you woe. O, if, I say, you look upon this verse When I perhaps compounded am with clay, Do not so much as my poor name rehearse. But let your love even with my life decay, Lest the wise world should look into your moan And mock you with me after I am gone."""], ["LXXII","""O, lest the world should task you to recite What merit lived in me, that you should love After my death, dear love, forget me quite, For you in me can nothing worthy prove; Unless you would devise some virtuous lie, To do more for me than mine own desert, And hang more praise upon deceased I Than niggard truth would willingly impart: O, lest your true love may seem false in this, That you for love speak well of me untrue, My name be buried where my body is, And live no more to shame nor me nor you. For I am shamed by that which I bring forth, And so should you, to love things nothing worth."""], ["LXXIII","""That time of year thou mayst in me behold When yellow leaves, or none, or few, do hang Upon those boughs which shake against the cold, Bare ruin'd choirs, where late the sweet birds sang. In me thou seest the twilight of such day As after sunset fadeth in the west, Which by and by black night doth take away, Death's second self, that seals up all in rest. 
In me thou see'st the glowing of such fire That on the ashes of his youth doth lie, As the death-bed whereon it must expire Consumed with that which it was nourish'd by. This thou perceivest, which makes thy love more strong, To love that well which thou must leave ere long."""], ["LXXIV","""But be contented: when that fell arrest Without all bail shall carry me away, My life hath in this line some interest, Which for memorial still with thee shall stay. When thou reviewest this, thou dost review The very part was consecrate to thee: The earth can have but earth, which is his due; My spirit is thine, the better part of me: So then thou hast but lost the dregs of life, The prey of worms, my body being dead, The coward conquest of a wretch's knife, Too base of thee to be remembered. The worth of that is that which it contains, And that is this, and this with thee remains."""], ["LXXV","""So are you to my thoughts as food to life, Or as sweet-season'd showers are to the ground; And for the peace of you I hold such strife As 'twixt a miser and his wealth is found; Now proud as an enjoyer and anon Doubting the filching age will steal his treasure, Now counting best to be with you alone, Then better'd that the world may see my pleasure; Sometime all full with feasting on your sight And by and by clean starved for a look; Possessing or pursuing no delight, Save what is had or must from you be took. Thus do I pine and surfeit day by day, Or gluttoning on all, or all away."""], ["LXXVI","""Why is my verse so barren of new pride, So far from variation or quick change? Why with the time do I not glance aside To new-found methods and to compounds strange? Why write I still all one, ever the same, And keep invention in a noted weed, That every word doth almost tell my name, Showing their birth and where they did proceed? 
O, know, sweet love, I always write of you, And you and love are still my argument; So all my best is dressing old words new, Spending again what is already spent: For as the sun is daily new and old, So is my love still telling what is told."""], ["LXXVII","""Thy glass will show thee how thy beauties wear, Thy dial how thy precious minutes waste; The vacant leaves thy mind's imprint will bear, And of this book this learning mayst thou taste. The wrinkles which thy glass will truly show Of mouthed graves will give thee memory; Thou by thy dial's shady stealth mayst know Time's thievish progress to eternity. Look, what thy memory can not contain Commit to these waste blanks, and thou shalt find Those children nursed, deliver'd from thy brain, To take a new acquaintance of thy mind. These offices, so oft as thou wilt look, Shall profit thee and much enrich thy book."""], ["LXXVIII","""So oft have I invoked thee for my Muse And found such fair assistance in my verse As every alien pen hath got my use And under thee their poesy disperse. Thine eyes that taught the dumb on high to sing And heavy ignorance aloft to fly Have added feathers to the learned's wing And given grace a double majesty. Yet be most proud of that which I compile, Whose influence is thine and born of thee: In others' works thou dost but mend the style, And arts with thy sweet graces graced be; But thou art all my art and dost advance As high as learning my rude ignorance."""], ["LXXIX","""Whilst I alone did call upon thy aid, My verse alone had all thy gentle grace, But now my gracious numbers are decay'd And my sick Muse doth give another place. I grant, sweet love, thy lovely argument Deserves the travail of a worthier pen, Yet what of thee thy poet doth invent He robs thee of and pays it thee again. He lends thee virtue and he stole that word From thy behavior; beauty doth he give And found it in thy cheek; he can afford No praise to thee but what in thee doth live. 
Then thank him not for that which he doth say, Since what he owes thee thou thyself dost pay."""], ["LXXX","""O, how I faint when I of you do write, Knowing a better spirit doth use your name, And in the praise thereof spends all his might, To make me tongue-tied, speaking of your fame! But since your worth, wide as the ocean is, The humble as the proudest sail doth bear, My saucy bark inferior far to his On your broad main doth wilfully appear. Your shallowest help will hold me up afloat, Whilst he upon your soundless deep doth ride; Or being wreck'd, I am a worthless boat, He of tall building and of goodly pride: Then if he thrive and I be cast away, The worst was this; my love was my decay."""], ["LXXXI","""Or I shall live your epitaph to make, Or you survive when I in earth am rotten; From hence your memory death cannot take, Although in me each part will be forgotten. Your name from hence immortal life shall have, Though I, once gone, to all the world must die: The earth can yield me but a common grave, When you entombed in men's eyes shall lie. 
Your monument shall be my gentle verse, Which eyes not yet created shall o'er-read, And tongues to be your being shall rehearse When all the breathers of this world are dead; You still shall live--such virtue hath my pen-- Where breath most breathes, even in the mouths of men."""], ["LXXXII","""I grant thou wert not married to my Muse And therefore mayst without attaint o'erlook The dedicated words which writers use Of their fair subject, blessing every book Thou art as fair in knowledge as in hue, Finding thy worth a limit past my praise, And therefore art enforced to seek anew Some fresher stamp of the time-bettering days And do so, love; yet when they have devised What strained touches rhetoric can lend, Thou truly fair wert truly sympathized In true plain words by thy true-telling friend; And their gross painting might be better used Where cheeks need blood; in thee it is abused."""], ["LXXXIII","""I never saw that you did painting need And therefore to your fair no painting set; I found, or thought I found, you did exceed The barren tender of a poet's debt; And therefore have I slept in your report, That you yourself being extant well might show How far a modern quill doth come too short, Speaking of worth, what worth in you doth grow. This silence for my sin you did impute, Which shall be most my glory, being dumb; For I impair not beauty being mute, When others would give life and bring a tomb. There lives more life in one of your fair eyes Than both your poets can in praise devise."""], ["LXXXIV","""Who is it that says most? which can say more Than this rich praise, that you alone are you? In whose confine immured is the store Which should example where your equal grew. 
Lean penury within that pen doth dwell That to his subject lends not some small glory; But he that writes of you, if he can tell That you are you, so dignifies his story, Let him but copy what in you is writ, Not making worse what nature made so clear, And such a counterpart shall fame his wit, Making his style admired every where. You to your beauteous blessings add a curse, Being fond on praise, which makes your praises worse."""], ["LXXXV","""My tongue-tied Muse in manners holds her still, While comments of your praise, richly compiled, Reserve their character with golden quill And precious phrase by all the Muses filed. I think good thoughts whilst other write good words, And like unletter'd clerk still cry 'Amen' To every hymn that able spirit affords In polish'd form of well-refined pen. Hearing you praised, I say ''Tis so, 'tis true,' And to the most of praise add something more; But that is in my thought, whose love to you, Though words come hindmost, holds his rank before. Then others for the breath of words respect, Me for my dumb thoughts, speaking in effect."""], ["LXXXVI","""Was it the proud full sail of his great verse, Bound for the prize of all too precious you, That did my ripe thoughts in my brain inhearse, Making their tomb the womb wherein they grew? Was it his spirit, by spirits taught to write Above a mortal pitch, that struck me dead? No, neither he, nor his compeers by night Giving him aid, my verse astonished. He, nor that affable familiar ghost Which nightly gulls him with intelligence As victors of my silence cannot boast; I was not sick of any fear from thence: But when your countenance fill'd up his line, Then lack'd I matter; that enfeebled mine."""], ["LXXXVII","""Farewell! thou art too dear for my possessing, And like enough thou know'st thy estimate: The charter of thy worth gives thee releasing; My bonds in thee are all determinate. For how do I hold thee but by thy granting? And for that riches where is my deserving? 
The cause of this fair gift in me is wanting, And so my patent back again is swerving. Thyself thou gavest, thy own worth then not knowing, Or me, to whom thou gavest it, else mistaking; So thy great gift, upon misprision growing, Comes home again, on better judgment making. Thus have I had thee, as a dream doth flatter, In sleep a king, but waking no such matter."""], ["LXXXVIII","""When thou shalt be disposed to set me light, And place my merit in the eye of scorn, Upon thy side against myself I'll fight, And prove thee virtuous, though thou art forsworn. With mine own weakness being best acquainted, Upon thy part I can set down a story Of faults conceal'd, wherein I am attainted, That thou in losing me shalt win much glory: And I by this will be a gainer too; For bending all my loving thoughts on thee, The injuries that to myself I do, Doing thee vantage, double-vantage me. Such is my love, to thee I so belong, That for thy right myself will bear all wrong."""], ["LXXXIX","""Say that thou didst forsake me for some fault, And I will comment upon that offence; Speak of my lameness, and I straight will halt, Against thy reasons making no defence. Thou canst not, love, disgrace me half so ill, To set a form upon desired change, As I'll myself disgrace: knowing thy will, I will acquaintance strangle and look strange, Be absent from thy walks, and in my tongue Thy sweet beloved name no more shall dwell, Lest I, too much profane, should do it wrong And haply of our old acquaintance tell. For thee against myself I'll vow debate, For I must ne'er love him whom thou dost hate."""], ["XC","""Then hate me when thou wilt; if ever, now; Now, while the world is bent my deeds to cross, Join with the spite of fortune, make me bow, And do not drop in for an after-loss: Ah, do not, when my heart hath 'scoped this sorrow, Come in the rearward of a conquer'd woe; Give not a windy night a rainy morrow, To linger out a purposed overthrow. 
If thou wilt leave me, do not leave me last, When other petty griefs have done their spite But in the onset come; so shall I taste At first the very worst of fortune's might, And other strains of woe, which now seem woe, Compared with loss of thee will not seem so."""], ["XCI","""Some glory in their birth, some in their skill, Some in their wealth, some in their bodies' force, Some in their garments, though new-fangled ill, Some in their hawks and hounds, some in their horse; And every humour hath his adjunct pleasure, Wherein it finds a joy above the rest: But these particulars are not my measure; All these I better in one general best. Thy love is better than high birth to me, Richer than wealth, prouder than garments' cost, Of more delight than hawks or horses be; And having thee, of all men's pride I boast: Wretched in this alone, that thou mayst take All this away and me most wretched make."""], ["XCII","""But do thy worst to steal thyself away, For term of life thou art assured mine, And life no longer than thy love will stay, For it depends upon that love of thine. Then need I not to fear the worst of wrongs, When in the least of them my life hath end. I see a better state to me belongs Than that which on thy humour doth depend; Thou canst not vex me with inconstant mind, Since that my life on thy revolt doth lie. O, what a happy title do I find, Happy to have thy love, happy to die! But what's so blessed-fair that fears no blot? Thou mayst be false, and yet I know it not."""], ["XCIII","""So shall I live, supposing thou art true, Like a deceived husband; so love's face May still seem love to me, though alter'd new; Thy looks with me, thy heart in other place: For there can live no hatred in thine eye, Therefore in that I cannot know thy change. 
In many's looks the false heart's history Is writ in moods and frowns and wrinkles strange, But heaven in thy creation did decree That in thy face sweet love should ever dwell; Whate'er thy thoughts or thy heart's workings be, Thy looks should nothing thence but sweetness tell. How like Eve's apple doth thy beauty grow, if thy sweet virtue answer not thy show!"""], ["XCIV","""They that have power to hurt and will do none, That do not do the thing they most do show, Who, moving others, are themselves as stone, Unmoved, cold, and to temptation slow, They rightly do inherit heaven's graces And husband nature's riches from expense; They are the lords and owners of their faces, Others but stewards of their excellence. The summer's flower is to the summer sweet, Though to itself it only live and die, But if that flower with base infection meet, The basest weed outbraves his dignity: For sweetest things turn sourest by their deeds; Lilies that fester smell far worse than weeds."""], ["XCV","""How sweet and lovely dost thou make the shame Which, like a canker in the fragrant rose, Doth spot the beauty of thy budding name! O, in what sweets dost thou thy sins enclose! That tongue that tells the story of thy days, Making lascivious comments on thy sport, Cannot dispraise but in a kind of praise; Naming thy name blesses an ill report. O, what a mansion have those vices got Which for their habitation chose out thee, Where beauty's veil doth cover every blot, And all things turn to fair that eyes can see! Take heed, dear heart, of this large privilege; The hardest knife ill-used doth lose his edge."""], ["XCVI","""Some say thy fault is youth, some wantonness; Some say thy grace is youth and gentle sport; Both grace and faults are loved of more and less; Thou makest faults graces that to thee resort. As on the finger of a throned queen The basest jewel will be well esteem'd, So are those errors that in thee are seen To truths translated and for true things deem'd. 
How many lambs might the stern wolf betray, If like a lamb he could his looks translate! How many gazers mightst thou lead away, If thou wouldst use the strength of all thy state! But do not so; I love thee in such sort As, thou being mine, mine is thy good report."""], ["XCVII","""How like a winter hath my absence been From thee, the pleasure of the fleeting year! What freezings have I felt, what dark days seen! What old December's bareness every where! And yet this time removed was summer's time, The teeming autumn, big with rich increase, Bearing the wanton burden of the prime, Like widow'd wombs after their lords' decease: Yet this abundant issue seem'd to me But hope of orphans and unfather'd fruit; For summer and his pleasures wait on thee, And, thou away, the very birds are mute; Or, if they sing, 'tis with so dull a cheer That leaves look pale, dreading the winter's near."""], ["XCVIII","""From you have I been absent in the spring, When proud-pied April dress'd in all his trim Hath put a spirit of youth in every thing, That heavy Saturn laugh'd and leap'd with him. Yet nor the lays of birds nor the sweet smell Of different flowers in odour and in hue Could make me any summer's story tell, Or from their proud lap pluck them where they grew; Nor did I wonder at the lily's white, Nor praise the deep vermilion in the rose; They were but sweet, but figures of delight, Drawn after you, you pattern of all those. Yet seem'd it winter still, and, you away, As with your shadow I with these did play:"""], ["XCIX","""The forward violet thus did I chide: Sweet thief, whence didst thou steal thy sweet that smells, If not from my love's breath? The purple pride Which on thy soft cheek for complexion dwells In my love's veins thou hast too grossly dyed. 
The lily I condemned for thy hand, And buds of marjoram had stol'n thy hair: The roses fearfully on thorns did stand, One blushing shame, another white despair; A third, nor red nor white, had stol'n of both And to his robbery had annex'd thy breath; But, for his theft, in pride of all his growth A vengeful canker eat him up to death. More flowers I noted, yet I none could see But sweet or colour it had stol'n from thee."""], ["C","""Where art thou, Muse, that thou forget'st so long To speak of that which gives thee all thy might? Spend'st thou thy fury on some worthless song, Darkening thy power to lend base subjects light? Return, forgetful Muse, and straight redeem In gentle numbers time so idly spent; Sing to the ear that doth thy lays esteem And gives thy pen both skill and argument. Rise, resty Muse, my love's sweet face survey, If Time have any wrinkle graven there; If any, be a satire to decay, And make Time's spoils despised every where. Give my love fame faster than Time wastes life; So thou prevent'st his scythe and crooked knife."""], ["CI","""O truant Muse, what shall be thy amends For thy neglect of truth in beauty dyed? Both truth and beauty on my love depends; So dost thou too, and therein dignified. Make answer, Muse: wilt thou not haply say 'Truth needs no colour, with his colour fix'd; Beauty no pencil, beauty's truth to lay; But best is best, if never intermix'd?' Because he needs no praise, wilt thou be dumb? Excuse not silence so; for't lies in thee To make him much outlive a gilded tomb, And to be praised of ages yet to be. Then do thy office, Muse; I teach thee how To make him seem long hence as he shows now."""], ["CII","""My love is strengthen'd, though more weak in seeming; I love not less, though less the show appear: That love is merchandized whose rich esteeming The owner's tongue doth publish every where. 
Our love was new and then but in the spring When I was wont to greet it with my lays, As Philomel in summer's front doth sing And stops her pipe in growth of riper days: Not that the summer is less pleasant now Than when her mournful hymns did hush the night, But that wild music burthens every bough And sweets grown common lose their dear delight. Therefore like her I sometime hold my tongue, Because I would not dull you with my song."""], ["CIII","""Alack, what poverty my Muse brings forth, That having such a scope to show her pride, The argument all bare is of more worth Than when it hath my added praise beside! O, blame me not, if I no more can write! Look in your glass, and there appears a face That over-goes my blunt invention quite, Dulling my lines and doing me disgrace. Were it not sinful then, striving to mend, To mar the subject that before was well? For to no other pass my verses tend Than of your graces and your gifts to tell; And more, much more, than in my verse can sit Your own glass shows you when you look in it."""], ["CIV","""To me, fair friend, you never can be old, For as you were when first your eye I eyed, Such seems your beauty still. Three winters cold Have from the forests shook three summers' pride, Three beauteous springs to yellow autumn turn'd In process of the seasons have I seen, Three April perfumes in three hot Junes burn'd, Since first I saw you fresh, which yet are green. Ah! yet doth beauty, like a dial-hand, Steal from his figure and no pace perceived; So your sweet hue, which methinks still doth stand, Hath motion and mine eye may be deceived: For fear of which, hear this, thou age unbred; Ere you were born was beauty's summer dead."""], ["CV","""Let not my love be call'd idolatry, Nor my beloved as an idol show, Since all alike my songs and praises be To one, of one, still such, and ever so. 
Kind is my love to-day, to-morrow kind, Still constant in a wondrous excellence; Therefore my verse to constancy confined, One thing expressing, leaves out difference. 'Fair, kind and true' is all my argument, 'Fair, kind, and true' varying to other words; And in this change is my invention spent, Three themes in one, which wondrous scope affords. 'Fair, kind, and true,' have often lived alone, Which three till now never kept seat in one."""], ["CVI","""When in the chronicle of wasted time I see descriptions of the fairest wights, And beauty making beautiful old rhyme In praise of ladies dead and lovely knights, Then, in the blazon of sweet beauty's best, Of hand, of foot, of lip, of eye, of brow, I see their antique pen would have express'd Even such a beauty as you master now. So all their praises are but prophecies Of this our time, all you prefiguring; And, for they look'd but with divining eyes, They had not skill enough your worth to sing: For we, which now behold these present days, Had eyes to wonder, but lack tongues to praise."""], ["CVII","""Not mine own fears, nor the prophetic soul Of the wide world dreaming on things to come, Can yet the lease of my true love control, Supposed as forfeit to a confined doom. The mortal moon hath her eclipse endured And the sad augurs mock their own presage; Incertainties now crown themselves assured And peace proclaims olives of endless age. Now with the drops of this most balmy time My love looks fresh, and death to me subscribes, Since, spite of him, I'll live in this poor rhyme, While he insults o'er dull and speechless tribes: And thou in this shalt find thy monument, When tyrants' crests and tombs of brass are spent."""], ["CVIII","""What's in the brain that ink may character Which hath not figured to thee my true spirit? What's new to speak, what new to register, That may express my love or thy dear merit? 
Nothing, sweet boy; but yet, like prayers divine, I must, each day say o'er the very same, Counting no old thing old, thou mine, I thine, Even as when first I hallow'd thy fair name. So that eternal love in love's fresh case Weighs not the dust and injury of age, Nor gives to necessary wrinkles place, But makes antiquity for aye his page, Finding the first conceit of love there bred Where time and outward form would show it dead."""], ["CIX","""O, never say that I was false of heart, Though absence seem'd my flame to qualify. As easy might I from myself depart As from my soul, which in thy breast doth lie: That is my home of love: if I have ranged, Like him that travels I return again, Just to the time, not with the time exchanged, So that myself bring water for my stain. Never believe, though in my nature reign'd All frailties that besiege all kinds of blood, That it could so preposterously be stain'd, To leave for nothing all thy sum of good; For nothing this wide universe I call, Save thou, my rose; in it thou art my all."""], ["CX","""Alas, 'tis true I have gone here and there And made myself a motley to the view, Gored mine own thoughts, sold cheap what is most dear, Made old offences of affections new; Most true it is that I have look'd on truth Askance and strangely: but, by all above, These blenches gave my heart another youth, And worse essays proved thee my best of love. Now all is done, have what shall have no end: Mine appetite I never more will grind On newer proof, to try an older friend, A god in love, to whom I am confined. Then give me welcome, next my heaven the best, Even to thy pure and most most loving breast."""], ["CXI","""O, for my sake do you with Fortune chide, The guilty goddess of my harmful deeds, That did not better for my life provide Than public means which public manners breeds. 
Thence comes it that my name receives a brand, And almost thence my nature is subdued To what it works in, like the dyer's hand: Pity me then and wish I were renew'd; Whilst, like a willing patient, I will drink Potions of eisel 'gainst my strong infection No bitterness that I will bitter think, Nor double penance, to correct correction. Pity me then, dear friend, and I assure ye Even that your pity is enough to cure me."""], ["CXII","""Your love and pity doth the impression fill Which vulgar scandal stamp'd upon my brow; For what care I who calls me well or ill, So you o'er-green my bad, my good allow? You are my all the world, and I must strive To know my shames and praises from your tongue: None else to me, nor I to none alive, That my steel'd sense or changes right or wrong. In so profound abysm I throw all care Of others' voices, that my adder's sense To critic and to flatterer stopped are. Mark how with my neglect I do dispense: You are so strongly in my purpose bred That all the world besides methinks are dead."""], ["CXIII","""Since I left you, mine eye is in my mind; And that which governs me to go about Doth part his function and is partly blind, Seems seeing, but effectually is out; For it no form delivers to the heart Of bird of flower, or shape, which it doth latch: Of his quick objects hath the mind no part, Nor his own vision holds what it doth catch: For if it see the rudest or gentlest sight, The most sweet favour or deformed'st creature, The mountain or the sea, the day or night, The crow or dove, it shapes them to your feature: Incapable of more, replete with you, My most true mind thus makes mine eye untrue."""], ["CXIV","""Or whether doth my mind, being crown'd with you, Drink up the monarch's plague, this flattery? 
Or whether shall I say, mine eye saith true, And that your love taught it this alchemy, To make of monsters and things indigest Such cherubins as your sweet self resemble, Creating every bad a perfect best, As fast as objects to his beams assemble? O,'tis the first; 'tis flattery in my seeing, And my great mind most kingly drinks it up: Mine eye well knows what with his gust is 'greeing, And to his palate doth prepare the cup: If it be poison'd, 'tis the lesser sin That mine eye loves it and doth first begin."""], ["CXV","""Those lines that I before have writ do lie, Even those that said I could not love you dearer: Yet then my judgment knew no reason why My most full flame should afterwards burn clearer. But reckoning time, whose million'd accidents Creep in 'twixt vows and change decrees of kings, Tan sacred beauty, blunt the sharp'st intents, Divert strong minds to the course of altering things; Alas, why, fearing of time's tyranny, Might I not then say 'Now I love you best,' When I was certain o'er incertainty, Crowning the present, doubting of the rest? Love is a babe; then might I not say so, To give full growth to that which still doth grow?"""], ["CXVI","""Let me not to the marriage of true minds Admit impediments. Love is not love Which alters when it alteration finds, Or bends with the remover to remove: O no! it is an ever-fixed mark That looks on tempests and is never shaken; It is the star to every wandering bark, Whose worth's unknown, although his height be taken. Love's not Time's fool, though rosy lips and cheeks Within his bending sickle's compass come: Love alters not with his brief hours and weeks, But bears it out even to the edge of doom. 
If this be error and upon me proved, I never writ, nor no man ever loved."""], ["CXVII","""Accuse me thus: that I have scanted all Wherein I should your great deserts repay, Forgot upon your dearest love to call, Whereto all bonds do tie me day by day; That I have frequent been with unknown minds And given to time your own dear-purchased right That I have hoisted sail to all the winds Which should transport me farthest from your sight. Book both my wilfulness and errors down And on just proof surmise accumulate; Bring me within the level of your frown, But shoot not at me in your waken'd hate; Since my appeal says I did strive to prove The constancy and virtue of your love."""], ["CXVIII","""Like as, to make our appetites more keen, With eager compounds we our palate urge, As, to prevent our maladies unseen, We sicken to shun sickness when we purge, Even so, being full of your ne'er-cloying sweetness, To bitter sauces did I frame my feeding And, sick of welfare, found a kind of meetness To be diseased ere that there was true needing. Thus policy in love, to anticipate The ills that were not, grew to faults assured And brought to medicine a healthful state Which, rank of goodness, would by ill be cured: But thence I learn, and find the lesson true, Drugs poison him that so fell sick of you."""], ["CXIX","""What potions have I drunk of Siren tears, Distill'd from limbecks foul as hell within, Applying fears to hopes and hopes to fears, Still losing when I saw myself to win! What wretched errors hath my heart committed, Whilst it hath thought itself so blessed never! How have mine eyes out of their spheres been fitted In the distraction of this madding fever! O benefit of ill! now I find true That better is by evil still made better; And ruin'd love, when it is built anew, Grows fairer than at first, more strong, far greater. 
So I return rebuked to my content And gain by ill thrice more than I have spent."""], ["CXX","""That you were once unkind befriends me now, And for that sorrow which I then did feel Needs must I under my transgression bow, Unless my nerves were brass or hammer'd steel. For if you were by my unkindness shaken As I by yours, you've pass'd a hell of time, And I, a tyrant, have no leisure taken To weigh how once I suffered in your crime. O, that our night of woe might have remember'd My deepest sense, how hard true sorrow hits, And soon to you, as you to me, then tender'd The humble slave which wounded bosoms fits! But that your trespass now becomes a fee; Mine ransoms yours, and yours must ransom me."""], ["CXXI","""'Tis better to be vile than vile esteem'd, When not to be receives reproach of being, And the just pleasure lost which is so deem'd Not by our feeling but by others' seeing: For why should others false adulterate eyes Give salutation to my sportive blood? Or on my frailties why are frailer spies, Which in their wills count bad what I think good? No, I am that I am, and they that level At my abuses reckon up their own: I may be straight, though they themselves be bevel; By their rank thoughts my deeds must not be shown; Unless this general evil they maintain, All men are bad, and in their badness reign."""], ["CXXII","""Thy gift, thy tables, are within my brain Full character'd with lasting memory, Which shall above that idle rank remain Beyond all date, even to eternity; Or at the least, so long as brain and heart Have faculty by nature to subsist; Till each to razed oblivion yield his part Of thee, thy record never can be miss'd. 
That poor retention could not so much hold, Nor need I tallies thy dear love to score; Therefore to give them from me was I bold, To trust those tables that receive thee more: To keep an adjunct to remember thee Were to import forgetfulness in me."""], ["CXXIII","""No, Time, thou shalt not boast that I do change: Thy pyramids built up with newer might To me are nothing novel, nothing strange; They are but dressings of a former sight. Our dates are brief, and therefore we admire What thou dost foist upon us that is old, And rather make them born to our desire Than think that we before have heard them told. Thy registers and thee I both defy, Not wondering at the present nor the past, For thy records and what we see doth lie, Made more or less by thy continual haste. This I do vow and this shall ever be; I will be true, despite thy scythe and thee."""], ["CXXIV","""If my dear love were but the child of state, It might for Fortune's bastard be unfather'd' As subject to Time's love or to Time's hate, Weeds among weeds, or flowers with flowers gather'd. No, it was builded far from accident; It suffers not in smiling pomp, nor falls Under the blow of thralled discontent, Whereto the inviting time our fashion calls: It fears not policy, that heretic, Which works on leases of short-number'd hours, But all alone stands hugely politic, That it nor grows with heat nor drowns with showers. To this I witness call the fools of time, Which die for goodness, who have lived for crime."""], ["CXXV","""Were 't aught to me I bore the canopy, With my extern the outward honouring, Or laid great bases for eternity, Which prove more short than waste or ruining? Have I not seen dwellers on form and favour Lose all, and more, by paying too much rent, For compound sweet forgoing simple savour, Pitiful thrivers, in their gazing spent? 
No, let me be obsequious in thy heart, And take thou my oblation, poor but free, Which is not mix'd with seconds, knows no art, But mutual render, only me for thee. Hence, thou suborn'd informer! a true soul When most impeach'd stands least in thy control."""], ["CXXVI","""O thou, my lovely boy, who in thy power Dost hold Time's fickle glass, his sickle, hour; Who hast by waning grown, and therein show'st Thy lovers withering as thy sweet self grow'st; If Nature, sovereign mistress over wrack, As thou goest onwards, still will pluck thee back, She keeps thee to this purpose, that her skill May time disgrace and wretched minutes kill. Yet fear her, O thou minion of her pleasure! She may detain, but not still keep, her treasure: Her audit, though delay'd, answer'd must be, And her quietus is to render thee."""], ["CXXVII","""In the old age black was not counted fair, Or if it were, it bore not beauty's name; But now is black beauty's successive heir, And beauty slander'd with a bastard shame: For since each hand hath put on nature's power, Fairing the foul with art's false borrow'd face, Sweet beauty hath no name, no holy bower, But is profaned, if not lives in disgrace. Therefore my mistress' brows are raven black, Her eyes so suited, and they mourners seem At such who, not born fair, no beauty lack, Slandering creation with a false esteem: Yet so they mourn, becoming of their woe, That every tongue says beauty should look so."""], ["CXXVIII","""How oft, when thou, my music, music play'st, Upon that blessed wood whose motion sounds With thy sweet fingers, when thou gently sway'st The wiry concord that mine ear confounds, Do I envy those jacks that nimble leap To kiss the tender inward of thy hand, Whilst my poor lips, which should that harvest reap, At the wood's boldness by thee blushing stand! 
To be so tickled, they would change their state And situation with those dancing chips, O'er whom thy fingers walk with gentle gait, Making dead wood more blest than living lips. Since saucy jacks so happy are in this, Give them thy fingers, me thy lips to kiss."""], ["CXXIX","""The expense of spirit in a waste of shame Is lust in action; and till action, lust Is perjured, murderous, bloody, full of blame, Savage, extreme, rude, cruel, not to trust, Enjoy'd no sooner but despised straight, Past reason hunted, and no sooner had Past reason hated, as a swallow'd bait On purpose laid to make the taker mad; Mad in pursuit and in possession so; Had, having, and in quest to have, extreme; A bliss in proof, and proved, a very woe; Before, a joy proposed; behind, a dream. All this the world well knows; yet none knows well To shun the heaven that leads men to this hell."""], ["CXXX","""My mistress' eyes are nothing like the sun; Coral is far more red than her lips' red; If snow be white, why then her breasts are dun; If hairs be wires, black wires grow on her head. I have seen roses damask'd, red and white, But no such roses see I in her cheeks; And in some perfumes is there more delight Than in the breath that from my mistress reeks. I love to hear her speak, yet well I know That music hath a far more pleasing sound; I grant I never saw a goddess go; My mistress, when she walks, treads on the ground: And yet, by heaven, I think my love as rare As any she belied with false compare."""], ["CXXXI","""Thou art as tyrannous, so as thou art, As those whose beauties proudly make them cruel; For well thou know'st to my dear doting heart Thou art the fairest and most precious jewel. Yet, in good faith, some say that thee behold Thy face hath not the power to make love groan: To say they err I dare not be so bold, Although I swear it to myself alone. 
And, to be sure that is not false I swear, A thousand groans, but thinking on thy face, One on another's neck, do witness bear Thy black is fairest in my judgment's place. In nothing art thou black save in thy deeds, And thence this slander, as I think, proceeds."""], ["CXXXII","""Thine eyes I love, and they, as pitying me, Knowing thy heart torments me with disdain, Have put on black and loving mourners be, Looking with pretty ruth upon my pain. And truly not the morning sun of heaven Better becomes the grey cheeks of the east, Nor that full star that ushers in the even Doth half that glory to the sober west, As those two mourning eyes become thy face: O, let it then as well beseem thy heart To mourn for me, since mourning doth thee grace, And suit thy pity like in every part. Then will I swear beauty herself is black And all they foul that thy complexion lack."""], ["CXXXIII","""Beshrew that heart that makes my heart to groan For that deep wound it gives my friend and me! Is't not enough to torture me alone, But slave to slavery my sweet'st friend must be? Me from myself thy cruel eye hath taken, And my next self thou harder hast engross'd: Of him, myself, and thee, I am forsaken; A torment thrice threefold thus to be cross'd. Prison my heart in thy steel bosom's ward, But then my friend's heart let my poor heart bail; Whoe'er keeps me, let my heart be his guard; Thou canst not then use rigor in my gaol: And yet thou wilt; for I, being pent in thee, Perforce am thine, and all that is in me."""], ["CXXXIV","""So, now I have confess'd that he is thine, And I myself am mortgaged to thy will, Myself I'll forfeit, so that other mine Thou wilt restore, to be my comfort still: But thou wilt not, nor he will not be free, For thou art covetous and he is kind; He learn'd but surety-like to write for me Under that bond that him as fast doth bind. 
The statute of thy beauty thou wilt take, Thou usurer, that put'st forth all to use, And sue a friend came debtor for my sake; So him I lose through my unkind abuse. Him have I lost; thou hast both him and me: He pays the whole, and yet am I not free."""], ["CXXXV","""Whoever hath her wish, thou hast thy 'Will,' And 'Will' to boot, and 'Will' in overplus; More than enough am I that vex thee still, To thy sweet will making addition thus. Wilt thou, whose will is large and spacious, Not once vouchsafe to hide my will in thine? Shall will in others seem right gracious, And in my will no fair acceptance shine? The sea all water, yet receives rain still And in abundance addeth to his store; So thou, being rich in 'Will,' add to thy 'Will' One will of mine, to make thy large 'Will' more. Let no unkind, no fair beseechers kill; Think all but one, and me in that one 'Will.'"""], ["CXXXVI","""If thy soul cheque thee that I come so near, Swear to thy blind soul that I was thy 'Will,' And will, thy soul knows, is admitted there; Thus far for love my love-suit, sweet, fulfil. 'Will' will fulfil the treasure of thy love, Ay, fill it full with wills, and my will one. In things of great receipt with ease we prove Among a number one is reckon'd none: Then in the number let me pass untold, Though in thy stores' account I one must be; For nothing hold me, so it please thee hold That nothing me, a something sweet to thee: Make but my name thy love, and love that still, And then thou lovest me, for my name is 'Will.'"""], ["CXXXVII","""Thou blind fool, Love, what dost thou to mine eyes, That they behold, and see not what they see? They know what beauty is, see where it lies, Yet what the best is take the worst to be. If eyes corrupt by over-partial looks Be anchor'd in the bay where all men ride, Why of eyes' falsehood hast thou forged hooks, Whereto the judgment of my heart is tied? Why should my heart think that a several plot Which my heart knows the wide world's common place? 
Or mine eyes seeing this, say this is not, To put fair truth upon so foul a face? In things right true my heart and eyes have erred, And to this false plague are they now transferr'd."""], ["CXXXVIII","""When my love swears that she is made of truth I do believe her, though I know she lies, That she might think me some untutor'd youth, Unlearned in the world's false subtleties. Thus vainly thinking that she thinks me young, Although she knows my days are past the best, Simply I credit her false speaking tongue: On both sides thus is simple truth suppress'd. But wherefore says she not she is unjust? And wherefore say not I that I am old? O, love's best habit is in seeming trust, And age in love loves not to have years told: Therefore I lie with her and she with me, And in our faults by lies we flatter'd be."""], ["CXXXIX","""O, call not me to justify the wrong That thy unkindness lays upon my heart; Wound me not with thine eye but with thy tongue; Use power with power and slay me not by art. Tell me thou lovest elsewhere, but in my sight, Dear heart, forbear to glance thine eye aside: What need'st thou wound with cunning when thy might Is more than my o'er-press'd defense can bide? Let me excuse thee: ah! my love well knows Her pretty looks have been mine enemies, And therefore from my face she turns my foes, That they elsewhere might dart their injuries: Yet do not so; but since I am near slain, Kill me outright with looks and rid my pain."""], ["CXL","""Be wise as thou art cruel; do not press My tongue-tied patience with too much disdain; Lest sorrow lend me words and words express The manner of my pity-wanting pain. 
If I might teach thee wit, better it were, Though not to love, yet, love, to tell me so; As testy sick men, when their deaths be near, No news but health from their physicians know; For if I should despair, I should grow mad, And in my madness might speak ill of thee: Now this ill-wresting world is grown so bad, Mad slanderers by mad ears believed be, That I may not be so, nor thou belied, Bear thine eyes straight, though thy proud heart go wide."""], ["CXLI","""In faith, I do not love thee with mine eyes, For they in thee a thousand errors note; But 'tis my heart that loves what they despise, Who in despite of view is pleased to dote; Nor are mine ears with thy tongue's tune delighted, Nor tender feeling, to base touches prone, Nor taste, nor smell, desire to be invited To any sensual feast with thee alone: But my five wits nor my five senses can Dissuade one foolish heart from serving thee, Who leaves unsway'd the likeness of a man, Thy proud hearts slave and vassal wretch to be: Only my plague thus far I count my gain, That she that makes me sin awards me pain."""], ["CXLII","""Love is my sin and thy dear virtue hate, Hate of my sin, grounded on sinful loving: O, but with mine compare thou thine own state, And thou shalt find it merits not reproving; Or, if it do, not from those lips of thine, That have profaned their scarlet ornaments And seal'd false bonds of love as oft as mine, Robb'd others' beds' revenues of their rents. Be it lawful I love thee, as thou lovest those Whom thine eyes woo as mine importune thee: Root pity in thy heart, that when it grows Thy pity may deserve to pitied be. If thou dost seek to have what thou dost hide, By self-example mayst thou be denied!"""], ["CXLIII","""Lo! 
as a careful housewife runs to catch One of her feather'd creatures broke away, Sets down her babe and makes an swift dispatch In pursuit of the thing she would have stay, Whilst her neglected child holds her in chase, Cries to catch her whose busy care is bent To follow that which flies before her face, Not prizing her poor infant's discontent; So runn'st thou after that which flies from thee, Whilst I thy babe chase thee afar behind; But if thou catch thy hope, turn back to me, And play the mother's part, kiss me, be kind: So will I pray that thou mayst have thy 'Will,' If thou turn back, and my loud crying still."""], ["CXLIV","""Two loves I have of comfort and despair, Which like two spirits do suggest me still: The better angel is a man right fair, The worser spirit a woman colour'd ill. To win me soon to hell, my female evil Tempteth my better angel from my side, And would corrupt my saint to be a devil, Wooing his purity with her foul pride. And whether that my angel be turn'd fiend Suspect I may, but not directly tell; But being both from me, both to each friend, I guess one angel in another's hell: Yet this shall I ne'er know, but live in doubt, Till my bad angel fire my good one out."""], ["CXLV","""Those lips that Love's own hand did make Breathed forth the sound that said 'I hate' To me that languish'd for her sake; But when she saw my woeful state, Straight in her heart did mercy come, Chiding that tongue that ever sweet Was used in giving gentle doom, And taught it thus anew to greet: 'I hate' she alter'd with an end, That follow'd it as gentle day Doth follow night, who like a fiend From heaven to hell is flown away; 'I hate' from hate away she threw, And saved my life, saying 'not you.'"""], ["CXLVI","""Poor soul, the centre of my sinful earth, [ ] these rebel powers that thee array; Why dost thou pine within and suffer dearth, Painting thy outward walls so costly gay? 
Why so large cost, having so short a lease, Dost thou upon thy fading mansion spend? Shall worms, inheritors of this excess, Eat up thy charge? is this thy body's end? Then soul, live thou upon thy servant's loss, And let that pine to aggravate thy store; Buy terms divine in selling hours of dross; Within be fed, without be rich no more: So shalt thou feed on Death, that feeds on men, And Death once dead, there's no more dying then."""], ["CXLVII","""My love is as a fever, longing still For that which longer nurseth the disease, Feeding on that which doth preserve the ill, The uncertain sickly appetite to please. My reason, the physician to my love, Angry that his prescriptions are not kept, Hath left me, and I desperate now approve Desire is death, which physic did except. Past cure I am, now reason is past care, And frantic-mad with evermore unrest; My thoughts and my discourse as madmen's are, At random from the truth vainly express'd; For I have sworn thee fair and thought thee bright, Who art as black as hell, as dark as night."""], ["CXLVIII","""O me, what eyes hath Love put in my head, Which have no correspondence with true sight! Or, if they have, where is my judgment fled, That censures falsely what they see aright? If that be fair whereon my false eyes dote, What means the world to say it is not so? If it be not, then love doth well denote Love's eye is not so true as all men's 'No.' How can it? O, how can Love's eye be true, That is so vex'd with watching and with tears? No marvel then, though I mistake my view; The sun itself sees not till heaven clears. O cunning Love! with tears thou keep'st me blind, Lest eyes well-seeing thy foul faults should find."""], ["CXLIX","""Canst thou, O cruel! say I love thee not, When I against myself with thee partake? Do I not think on thee, when I forgot Am of myself, all tyrant, for thy sake? Who hateth thee that I do call my friend? On whom frown'st thou that I do fawn upon? 
Nay, if thou lour'st on me, do I not spend Revenge upon myself with present moan? What merit do I in myself respect, That is so proud thy service to despise, When all my best doth worship thy defect, Commanded by the motion of thine eyes? But, love, hate on, for now I know thy mind; Those that can see thou lovest, and I am blind."""], ["CL","""O, from what power hast thou this powerful might With insufficiency my heart to sway? To make me give the lie to my true sight, And swear that brightness doth not grace the day? Whence hast thou this becoming of things ill, That in the very refuse of thy deeds There is such strength and warrantize of skill That, in my mind, thy worst all best exceeds? Who taught thee how to make me love thee more The more I hear and see just cause of hate? O, though I love what others do abhor, With others thou shouldst not abhor my state: If thy unworthiness raised love in me, More worthy I to be beloved of thee."""], ["CLI","""Love is too young to know what conscience is; Yet who knows not conscience is born of love? Then, gentle cheater, urge not my amiss, Lest guilty of my faults thy sweet self prove: For, thou betraying me, I do betray My nobler part to my gross body's treason; My soul doth tell my body that he may Triumph in love; flesh stays no father reason; But, rising at thy name, doth point out thee As his triumphant prize. Proud of this pride, He is contented thy poor drudge to be, To stand in thy affairs, fall by thy side. No want of conscience hold it that I call Her 'love' for whose dear love I rise and fall."""], ["CLII","""Love is too young to know what conscience is; Yet who knows not conscience is born of love? 
Then, gentle cheater, urge not my amiss, Lest guilty of my faults thy sweet self prove: For, thou betraying me, I do betray My nobler part to my gross body's treason; My soul doth tell my body that he may Triumph in love; flesh stays no father reason; But, rising at thy name, doth point out thee As his triumphant prize. Proud of this pride, He is contented thy poor drudge to be, To stand in thy affairs, fall by thy side. No want of conscience hold it that I call Her 'love' for whose dear love I rise and fall."""], ["CLIII","""Cupid laid by his brand, and fell asleep: A maid of Dian's this advantage found, And his love-kindling fire did quickly steep In a cold valley-fountain of that ground; Which borrow'd from this holy fire of Love A dateless lively heat, still to endure, And grew a seething bath, which yet men prove Against strange maladies a sovereign cure. But at my mistress' eye Love's brand new-fired, The boy for trial needs would touch my breast; I, sick withal, the help of bath desired, And thither hied, a sad distemper'd guest, But found no cure: the bath for my help lies Where Cupid got new fire--my mistress' eyes."""]]}
[ [ 14, 0, 0.5002, 1, 0, 0.66, 0, 344, 0, 0, 0, 0, 0, 6, 0 ] ]
[ "verses={\"verses\":[[\"I\",\"\"\"FROM fairest creatures we desire increase,\nThat thereby beauty's rose might never die,\nBut as the riper should by time decease,\nHis tender heir might bear his memory:\nBut thou, contracted to thine own bright eyes,\nFeed'st thy light'st flame with self-substantial fuel,\nMaking ...
#!/usr/bin/env python import wsgiref.handlers from google.appengine.api import users from google.appengine.ext import webapp from google.appengine.ext.webapp.util import run_wsgi_app from google.appengine.ext import db from google.appengine.ext.webapp import template from django.utils import simplejson from touchengine.plistHandler import PlistHandler from models import * from dateutil import parser import datetime import logging import os class MainPage(webapp.RequestHandler): """Main Page View""" def get(self): user = users.get_current_user() if user: url = users.create_logout_url(self.request.uri) url_linktext = 'Logout' else: url = None url_linktext = None self.redirect(users.create_login_url(self.request.uri)) template_values = { 'url': url, 'url_linktext': url_linktext, 'username': users.get_current_user(), } path = os.path.join(os.path.dirname(__file__), 'base.html') self.response.out.write(template.render(path, template_values)) class MainPageLibrary(webapp.RequestHandler): """Main Page View With Library Grid""" def get(self): user = users.get_current_user() if users.get_current_user(): url = users.create_logout_url(self.request.uri) url_linktext = 'Logout' else: url = None url_linktext = None self.redirect(users.create_login_url(self.request.uri)) template_values = { 'url': url, 'url_linktext': url_linktext, 'username': users.get_current_user(), } path = os.path.join(os.path.dirname(__file__), 'library.html') self.response.out.write(template.render(path, template_values)) class Recent(webapp.RequestHandler): """Query Last 10 Requests""" def get(self): #collection collection = [] #grab last 10 records from datastore query = Book.all().order('-date') records = query.fetch(limit=10) logging.info(collection) for book_record in records: collection.append(book_record.title) self.response.out.write(collection) class Library(webapp.RequestHandler): """Returns Library Contents""" def get(self): #Just grab the latest post aaData = dict(aaData=[]) #select the latest 
input from the datastore record = db.GqlQuery(""" SELECT * FROM Book ORDER BY date DESC LIMIT 100""") for book in record: row = [] row.append(book.title) row.append(book.author) row.append(book.copyright.strftime('%Y')) aaData['aaData'].append(row) logging.info('book = %s' %(book,)) aaData = simplejson.dumps(aaData) logging.info("GET: %s" % aaData) self.response.headers['Content-Type'] = 'application/json' self.response.out.write(aaData) class CreateBook(webapp.RequestHandler): def userBookshelf(self): """Gets the users bookshelf if none, makes one""" user = users.get_current_user() bookshelf = None #only make a shelf if the user is not an admin if user and not users.is_current_user_admin(): bookshelvesQuery = BookShelf.all().filter('owner = ', user) bookshelf = bookshelvesQuery.get() if not bookshelf: bookshelf = BookShelf(owner=user) bookshelf.put() logging.info(u'shelf = %s' %(bookshelf,)) return bookshelf def post(self): """Stores a new book entry""" title = self.request.get('title') author = self.request.get('author') copyright = self.request.get('copyright') #Create new book and save it book = Book() book.title = title book.author = author copyrightDate = parser.parse(copyright) book.copyright = copyrightDate book.date = datetime.datetime.now() #automatically add to current user's shelf shelf = self.userBookshelf() if shelf: book.bookshelf = shelf book.put() logging.info((title, author, copyright)) self.response.out.write(""" Book Updated: Title: %s, Author: %s, Copyright: %s""" %\ (book.title, book.author, book.copyright)) class CustomPlistHandler(PlistHandler): stripFromURL = '/plist/' def main(): application = webapp.WSGIApplication([('/', MainPage), ('/alt', MainPageLibrary), ('/submit_form', CreateBook), ('/library', Library), ('/plist/.*', CustomPlistHandler), ],debug=True) wsgiref.handlers.CGIHandler().run(application) if __name__ == "__main__": main()
[ [ 1, 0, 0.0186, 0.0062, 0, 0.66, 0, 709, 0, 1, 0, 0, 709, 0, 0 ], [ 1, 0, 0.0311, 0.0062, 0, 0.66, 0.05, 279, 0, 1, 0, 0, 279, 0, 0 ], [ 1, 0, 0.0373, 0.0062, 0, 0....
[ "import wsgiref.handlers", "from google.appengine.api import users", "from google.appengine.ext import webapp", "from google.appengine.ext.webapp.util import run_wsgi_app", "from google.appengine.ext import db", "from google.appengine.ext.webapp import template", "from django.utils import simplejson", ...
# -*- coding: utf-8 -*- # # Copyright (C) 2006-2009 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. """Core classes for markup processing.""" try: reduce # builtin in Python < 3 except NameError: from functools import reduce from itertools import chain import operator from genshi.util import plaintext, stripentities, striptags, stringrepr __all__ = ['Stream', 'Markup', 'escape', 'unescape', 'Attrs', 'Namespace', 'QName'] __docformat__ = 'restructuredtext en' class StreamEventKind(str): """A kind of event on a markup stream.""" __slots__ = [] _instances = {} def __new__(cls, val): return cls._instances.setdefault(val, str.__new__(cls, val)) class Stream(object): """Represents a stream of markup events. This class is basically an iterator over the events. Stream events are tuples of the form:: (kind, data, position) where ``kind`` is the event kind (such as `START`, `END`, `TEXT`, etc), ``data`` depends on the kind of event, and ``position`` is a ``(filename, line, offset)`` tuple that contains the location of the original element or text in the input. If the original location is unknown, ``position`` is ``(None, -1, -1)``. Also provided are ways to serialize the stream to text. The `serialize()` method will return an iterator over generated strings, while `render()` returns the complete generated text at once. Both accept various parameters that impact the way the stream is serialized. 
""" __slots__ = ['events', 'serializer'] START = StreamEventKind('START') #: a start tag END = StreamEventKind('END') #: an end tag TEXT = StreamEventKind('TEXT') #: literal text XML_DECL = StreamEventKind('XML_DECL') #: XML declaration DOCTYPE = StreamEventKind('DOCTYPE') #: doctype declaration START_NS = StreamEventKind('START_NS') #: start namespace mapping END_NS = StreamEventKind('END_NS') #: end namespace mapping START_CDATA = StreamEventKind('START_CDATA') #: start CDATA section END_CDATA = StreamEventKind('END_CDATA') #: end CDATA section PI = StreamEventKind('PI') #: processing instruction COMMENT = StreamEventKind('COMMENT') #: comment def __init__(self, events, serializer=None): """Initialize the stream with a sequence of markup events. :param events: a sequence or iterable providing the events :param serializer: the default serialization method to use for this stream :note: Changed in 0.5: added the `serializer` argument """ self.events = events #: The underlying iterable producing the events self.serializer = serializer #: The default serializion method def __iter__(self): return iter(self.events) def __or__(self, function): """Override the "bitwise or" operator to apply filters or serializers to the stream, providing a syntax similar to pipes on Unix shells. Assume the following stream produced by the `HTML` function: >>> from genshi.input import HTML >>> html = HTML('''<p onclick="alert('Whoa')">Hello, world!</p>''') >>> print(html) <p onclick="alert('Whoa')">Hello, world!</p> A filter such as the HTML sanitizer can be applied to that stream using the pipe notation as follows: >>> from genshi.filters import HTMLSanitizer >>> sanitizer = HTMLSanitizer() >>> print(html | sanitizer) <p>Hello, world!</p> Filters can be any function that accepts and produces a stream (where a stream is anything that iterates over events): >>> def uppercase(stream): ... for kind, data, pos in stream: ... if kind is TEXT: ... data = data.upper() ... 
yield kind, data, pos >>> print(html | sanitizer | uppercase) <p>HELLO, WORLD!</p> Serializers can also be used with this notation: >>> from genshi.output import TextSerializer >>> output = TextSerializer() >>> print(html | sanitizer | uppercase | output) HELLO, WORLD! Commonly, serializers should be used at the end of the "pipeline"; using them somewhere in the middle may produce unexpected results. :param function: the callable object that should be applied as a filter :return: the filtered stream :rtype: `Stream` """ return Stream(_ensure(function(self)), serializer=self.serializer) def filter(self, *filters): """Apply filters to the stream. This method returns a new stream with the given filters applied. The filters must be callables that accept the stream object as parameter, and return the filtered stream. The call:: stream.filter(filter1, filter2) is equivalent to:: stream | filter1 | filter2 :param filters: one or more callable objects that should be applied as filters :return: the filtered stream :rtype: `Stream` """ return reduce(operator.or_, (self,) + filters) def render(self, method=None, encoding='utf-8', out=None, **kwargs): """Return a string representation of the stream. Any additional keyword arguments are passed to the serializer, and thus depend on the `method` parameter value. 
:param method: determines how the stream is serialized; can be either "xml", "xhtml", "html", "text", or a custom serializer class; if `None`, the default serialization method of the stream is used :param encoding: how the output string should be encoded; if set to `None`, this method returns a `unicode` object :param out: a file-like object that the output should be written to instead of being returned as one big string; note that if this is a file or socket (or similar), the `encoding` must not be `None` (that is, the output must be encoded) :return: a `str` or `unicode` object (depending on the `encoding` parameter), or `None` if the `out` parameter is provided :rtype: `basestring` :see: XMLSerializer, XHTMLSerializer, HTMLSerializer, TextSerializer :note: Changed in 0.5: added the `out` parameter """ from genshi.output import encode if method is None: method = self.serializer or 'xml' generator = self.serialize(method=method, **kwargs) return encode(generator, method=method, encoding=encoding, out=out) def select(self, path, namespaces=None, variables=None): """Return a new stream that contains the events matching the given XPath expression. >>> from genshi import HTML >>> stream = HTML('<doc><elem>foo</elem><elem>bar</elem></doc>') >>> print(stream.select('elem')) <elem>foo</elem><elem>bar</elem> >>> print(stream.select('elem/text()')) foobar Note that the outermost element of the stream becomes the *context node* for the XPath test. That means that the expression "doc" would not match anything in the example above, because it only tests against child elements of the outermost element: >>> print(stream.select('doc')) <BLANKLINE> You can use the "." 
expression to match the context node itself (although that usually makes little sense): >>> print(stream.select('.')) <doc><elem>foo</elem><elem>bar</elem></doc> :param path: a string containing the XPath expression :param namespaces: mapping of namespace prefixes used in the path :param variables: mapping of variable names to values :return: the selected substream :rtype: `Stream` :raises PathSyntaxError: if the given path expression is invalid or not supported """ from genshi.path import Path return Path(path).select(self, namespaces, variables) def serialize(self, method='xml', **kwargs): """Generate strings corresponding to a specific serialization of the stream. Unlike the `render()` method, this method is a generator that returns the serialized output incrementally, as opposed to returning a single string. Any additional keyword arguments are passed to the serializer, and thus depend on the `method` parameter value. :param method: determines how the stream is serialized; can be either "xml", "xhtml", "html", "text", or a custom serializer class; if `None`, the default serialization method of the stream is used :return: an iterator over the serialization results (`Markup` or `unicode` objects, depending on the serialization method) :rtype: ``iterator`` :see: XMLSerializer, XHTMLSerializer, HTMLSerializer, TextSerializer """ from genshi.output import get_serializer if method is None: method = self.serializer or 'xml' return get_serializer(method, **kwargs)(_ensure(self)) def __str__(self): return self.render() def __unicode__(self): return self.render(encoding=None) def __html__(self): return self START = Stream.START END = Stream.END TEXT = Stream.TEXT XML_DECL = Stream.XML_DECL DOCTYPE = Stream.DOCTYPE START_NS = Stream.START_NS END_NS = Stream.END_NS START_CDATA = Stream.START_CDATA END_CDATA = Stream.END_CDATA PI = Stream.PI COMMENT = Stream.COMMENT def _ensure(stream): """Ensure that every item on the stream is actually a markup event.""" stream = 
iter(stream) event = stream.next() # Check whether the iterable is a real markup event stream by examining the # first item it yields; if it's not we'll need to do some conversion if type(event) is not tuple or len(event) != 3: for event in chain([event], stream): if hasattr(event, 'totuple'): event = event.totuple() else: event = TEXT, unicode(event), (None, -1, -1) yield event return # This looks like a markup event stream, so we'll just pass it through # unchanged yield event for event in stream: yield event class Attrs(tuple): """Immutable sequence type that stores the attributes of an element. Ordering of the attributes is preserved, while access by name is also supported. >>> attrs = Attrs([('href', '#'), ('title', 'Foo')]) >>> attrs Attrs([('href', '#'), ('title', 'Foo')]) >>> 'href' in attrs True >>> 'tabindex' in attrs False >>> attrs.get('title') 'Foo' Instances may not be manipulated directly. Instead, the operators ``|`` and ``-`` can be used to produce new instances that have specific attributes added, replaced or removed. To remove an attribute, use the ``-`` operator. 
The right hand side can be either a string or a set/sequence of strings, identifying the name(s) of the attribute(s) to remove: >>> attrs - 'title' Attrs([('href', '#')]) >>> attrs - ('title', 'href') Attrs() The original instance is not modified, but the operator can of course be used with an assignment: >>> attrs Attrs([('href', '#'), ('title', 'Foo')]) >>> attrs -= 'title' >>> attrs Attrs([('href', '#')]) To add a new attribute, use the ``|`` operator, where the right hand value is a sequence of ``(name, value)`` tuples (which includes `Attrs` instances): >>> attrs | [('title', 'Bar')] Attrs([('href', '#'), ('title', 'Bar')]) If the attributes already contain an attribute with a given name, the value of that attribute is replaced: >>> attrs | [('href', 'http://example.org/')] Attrs([('href', 'http://example.org/')]) """ __slots__ = [] def __contains__(self, name): """Return whether the list includes an attribute with the specified name. :return: `True` if the list includes the attribute :rtype: `bool` """ for attr, _ in self: if attr == name: return True def __getitem__(self, i): """Return an item or slice of the attributes list. >>> attrs = Attrs([('href', '#'), ('title', 'Foo')]) >>> attrs[1] ('title', 'Foo') >>> attrs[1:] Attrs([('title', 'Foo')]) """ items = tuple.__getitem__(self, i) if type(i) is slice: return Attrs(items) return items def __getslice__(self, i, j): """Return a slice of the attributes list. >>> attrs = Attrs([('href', '#'), ('title', 'Foo')]) >>> attrs[1:] Attrs([('title', 'Foo')]) """ return Attrs(tuple.__getslice__(self, i, j)) def __or__(self, attrs): """Return a new instance that contains the attributes in `attrs` in addition to any already existing attributes. 
:return: a new instance with the merged attributes :rtype: `Attrs` """ repl = dict([(an, av) for an, av in attrs if an in self]) return Attrs([(sn, repl.get(sn, sv)) for sn, sv in self] + [(an, av) for an, av in attrs if an not in self]) def __repr__(self): if not self: return 'Attrs()' return 'Attrs([%s])' % ', '.join([repr(item) for item in self]) def __sub__(self, names): """Return a new instance with all attributes with a name in `names` are removed. :param names: the names of the attributes to remove :return: a new instance with the attribute removed :rtype: `Attrs` """ if isinstance(names, basestring): names = (names,) return Attrs([(name, val) for name, val in self if name not in names]) def get(self, name, default=None): """Return the value of the attribute with the specified name, or the value of the `default` parameter if no such attribute is found. :param name: the name of the attribute :param default: the value to return when the attribute does not exist :return: the attribute value, or the `default` value if that attribute does not exist :rtype: `object` """ for attr, value in self: if attr == name: return value return default def totuple(self): """Return the attributes as a markup event. The returned event is a `TEXT` event, the data is the value of all attributes joined together. >>> Attrs([('href', '#'), ('title', 'Foo')]).totuple() ('TEXT', '#Foo', (None, -1, -1)) :return: a `TEXT` event :rtype: `tuple` """ return TEXT, ''.join([x[1] for x in self]), (None, -1, -1) class Markup(unicode): """Marks a string as being safe for inclusion in HTML/XML output without needing to be escaped. 
""" __slots__ = [] def __add__(self, other): return Markup(unicode.__add__(self, escape(other))) def __radd__(self, other): return Markup(unicode.__add__(escape(other), self)) def __mod__(self, args): if isinstance(args, dict): args = dict(zip(args.keys(), map(escape, args.values()))) elif isinstance(args, (list, tuple)): args = tuple(map(escape, args)) else: args = escape(args) return Markup(unicode.__mod__(self, args)) def __mul__(self, num): return Markup(unicode.__mul__(self, num)) __rmul__ = __mul__ def __repr__(self): return "<%s %s>" % (type(self).__name__, unicode.__repr__(self)) def join(self, seq, escape_quotes=True): """Return a `Markup` object which is the concatenation of the strings in the given sequence, where this `Markup` object is the separator between the joined elements. Any element in the sequence that is not a `Markup` instance is automatically escaped. :param seq: the sequence of strings to join :param escape_quotes: whether double quote characters in the elements should be escaped :return: the joined `Markup` object :rtype: `Markup` :see: `escape` """ return Markup(unicode.join(self, [escape(item, quotes=escape_quotes) for item in seq])) @classmethod def escape(cls, text, quotes=True): """Create a Markup instance from a string and escape special characters it may contain (<, >, & and \"). >>> escape('"1 < 2"') <Markup u'&#34;1 &lt; 2&#34;'> If the `quotes` parameter is set to `False`, the \" character is left as is. Escaping quotes is generally only required for strings that are to be used in attribute values. 
>>> escape('"1 < 2"', quotes=False) <Markup u'"1 &lt; 2"'> :param text: the text to escape :param quotes: if ``True``, double quote characters are escaped in addition to the other special characters :return: the escaped `Markup` string :rtype: `Markup` """ if not text: return cls() if type(text) is cls: return text if hasattr(text, '__html__'): return Markup(text.__html__()) text = text.replace('&', '&amp;') \ .replace('<', '&lt;') \ .replace('>', '&gt;') if quotes: text = text.replace('"', '&#34;') return cls(text) def unescape(self): """Reverse-escapes &, <, >, and \" and returns a `unicode` object. >>> Markup('1 &lt; 2').unescape() u'1 < 2' :return: the unescaped string :rtype: `unicode` :see: `genshi.core.unescape` """ if not self: return '' return unicode(self).replace('&#34;', '"') \ .replace('&gt;', '>') \ .replace('&lt;', '<') \ .replace('&amp;', '&') def stripentities(self, keepxmlentities=False): """Return a copy of the text with any character or numeric entities replaced by the equivalent UTF-8 characters. If the `keepxmlentities` parameter is provided and evaluates to `True`, the core XML entities (``&amp;``, ``&apos;``, ``&gt;``, ``&lt;`` and ``&quot;``) are not stripped. :return: a `Markup` instance with entities removed :rtype: `Markup` :see: `genshi.util.stripentities` """ return Markup(stripentities(self, keepxmlentities=keepxmlentities)) def striptags(self): """Return a copy of the text with all XML/HTML tags removed. :return: a `Markup` instance with all tags removed :rtype: `Markup` :see: `genshi.util.striptags` """ return Markup(striptags(self)) try: from genshi._speedups import Markup except ImportError: pass # just use the Python implementation escape = Markup.escape def unescape(text): """Reverse-escapes &, <, >, and \" and returns a `unicode` object. >>> unescape(Markup('1 &lt; 2')) u'1 < 2' If the provided `text` object is not a `Markup` instance, it is returned unchanged. 
>>> unescape('1 &lt; 2') '1 &lt; 2' :param text: the text to unescape :return: the unescsaped string :rtype: `unicode` """ if not isinstance(text, Markup): return text return text.unescape() class Namespace(object): """Utility class creating and testing elements with a namespace. Internally, namespace URIs are encoded in the `QName` of any element or attribute, the namespace URI being enclosed in curly braces. This class helps create and test these strings. A `Namespace` object is instantiated with the namespace URI. >>> html = Namespace('http://www.w3.org/1999/xhtml') >>> html Namespace('http://www.w3.org/1999/xhtml') >>> html.uri u'http://www.w3.org/1999/xhtml' The `Namespace` object can than be used to generate `QName` objects with that namespace: >>> html.body QName('http://www.w3.org/1999/xhtml}body') >>> html.body.localname u'body' >>> html.body.namespace u'http://www.w3.org/1999/xhtml' The same works using item access notation, which is useful for element or attribute names that are not valid Python identifiers: >>> html['body'] QName('http://www.w3.org/1999/xhtml}body') A `Namespace` object can also be used to test whether a specific `QName` belongs to that namespace using the ``in`` operator: >>> qname = html.body >>> qname in html True >>> qname in Namespace('http://www.w3.org/2002/06/xhtml2') False """ def __new__(cls, uri): if type(uri) is cls: return uri return object.__new__(cls) def __getnewargs__(self): return (self.uri,) def __getstate__(self): return self.uri def __setstate__(self, uri): self.uri = uri def __init__(self, uri): self.uri = unicode(uri) def __contains__(self, qname): return qname.namespace == self.uri def __ne__(self, other): return not self == other def __eq__(self, other): if isinstance(other, Namespace): return self.uri == other.uri return self.uri == other def __getitem__(self, name): return QName(self.uri + '}' + name) __getattr__ = __getitem__ def __hash__(self): return hash(self.uri) def __repr__(self): return '%s(%s)' % 
(type(self).__name__, stringrepr(self.uri)) def __str__(self): return self.uri.encode('utf-8') def __unicode__(self): return self.uri # The namespace used by attributes such as xml:lang and xml:space XML_NAMESPACE = Namespace('http://www.w3.org/XML/1998/namespace') class QName(unicode): """A qualified element or attribute name. The unicode value of instances of this class contains the qualified name of the element or attribute, in the form ``{namespace-uri}local-name``. The namespace URI can be obtained through the additional `namespace` attribute, while the local name can be accessed through the `localname` attribute. >>> qname = QName('foo') >>> qname QName('foo') >>> qname.localname u'foo' >>> qname.namespace >>> qname = QName('http://www.w3.org/1999/xhtml}body') >>> qname QName('http://www.w3.org/1999/xhtml}body') >>> qname.localname u'body' >>> qname.namespace u'http://www.w3.org/1999/xhtml' """ __slots__ = ['namespace', 'localname'] def __new__(cls, qname): """Create the `QName` instance. :param qname: the qualified name as a string of the form ``{namespace-uri}local-name``, where the leading curly brace is optional """ if type(qname) is cls: return qname parts = qname.lstrip('{').split('}', 1) if len(parts) > 1: self = unicode.__new__(cls, '{%s' % qname) self.namespace, self.localname = map(unicode, parts) else: self = unicode.__new__(cls, qname) self.namespace, self.localname = None, unicode(qname) return self def __getnewargs__(self): return (self.lstrip('{'),) def __repr__(self): return '%s(%s)' % (type(self).__name__, stringrepr(self.lstrip('{')))
[ [ 8, 0, 0.0193, 0.0014, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 7, 0, 0.0241, 0.0055, 0, 0.66, 0.0357, 0, 0, 1, 0, 0, 0, 0, 0 ], [ 8, 1, 0.0234, 0.0014, 1, 0.53, ...
[ "\"\"\"Core classes for markup processing.\"\"\"", "try:\n reduce # builtin in Python < 3\nexcept NameError:\n from functools import reduce", " reduce # builtin in Python < 3", " from functools import reduce", "from itertools import chain", "import operator", "from genshi.util import plainte...
# -*- coding: utf-8 -*- # # Copyright (C) 2006-2009 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. import doctest import pickle from StringIO import StringIO try: from cStringIO import StringIO as cStringIO except ImportError: cStringIO = StringIO import unittest from genshi import core from genshi.core import Markup, Attrs, Namespace, QName, escape, unescape from genshi.input import XML, ParseError class StreamTestCase(unittest.TestCase): def test_render_utf8(self): xml = XML('<li>Über uns</li>') self.assertEqual('<li>Über uns</li>', xml.render()) def test_render_unicode(self): xml = XML('<li>Über uns</li>') self.assertEqual(u'<li>Über uns</li>', xml.render(encoding=None)) def test_render_ascii(self): xml = XML('<li>Über uns</li>') self.assertEqual('<li>&#220;ber uns</li>', xml.render(encoding='ascii')) def test_render_output_stream_utf8(self): xml = XML('<li>Über uns</li>') strio = cStringIO() self.assertEqual(None, xml.render(out=strio)) self.assertEqual('<li>Über uns</li>', strio.getvalue()) def test_render_output_stream_unicode(self): xml = XML('<li>Über uns</li>') strio = StringIO() self.assertEqual(None, xml.render(encoding=None, out=strio)) self.assertEqual(u'<li>Über uns</li>', strio.getvalue()) def test_pickle(self): xml = XML('<li>Foo</li>') buf = StringIO() pickle.dump(xml, buf, 2) buf.seek(0) xml = pickle.load(buf) self.assertEquals('<li>Foo</li>', xml.render(encoding=None)) class MarkupTestCase(unittest.TestCase): def test_new_with_encoding(self): markup = Markup('Döner', encoding='utf-8') self.assertEquals("<Markup u'D\\xf6ner'>", repr(markup)) def 
test_repr(self): markup = Markup('foo') self.assertEquals("<Markup u'foo'>", repr(markup)) def test_escape(self): markup = escape('<b>"&"</b>') assert type(markup) is Markup self.assertEquals('&lt;b&gt;&#34;&amp;&#34;&lt;/b&gt;', markup) def test_escape_noquotes(self): markup = escape('<b>"&"</b>', quotes=False) assert type(markup) is Markup self.assertEquals('&lt;b&gt;"&amp;"&lt;/b&gt;', markup) def test_unescape_markup(self): string = '<b>"&"</b>' markup = Markup.escape(string) assert type(markup) is Markup self.assertEquals(string, unescape(markup)) def test_add_str(self): markup = Markup('<b>foo</b>') + '<br/>' assert type(markup) is Markup self.assertEquals('<b>foo</b>&lt;br/&gt;', markup) def test_add_markup(self): markup = Markup('<b>foo</b>') + Markup('<br/>') assert type(markup) is Markup self.assertEquals('<b>foo</b><br/>', markup) def test_add_reverse(self): markup = '<br/>' + Markup('<b>bar</b>') assert type(markup) is Markup self.assertEquals('&lt;br/&gt;<b>bar</b>', markup) def test_mod(self): markup = Markup('<b>%s</b>') % '&' assert type(markup) is Markup self.assertEquals('<b>&amp;</b>', markup) def test_mod_multi(self): markup = Markup('<b>%s</b> %s') % ('&', 'boo') assert type(markup) is Markup self.assertEquals('<b>&amp;</b> boo', markup) def test_mod_mapping(self): markup = Markup('<b>%(foo)s</b>') % {'foo': '&'} assert type(markup) is Markup self.assertEquals('<b>&amp;</b>', markup) def test_mod_noescape(self): markup = Markup('<b>%(amp)s</b>') % {'amp': Markup('&amp;')} assert type(markup) is Markup self.assertEquals('<b>&amp;</b>', markup) def test_mul(self): markup = Markup('<b>foo</b>') * 2 assert type(markup) is Markup self.assertEquals('<b>foo</b><b>foo</b>', markup) def test_mul_reverse(self): markup = 2 * Markup('<b>foo</b>') assert type(markup) is Markup self.assertEquals('<b>foo</b><b>foo</b>', markup) def test_join(self): markup = Markup('<br />').join(['foo', '<bar />', Markup('<baz />')]) assert type(markup) is Markup 
self.assertEquals('foo<br />&lt;bar /&gt;<br /><baz />', markup) def test_stripentities_all(self): markup = Markup('&amp; &#106;').stripentities() assert type(markup) is Markup self.assertEquals('& j', markup) def test_stripentities_keepxml(self): markup = Markup('&amp; &#106;').stripentities(keepxmlentities=True) assert type(markup) is Markup self.assertEquals('&amp; j', markup) def test_striptags_empty(self): markup = Markup('<br />').striptags() assert type(markup) is Markup self.assertEquals('', markup) def test_striptags_mid(self): markup = Markup('<a href="#">fo<br />o</a>').striptags() assert type(markup) is Markup self.assertEquals('foo', markup) def test_pickle(self): markup = Markup('foo') buf = StringIO() pickle.dump(markup, buf, 2) buf.seek(0) self.assertEquals("<Markup u'foo'>", repr(pickle.load(buf))) class AttrsTestCase(unittest.TestCase): def test_pickle(self): attrs = Attrs([("attr1", "foo"), ("attr2", "bar")]) buf = StringIO() pickle.dump(attrs, buf, 2) buf.seek(0) unpickled = pickle.load(buf) self.assertEquals("Attrs([('attr1', 'foo'), ('attr2', 'bar')])", repr(unpickled)) def test_non_ascii(self): attrs_tuple = Attrs([("attr1", u"föö"), ("attr2", u"bär")]).totuple() self.assertEqual(u'fööbär', attrs_tuple[1]) class NamespaceTestCase(unittest.TestCase): def test_repr(self): self.assertEqual("Namespace('http://www.example.org/namespace')", repr(Namespace('http://www.example.org/namespace'))) def test_repr_eval(self): ns = Namespace('http://www.example.org/namespace') self.assertEqual(eval(repr(ns)), ns) def test_repr_eval_non_ascii(self): ns = Namespace(u'http://www.example.org/nämespäcé') self.assertEqual(eval(repr(ns)), ns) def test_pickle(self): ns = Namespace('http://www.example.org/namespace') buf = StringIO() pickle.dump(ns, buf, 2) buf.seek(0) unpickled = pickle.load(buf) self.assertEquals("Namespace('http://www.example.org/namespace')", repr(unpickled)) self.assertEquals('http://www.example.org/namespace', unpickled.uri) class 
QNameTestCase(unittest.TestCase): def test_pickle(self): qname = QName('http://www.example.org/namespace}elem') buf = StringIO() pickle.dump(qname, buf, 2) buf.seek(0) unpickled = pickle.load(buf) self.assertEquals('{http://www.example.org/namespace}elem', unpickled) self.assertEquals('http://www.example.org/namespace', unpickled.namespace) self.assertEquals('elem', unpickled.localname) def test_repr(self): self.assertEqual("QName('elem')", repr(QName('elem'))) self.assertEqual("QName('http://www.example.org/namespace}elem')", repr(QName('http://www.example.org/namespace}elem'))) def test_repr_eval(self): qn = QName('elem') self.assertEqual(eval(repr(qn)), qn) def test_repr_eval_non_ascii(self): qn = QName(u'élem') self.assertEqual(eval(repr(qn)), qn) def test_leading_curly_brace(self): qname = QName('{http://www.example.org/namespace}elem') self.assertEquals('http://www.example.org/namespace', qname.namespace) self.assertEquals('elem', qname.localname) def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(StreamTestCase, 'test')) suite.addTest(unittest.makeSuite(MarkupTestCase, 'test')) suite.addTest(unittest.makeSuite(NamespaceTestCase, 'test')) suite.addTest(unittest.makeSuite(AttrsTestCase, 'test')) suite.addTest(unittest.makeSuite(QNameTestCase, 'test')) suite.addTest(doctest.DocTestSuite(core)) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
[ [ 1, 0, 0.0558, 0.004, 0, 0.66, 0, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0598, 0.004, 0, 0.66, 0.0714, 848, 0, 1, 0, 0, 848, 0, 0 ], [ 1, 0, 0.0637, 0.004, 0, 0.6...
[ "import doctest", "import pickle", "from StringIO import StringIO", "try:\n from cStringIO import StringIO as cStringIO\nexcept ImportError:\n cStringIO = StringIO", " from cStringIO import StringIO as cStringIO", " cStringIO = StringIO", "import unittest", "from genshi import core", "fr...
# -*- coding: utf-8 -*- # # Copyright (C) 2006-2008 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. import doctest import unittest import sys from genshi.core import Attrs, Stream, QName from genshi.input import HTML, XML from genshi.output import DocType, XMLSerializer, XHTMLSerializer, \ HTMLSerializer, EmptyTagFilter class XMLSerializerTestCase(unittest.TestCase): def test_with_xml_decl(self): stream = Stream([(Stream.XML_DECL, ('1.0', None, -1), (None, -1, -1))]) output = stream.render(XMLSerializer, doctype='xhtml', encoding=None) self.assertEqual('<?xml version="1.0"?>\n' '<!DOCTYPE html PUBLIC ' '"-//W3C//DTD XHTML 1.0 Strict//EN" ' '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n', output) def test_doctype_in_stream(self): stream = Stream([(Stream.DOCTYPE, DocType.HTML_STRICT, (None, -1, -1))]) output = stream.render(XMLSerializer, encoding=None) self.assertEqual('<!DOCTYPE html PUBLIC ' '"-//W3C//DTD HTML 4.01//EN" ' '"http://www.w3.org/TR/html4/strict.dtd">\n', output) def test_doctype_in_stream_no_sysid(self): stream = Stream([(Stream.DOCTYPE, ('html', '-//W3C//DTD HTML 4.01//EN', None), (None, -1, -1))]) output = stream.render(XMLSerializer, encoding=None) self.assertEqual('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">\n', output) def test_doctype_in_stream_no_pubid(self): stream = Stream([ (Stream.DOCTYPE, ('html', None, 'http://www.w3.org/TR/html4/strict.dtd'), (None, -1, -1)) ]) output = stream.render(XMLSerializer, encoding=None) self.assertEqual('<!DOCTYPE html SYSTEM ' '"http://www.w3.org/TR/html4/strict.dtd">\n', output) def 
test_doctype_in_stream_no_pubid_or_sysid(self): stream = Stream([(Stream.DOCTYPE, ('html', None, None), (None, -1, -1))]) output = stream.render(XMLSerializer, encoding=None) self.assertEqual('<!DOCTYPE html>\n', output) def test_serializer_doctype(self): stream = Stream([]) output = stream.render(XMLSerializer, doctype=DocType.HTML_STRICT, encoding=None) self.assertEqual('<!DOCTYPE html PUBLIC ' '"-//W3C//DTD HTML 4.01//EN" ' '"http://www.w3.org/TR/html4/strict.dtd">\n', output) def test_doctype_one_and_only(self): stream = Stream([ (Stream.DOCTYPE, ('html', None, None), (None, -1, -1)) ]) output = stream.render(XMLSerializer, doctype=DocType.HTML_STRICT, encoding=None) self.assertEqual('<!DOCTYPE html PUBLIC ' '"-//W3C//DTD HTML 4.01//EN" ' '"http://www.w3.org/TR/html4/strict.dtd">\n', output) def test_comment(self): stream = Stream([(Stream.COMMENT, 'foo bar', (None, -1, -1))]) output = stream.render(XMLSerializer, encoding=None) self.assertEqual('<!--foo bar-->', output) def test_processing_instruction(self): stream = Stream([(Stream.PI, ('python', 'x = 2'), (None, -1, -1))]) output = stream.render(XMLSerializer, encoding=None) self.assertEqual('<?python x = 2?>', output) def test_nested_default_namespaces(self): stream = Stream([ (Stream.START_NS, ('', 'http://example.org/'), (None, -1, -1)), (Stream.START, (QName('http://example.org/}div'), Attrs()), (None, -1, -1)), (Stream.TEXT, '\n ', (None, -1, -1)), (Stream.START_NS, ('', 'http://example.org/'), (None, -1, -1)), (Stream.START, (QName('http://example.org/}p'), Attrs()), (None, -1, -1)), (Stream.END, QName('http://example.org/}p'), (None, -1, -1)), (Stream.END_NS, '', (None, -1, -1)), (Stream.TEXT, '\n ', (None, -1, -1)), (Stream.START_NS, ('', 'http://example.org/'), (None, -1, -1)), (Stream.START, (QName('http://example.org/}p'), Attrs()), (None, -1, -1)), (Stream.END, QName('http://example.org/}p'), (None, -1, -1)), (Stream.END_NS, '', (None, -1, -1)), (Stream.TEXT, '\n ', (None, -1, -1)), (Stream.END, 
QName('http://example.org/}div'), (None, -1, -1)), (Stream.END_NS, '', (None, -1, -1)) ]) output = stream.render(XMLSerializer, encoding=None) self.assertEqual("""<div xmlns="http://example.org/"> <p/> <p/> </div>""", output) def test_nested_bound_namespaces(self): stream = Stream([ (Stream.START_NS, ('x', 'http://example.org/'), (None, -1, -1)), (Stream.START, (QName('http://example.org/}div'), Attrs()), (None, -1, -1)), (Stream.TEXT, '\n ', (None, -1, -1)), (Stream.START_NS, ('x', 'http://example.org/'), (None, -1, -1)), (Stream.START, (QName('http://example.org/}p'), Attrs()), (None, -1, -1)), (Stream.END, QName('http://example.org/}p'), (None, -1, -1)), (Stream.END_NS, 'x', (None, -1, -1)), (Stream.TEXT, '\n ', (None, -1, -1)), (Stream.START_NS, ('x', 'http://example.org/'), (None, -1, -1)), (Stream.START, (QName('http://example.org/}p'), Attrs()), (None, -1, -1)), (Stream.END, QName('http://example.org/}p'), (None, -1, -1)), (Stream.END_NS, 'x', (None, -1, -1)), (Stream.TEXT, '\n ', (None, -1, -1)), (Stream.END, QName('http://example.org/}div'), (None, -1, -1)), (Stream.END_NS, 'x', (None, -1, -1)) ]) output = stream.render(XMLSerializer, encoding=None) self.assertEqual("""<x:div xmlns:x="http://example.org/"> <x:p/> <x:p/> </x:div>""", output) def test_multiple_default_namespaces(self): stream = Stream([ (Stream.START, (QName('div'), Attrs()), (None, -1, -1)), (Stream.TEXT, '\n ', (None, -1, -1)), (Stream.START_NS, ('', 'http://example.org/'), (None, -1, -1)), (Stream.START, (QName('http://example.org/}p'), Attrs()), (None, -1, -1)), (Stream.END, QName('http://example.org/}p'), (None, -1, -1)), (Stream.END_NS, '', (None, -1, -1)), (Stream.TEXT, '\n ', (None, -1, -1)), (Stream.START_NS, ('', 'http://example.org/'), (None, -1, -1)), (Stream.START, (QName('http://example.org/}p'), Attrs()), (None, -1, -1)), (Stream.END, QName('http://example.org/}p'), (None, -1, -1)), (Stream.END_NS, '', (None, -1, -1)), (Stream.TEXT, '\n ', (None, -1, -1)), (Stream.END, 
QName('div'), (None, -1, -1)), ]) output = stream.render(XMLSerializer, encoding=None) self.assertEqual("""<div> <p xmlns="http://example.org/"/> <p xmlns="http://example.org/"/> </div>""", output) def test_multiple_bound_namespaces(self): stream = Stream([ (Stream.START, (QName('div'), Attrs()), (None, -1, -1)), (Stream.TEXT, '\n ', (None, -1, -1)), (Stream.START_NS, ('x', 'http://example.org/'), (None, -1, -1)), (Stream.START, (QName('http://example.org/}p'), Attrs()), (None, -1, -1)), (Stream.END, QName('http://example.org/}p'), (None, -1, -1)), (Stream.END_NS, 'x', (None, -1, -1)), (Stream.TEXT, '\n ', (None, -1, -1)), (Stream.START_NS, ('x', 'http://example.org/'), (None, -1, -1)), (Stream.START, (QName('http://example.org/}p'), Attrs()), (None, -1, -1)), (Stream.END, QName('http://example.org/}p'), (None, -1, -1)), (Stream.END_NS, 'x', (None, -1, -1)), (Stream.TEXT, '\n ', (None, -1, -1)), (Stream.END, QName('div'), (None, -1, -1)), ]) output = stream.render(XMLSerializer, encoding=None) self.assertEqual("""<div> <x:p xmlns:x="http://example.org/"/> <x:p xmlns:x="http://example.org/"/> </div>""", output) def test_atom_with_xhtml(self): text = """<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en"> <id>urn:uuid:c60843aa-0da8-4fa6-bbe5-98007bc6774e</id> <updated>2007-01-28T11:36:02.807108-06:00</updated> <title type="xhtml"> <div xmlns="http://www.w3.org/1999/xhtml">Example</div> </title> <subtitle type="xhtml"> <div xmlns="http://www.w3.org/1999/xhtml">Bla bla bla</div> </subtitle> <icon/> </feed>""" output = XML(text).render(XMLSerializer, encoding=None) self.assertEqual(text, output) class XHTMLSerializerTestCase(unittest.TestCase): def test_xml_decl_dropped(self): stream = Stream([(Stream.XML_DECL, ('1.0', None, -1), (None, -1, -1))]) output = stream.render(XHTMLSerializer, doctype='xhtml', encoding=None) self.assertEqual('<!DOCTYPE html PUBLIC ' '"-//W3C//DTD XHTML 1.0 Strict//EN" ' '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n', output) def 
test_xml_decl_included(self): stream = Stream([(Stream.XML_DECL, ('1.0', None, -1), (None, -1, -1))]) output = stream.render(XHTMLSerializer, doctype='xhtml', drop_xml_decl=False, encoding=None) self.assertEqual('<?xml version="1.0"?>\n' '<!DOCTYPE html PUBLIC ' '"-//W3C//DTD XHTML 1.0 Strict//EN" ' '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n', output) def test_xml_lang(self): text = '<p xml:lang="en">English text</p>' output = XML(text).render(XHTMLSerializer, encoding=None) self.assertEqual('<p lang="en" xml:lang="en">English text</p>', output) def test_xml_lang_nodup(self): text = '<p xml:lang="en" lang="en">English text</p>' output = XML(text).render(XHTMLSerializer, encoding=None) self.assertEqual('<p xml:lang="en" lang="en">English text</p>', output) def test_textarea_whitespace(self): content = '\nHey there. \n\n I am indented.\n' stream = XML('<textarea name="foo">%s</textarea>' % content) output = stream.render(XHTMLSerializer, encoding=None) self.assertEqual('<textarea name="foo">%s</textarea>' % content, output) def test_pre_whitespace(self): content = '\nHey <em>there</em>. 
\n\n I am indented.\n' stream = XML('<pre>%s</pre>' % content) output = stream.render(XHTMLSerializer, encoding=None) self.assertEqual('<pre>%s</pre>' % content, output) def test_xml_space(self): text = '<foo xml:space="preserve"> Do not mess \n\n with me </foo>' output = XML(text).render(XHTMLSerializer, encoding=None) self.assertEqual('<foo> Do not mess \n\n with me </foo>', output) def test_empty_script(self): text = """<html xmlns="http://www.w3.org/1999/xhtml"> <script src="foo.js" /> </html>""" output = XML(text).render(XHTMLSerializer, encoding=None) self.assertEqual("""<html xmlns="http://www.w3.org/1999/xhtml"> <script src="foo.js"></script> </html>""", output) def test_script_escaping(self): text = """<script>/*<![CDATA[*/ if (1 < 2) { alert("Doh"); } /*]]>*/</script>""" output = XML(text).render(XHTMLSerializer, encoding=None) self.assertEqual(text, output) def test_script_escaping_with_namespace(self): text = """<script xmlns="http://www.w3.org/1999/xhtml">/*<![CDATA[*/ if (1 < 2) { alert("Doh"); } /*]]>*/</script>""" output = XML(text).render(XHTMLSerializer, encoding=None) self.assertEqual(text, output) def test_style_escaping(self): text = """<style>/*<![CDATA[*/ html > body { display: none; } /*]]>*/</style>""" output = XML(text).render(XHTMLSerializer, encoding=None) self.assertEqual(text, output) def test_style_escaping_with_namespace(self): text = """<style xmlns="http://www.w3.org/1999/xhtml">/*<![CDATA[*/ html > body { display: none; } /*]]>*/</style>""" output = XML(text).render(XHTMLSerializer, encoding=None) self.assertEqual(text, output) def test_embedded_svg(self): text = """<html xmlns="http://www.w3.org/1999/xhtml" xmlns:svg="http://www.w3.org/2000/svg"> <body> <button> <svg:svg width="600px" height="400px"> <svg:polygon id="triangle" points="50,50 50,300 300,300"></svg:polygon> </svg:svg> </button> </body> </html>""" output = XML(text).render(XHTMLSerializer, encoding=None) self.assertEqual(text, output) def 
test_xhtml_namespace_prefix(self): text = """<div xmlns="http://www.w3.org/1999/xhtml"> <strong>Hello</strong> </div>""" output = XML(text).render(XHTMLSerializer, encoding=None) self.assertEqual(text, output) def test_nested_default_namespaces(self): stream = Stream([ (Stream.START_NS, ('', 'http://example.org/'), (None, -1, -1)), (Stream.START, (QName('div'), Attrs()), (None, -1, -1)), (Stream.TEXT, '\n ', (None, -1, -1)), (Stream.START_NS, ('', 'http://example.org/'), (None, -1, -1)), (Stream.START, (QName('p'), Attrs()), (None, -1, -1)), (Stream.END, QName('p'), (None, -1, -1)), (Stream.END_NS, '', (None, -1, -1)), (Stream.TEXT, '\n ', (None, -1, -1)), (Stream.START_NS, ('', 'http://example.org/'), (None, -1, -1)), (Stream.START, (QName('p'), Attrs()), (None, -1, -1)), (Stream.END, QName('p'), (None, -1, -1)), (Stream.END_NS, '', (None, -1, -1)), (Stream.TEXT, '\n ', (None, -1, -1)), (Stream.END, QName('div'), (None, -1, -1)), (Stream.END_NS, '', (None, -1, -1)) ]) output = stream.render(XHTMLSerializer, encoding=None) self.assertEqual("""<div xmlns="http://example.org/"> <p></p> <p></p> </div>""", output) def test_nested_bound_namespaces(self): stream = Stream([ (Stream.START_NS, ('x', 'http://example.org/'), (None, -1, -1)), (Stream.START, (QName('div'), Attrs()), (None, -1, -1)), (Stream.TEXT, '\n ', (None, -1, -1)), (Stream.START_NS, ('x', 'http://example.org/'), (None, -1, -1)), (Stream.START, (QName('p'), Attrs()), (None, -1, -1)), (Stream.END, QName('p'), (None, -1, -1)), (Stream.END_NS, 'x', (None, -1, -1)), (Stream.TEXT, '\n ', (None, -1, -1)), (Stream.START_NS, ('x', 'http://example.org/'), (None, -1, -1)), (Stream.START, (QName('p'), Attrs()), (None, -1, -1)), (Stream.END, QName('p'), (None, -1, -1)), (Stream.END_NS, 'x', (None, -1, -1)), (Stream.TEXT, '\n ', (None, -1, -1)), (Stream.END, QName('div'), (None, -1, -1)), (Stream.END_NS, 'x', (None, -1, -1)) ]) output = stream.render(XHTMLSerializer, encoding=None) self.assertEqual("""<div 
xmlns:x="http://example.org/"> <p></p> <p></p> </div>""", output) def test_html5_doctype(self): stream = HTML('<html></html>') output = stream.render(XHTMLSerializer, doctype=DocType.HTML5, encoding=None) self.assertEqual('<!DOCTYPE html>\n<html></html>', output) class HTMLSerializerTestCase(unittest.TestCase): def test_xml_lang(self): text = '<p xml:lang="en">English text</p>' output = XML(text).render(HTMLSerializer, encoding=None) self.assertEqual('<p lang="en">English text</p>', output) def test_xml_lang_nodup(self): text = '<p lang="en" xml:lang="en">English text</p>' output = XML(text).render(HTMLSerializer, encoding=None) self.assertEqual('<p lang="en">English text</p>', output) def test_textarea_whitespace(self): content = '\nHey there. \n\n I am indented.\n' stream = XML('<textarea name="foo">%s</textarea>' % content) output = stream.render(HTMLSerializer, encoding=None) self.assertEqual('<textarea name="foo">%s</textarea>' % content, output) def test_pre_whitespace(self): content = '\nHey <em>there</em>. 
\n\n I am indented.\n' stream = XML('<pre>%s</pre>' % content) output = stream.render(HTMLSerializer, encoding=None) self.assertEqual('<pre>%s</pre>' % content, output) def test_xml_space(self): text = '<foo xml:space="preserve"> Do not mess \n\n with me </foo>' output = XML(text).render(HTMLSerializer, encoding=None) self.assertEqual('<foo> Do not mess \n\n with me </foo>', output) def test_empty_script(self): text = '<script src="foo.js" />' output = XML(text).render(HTMLSerializer, encoding=None) self.assertEqual('<script src="foo.js"></script>', output) def test_script_escaping(self): text = '<script>if (1 &lt; 2) { alert("Doh"); }</script>' output = XML(text).render(HTMLSerializer, encoding=None) self.assertEqual('<script>if (1 < 2) { alert("Doh"); }</script>', output) def test_script_escaping_with_namespace(self): text = """<script xmlns="http://www.w3.org/1999/xhtml"> if (1 &lt; 2) { alert("Doh"); } </script>""" output = XML(text).render(HTMLSerializer, encoding=None) self.assertEqual("""<script> if (1 < 2) { alert("Doh"); } </script>""", output) def test_style_escaping(self): text = '<style>html &gt; body { display: none; }</style>' output = XML(text).render(HTMLSerializer, encoding=None) self.assertEqual('<style>html > body { display: none; }</style>', output) def test_style_escaping_with_namespace(self): text = """<style xmlns="http://www.w3.org/1999/xhtml"> html &gt; body { display: none; } </style>""" output = XML(text).render(HTMLSerializer, encoding=None) self.assertEqual("""<style> html > body { display: none; } </style>""", output) def test_html5_doctype(self): stream = HTML('<html></html>') output = stream.render(HTMLSerializer, doctype=DocType.HTML5, encoding=None) self.assertEqual('<!DOCTYPE html>\n<html></html>', output) class EmptyTagFilterTestCase(unittest.TestCase): def test_empty(self): stream = XML('<elem></elem>') | EmptyTagFilter() self.assertEqual([EmptyTagFilter.EMPTY], [ev[0] for ev in stream]) def test_text_content(self): stream = 
XML('<elem>foo</elem>') | EmptyTagFilter() self.assertEqual([Stream.START, Stream.TEXT, Stream.END], [ev[0] for ev in stream]) def test_elem_content(self): stream = XML('<elem><sub /><sub /></elem>') | EmptyTagFilter() self.assertEqual([Stream.START, EmptyTagFilter.EMPTY, EmptyTagFilter.EMPTY, Stream.END], [ev[0] for ev in stream]) def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(XMLSerializerTestCase, 'test')) suite.addTest(unittest.makeSuite(XHTMLSerializerTestCase, 'test')) suite.addTest(unittest.makeSuite(HTMLSerializerTestCase, 'test')) suite.addTest(unittest.makeSuite(EmptyTagFilterTestCase, 'test')) suite.addTest(doctest.DocTestSuite(XMLSerializer.__module__)) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
[ [ 1, 0, 0.0302, 0.0022, 0, 0.66, 0, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0323, 0.0022, 0, 0.66, 0.0909, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 1, 0, 0.0345, 0.0022, 0, 0....
[ "import doctest", "import unittest", "import sys", "from genshi.core import Attrs, Stream, QName", "from genshi.input import HTML, XML", "from genshi.output import DocType, XMLSerializer, XHTMLSerializer, \\\n HTMLSerializer, EmptyTagFilter", "class XMLSerializerTestCase(unittes...
# -*- coding: utf-8 -*- # # Copyright (C) 2006,2009 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. import doctest import unittest from genshi import util from genshi.util import LRUCache class LRUCacheTestCase(unittest.TestCase): def test_setitem(self): cache = LRUCache(2) cache['A'] = 0 self.assertEqual(1, len(cache)) self.assertEqual('A', cache.head.key) self.assertEqual('A', cache.tail.key) item_a = cache._dict['A'] self.assertEqual('A', item_a.key) self.assertEqual(0, item_a.value) self.assertEqual(None, item_a.prv) self.assertEqual(None, item_a.nxt) cache['B'] = 1 self.assertEqual(2, len(cache)) self.assertEqual('B', cache.head.key) self.assertEqual('A', cache.tail.key) item_a = cache._dict['A'] item_b = cache._dict['B'] self.assertEqual('A', item_a.key) self.assertEqual(0, item_a.value) self.assertEqual(item_b, item_a.prv) self.assertEqual(None, item_a.nxt) self.assertEqual('B', item_b.key) self.assertEqual(1, item_b.value) self.assertEqual(None, item_b.prv) self.assertEqual(item_a, item_b.nxt) cache['C'] = 2 self.assertEqual(2, len(cache)) self.assertEqual('C', cache.head.key) self.assertEqual('B', cache.tail.key) item_b = cache._dict['B'] item_c = cache._dict['C'] self.assertEqual('B', item_b.key) self.assertEqual(1, item_b.value) self.assertEqual(item_c, item_b.prv) self.assertEqual(None, item_b.nxt) self.assertEqual('C', item_c.key) self.assertEqual(2, item_c.value) self.assertEqual(None, item_c.prv) self.assertEqual(item_b, item_c.nxt) def test_getitem(self): cache = LRUCache(2) cache['A'] = 0 cache['B'] = 1 cache['A'] self.assertEqual(2, len(cache)) 
self.assertEqual('A', cache.head.key) self.assertEqual('B', cache.tail.key) item_a = cache._dict['A'] item_b = cache._dict['B'] self.assertEqual('A', item_a.key) self.assertEqual(0, item_a.value) self.assertEqual(None, item_a.prv) self.assertEqual(item_b, item_a.nxt) self.assertEqual('B', item_b.key) self.assertEqual(1, item_b.value) self.assertEqual(item_a, item_b.prv) self.assertEqual(None, item_b.nxt) def suite(): suite = unittest.TestSuite() suite.addTest(doctest.DocTestSuite(util)) suite.addTest(unittest.makeSuite(LRUCacheTestCase, 'test')) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
[ [ 1, 0, 0.1489, 0.0106, 0, 0.66, 0, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.1596, 0.0106, 0, 0.66, 0.1667, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 1, 0, 0.1809, 0.0106, 0, 0....
[ "import doctest", "import unittest", "from genshi import util", "from genshi.util import LRUCache", "class LRUCacheTestCase(unittest.TestCase):\n\n def test_setitem(self):\n cache = LRUCache(2)\n cache['A'] = 0\n self.assertEqual(1, len(cache))\n self.assertEqual('A', cache.he...
# -*- coding: utf-8 -*- # # Copyright (C) 2006 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. import doctest import unittest from genshi.input import XML from genshi.path import Path, PathParser, PathSyntaxError, GenericStrategy, \ SingleStepStrategy, SimplePathStrategy class FakePath(Path): def __init__(self, strategy): self.strategy = strategy def test(self, ignore_context = False): return self.strategy.test(ignore_context) class PathTestCase(unittest.TestCase): strategies = [GenericStrategy, SingleStepStrategy, SimplePathStrategy] def test_error_no_absolute_path(self): self.assertRaises(PathSyntaxError, Path, '/root') def test_error_unsupported_axis(self): self.assertRaises(PathSyntaxError, Path, '..') self.assertRaises(PathSyntaxError, Path, 'parent::ma') def test_1step(self): xml = XML('<root><elem/></root>') self._test_eval( path = 'elem', equiv = '<Path "child::elem">', input = xml, output = '<elem/>' ) self._test_eval( path = 'elem', equiv = '<Path "child::elem">', input = xml, output = '<elem/>' ) self._test_eval( path = 'child::elem', equiv = '<Path "child::elem">', input = xml, output = '<elem/>' ) self._test_eval( path = '//elem', equiv = '<Path "descendant-or-self::elem">', input = xml, output = '<elem/>' ) self._test_eval( path = 'descendant::elem', equiv = '<Path "descendant::elem">', input = xml, output = '<elem/>' ) def test_1step_self(self): xml = XML('<root><elem/></root>') self._test_eval( path = '.', equiv = '<Path "self::node()">', input = xml, output = '<root><elem/></root>' ) self._test_eval( path = 'self::node()', equiv = '<Path "self::node()">', 
input = xml, output = '<root><elem/></root>' ) def test_1step_wildcard(self): xml = XML('<root><elem/></root>') self._test_eval( path = '*', equiv = '<Path "child::*">', input = xml, output = '<elem/>' ) self._test_eval( path = 'child::*', equiv = '<Path "child::*">', input = xml, output = '<elem/>' ) self._test_eval( path = 'child::node()', equiv = '<Path "child::node()">', input = xml, output = '<elem/>' ) self._test_eval( path = '//*', equiv = '<Path "descendant-or-self::*">', input = xml, output = '<root><elem/></root>' ) def test_1step_attribute(self): self._test_eval( path = '@foo', equiv = '<Path "attribute::foo">', input = XML('<root/>'), output = '' ) xml = XML('<root foo="bar"/>') self._test_eval( path = '@foo', equiv = '<Path "attribute::foo">', input = xml, output = 'bar' ) self._test_eval( path = './@foo', equiv = '<Path "self::node()/attribute::foo">', input = xml, output = 'bar' ) def test_1step_text(self): xml = XML('<root>Hey</root>') self._test_eval( path = 'text()', equiv = '<Path "child::text()">', input = xml, output = 'Hey' ) self._test_eval( path = './text()', equiv = '<Path "self::node()/child::text()">', input = xml, output = 'Hey' ) self._test_eval( path = '//text()', equiv = '<Path "descendant-or-self::text()">', input = xml, output = 'Hey' ) self._test_eval( path = './/text()', equiv = '<Path "self::node()/descendant-or-self::node()/child::text()">', input = xml, output = 'Hey' ) def test_2step(self): xml = XML('<root><foo/><bar/></root>') self._test_eval('*', input=xml, output='<foo/><bar/>') self._test_eval('bar', input=xml, output='<bar/>') self._test_eval('baz', input=xml, output='') def test_2step_attribute(self): xml = XML('<elem class="x"><span id="joe">Hey Joe</span></elem>') self._test_eval('@*', input=xml, output='x') self._test_eval('./@*', input=xml, output='x') self._test_eval('.//@*', input=xml, output='xjoe') self._test_eval('*/@*', input=xml, output='joe') xml = XML('<elem><foo id="1"/><foo id="2"/></elem>') 
self._test_eval('@*', input=xml, output='') self._test_eval('foo/@*', input=xml, output='12') def test_2step_complex(self): xml = XML('<root><foo><bar/></foo></root>') self._test_eval( path = 'foo/bar', equiv = '<Path "child::foo/child::bar">', input = xml, output = '<bar/>' ) self._test_eval( path = './bar', equiv = '<Path "self::node()/child::bar">', input = xml, output = '' ) self._test_eval( path = 'foo/*', equiv = '<Path "child::foo/child::*">', input = xml, output = '<bar/>' ) xml = XML('<root><foo><bar id="1"/></foo><bar id="2"/></root>') self._test_eval( path = './bar', equiv = '<Path "self::node()/child::bar">', input = xml, output = '<bar id="2"/>' ) xml = XML('''<table> <tr><td>1</td><td>One</td></tr> <tr><td>2</td><td>Two</td></tr> </table>''') self._test_eval( path = 'tr/td[1]', input = xml, output = '<td>1</td><td>2</td>' ) xml = XML('''<ul> <li>item1 <ul><li>subitem11</li></ul> </li> <li>item2 <ul><li>subitem21</li></ul> </li> </ul>''') self._test_eval( path = 'li[2]/ul', input = xml, output = '<ul><li>subitem21</li></ul>' ) def test_2step_text(self): xml = XML('<root><item>Foo</item></root>') self._test_eval( path = 'item/text()', equiv = '<Path "child::item/child::text()">', input = xml, output = 'Foo' ) self._test_eval( path = '*/text()', equiv = '<Path "child::*/child::text()">', input = xml, output = 'Foo' ) self._test_eval( path = '//text()', equiv = '<Path "descendant-or-self::text()">', input = xml, output = 'Foo' ) self._test_eval( path = './text()', equiv = '<Path "self::node()/child::text()">', input = xml, output = '' ) xml = XML('<root><item>Foo</item><item>Bar</item></root>') self._test_eval( path = 'item/text()', equiv = '<Path "child::item/child::text()">', input = xml, output = 'FooBar' ) xml = XML('<root><item><name>Foo</name><sub><name>Bar</name></sub></item></root>') self._test_eval( path = 'item/name/text()', equiv = '<Path "child::item/child::name/child::text()">', input = xml, output = 'Foo' ) def test_3step(self): xml = 
XML('<root><foo><bar/></foo></root>') self._test_eval( path = 'foo/*', equiv = '<Path "child::foo/child::*">', input = xml, output = '<bar/>' ) def test_3step_complex(self): self._test_eval( path = '*/bar', equiv = '<Path "child::*/child::bar">', input = XML('<root><foo><bar/></foo></root>'), output = '<bar/>' ) self._test_eval( path = '//bar', equiv = '<Path "descendant-or-self::bar">', input = XML('<root><foo><bar id="1"/></foo><bar id="2"/></root>'), output = '<bar id="1"/><bar id="2"/>' ) def test_3step_complex_text(self): xml = XML('<root><item><bar>Some text </bar><baz><bar>in here.</bar></baz></item></root>') self._test_eval( path = 'item/bar/text()', equiv = '<Path "child::item/child::bar/child::text()">', input = xml, output = 'Some text ' ) self._test_eval( path = 'item//bar/text()', equiv = '<Path "child::item/descendant-or-self::node()/child::bar/child::text()">', input = xml, output = 'Some text in here.' ) def test_node_type_comment(self): xml = XML('<root><!-- commented --></root>') self._test_eval( path = 'comment()', equiv = '<Path "child::comment()">', input = xml, output = '<!-- commented -->' ) def test_node_type_text(self): xml = XML('<root>Some text <br/>in here.</root>') self._test_eval( path = 'text()', equiv = '<Path "child::text()">', input = xml, output = 'Some text in here.' ) def test_node_type_node(self): xml = XML('<root>Some text <br/>in here.</root>') self._test_eval( path = 'node()', equiv = '<Path "child::node()">', input = xml, output = 'Some text <br/>in here.' 
) def test_node_type_processing_instruction(self): xml = XML('<?python x = 2 * 3 ?><root><?php echo("x") ?></root>') self._test_eval( path = '//processing-instruction()', equiv = '<Path "descendant-or-self::processing-instruction()">', input = xml, output = '<?python x = 2 * 3 ?><?php echo("x") ?>' ) self._test_eval( path = 'processing-instruction()', equiv = '<Path "child::processing-instruction()">', input = xml, output = '<?php echo("x") ?>' ) self._test_eval( path = 'processing-instruction("php")', equiv = '<Path "child::processing-instruction(\"php\")">', input = xml, output = '<?php echo("x") ?>' ) def test_simple_union(self): xml = XML("""<body>1<br />2<br />3<br /></body>""") self._test_eval( path = '*|text()', equiv = '<Path "child::*|child::text()">', input = xml, output = '1<br/>2<br/>3<br/>' ) def test_predicate_name(self): xml = XML('<root><foo/><bar/></root>') self._test_eval('*[name()="foo"]', input=xml, output='<foo/>') def test_predicate_localname(self): xml = XML('<root><foo xmlns="NS"/><bar/></root>') self._test_eval('*[local-name()="foo"]', input=xml, output='<foo xmlns="NS"/>') def test_predicate_namespace(self): xml = XML('<root><foo xmlns="NS"/><bar/></root>') self._test_eval('*[namespace-uri()="NS"]', input=xml, output='<foo xmlns="NS"/>') def test_predicate_not_name(self): xml = XML('<root><foo/><bar/></root>') self._test_eval('*[not(name()="foo")]', input=xml, output='<bar/>') def test_predicate_attr(self): xml = XML('<root><item/><item important="very"/></root>') self._test_eval('item[@important]', input=xml, output='<item important="very"/>') self._test_eval('item[@important="very"]', input=xml, output='<item important="very"/>') def test_predicate_attr_equality(self): xml = XML('<root><item/><item important="notso"/></root>') self._test_eval('item[@important="very"]', input=xml, output='') self._test_eval('item[@important!="very"]', input=xml, output='<item/><item important="notso"/>') def test_predicate_attr_greater_than(self): xml = 
XML('<root><item priority="3"/></root>') self._test_eval('item[@priority>3]', input=xml, output='') self._test_eval('item[@priority>2]', input=xml, output='<item priority="3"/>') def test_predicate_attr_less_than(self): xml = XML('<root><item priority="3"/></root>') self._test_eval('item[@priority<3]', input=xml, output='') self._test_eval('item[@priority<4]', input=xml, output='<item priority="3"/>') def test_predicate_attr_and(self): xml = XML('<root><item/><item important="very"/></root>') self._test_eval('item[@important and @important="very"]', input=xml, output='<item important="very"/>') self._test_eval('item[@important and @important="notso"]', input=xml, output='') def test_predicate_attr_or(self): xml = XML('<root><item/><item important="very"/></root>') self._test_eval('item[@urgent or @important]', input=xml, output='<item important="very"/>') self._test_eval('item[@urgent or @notso]', input=xml, output='') def test_predicate_boolean_function(self): xml = XML('<root><foo>bar</foo></root>') self._test_eval('*[boolean("")]', input=xml, output='') self._test_eval('*[boolean("yo")]', input=xml, output='<foo>bar</foo>') self._test_eval('*[boolean(0)]', input=xml, output='') self._test_eval('*[boolean(42)]', input=xml, output='<foo>bar</foo>') self._test_eval('*[boolean(false())]', input=xml, output='') self._test_eval('*[boolean(true())]', input=xml, output='<foo>bar</foo>') def test_predicate_ceil_function(self): xml = XML('<root><foo>bar</foo></root>') self._test_eval('*[ceiling("4.5")=5]', input=xml, output='<foo>bar</foo>') def test_predicate_concat_function(self): xml = XML('<root><foo>bar</foo></root>') self._test_eval('*[name()=concat("f", "oo")]', input=xml, output='<foo>bar</foo>') def test_predicate_contains_function(self): xml = XML('<root><foo>bar</foo></root>') self._test_eval('*[contains(name(), "oo")]', input=xml, output='<foo>bar</foo>') def test_predicate_matches_function(self): xml = XML('<root><foo>bar</foo><bar>foo</bar></root>') 
self._test_eval('*[matches(name(), "foo|bar")]', input=xml, output='<foo>bar</foo><bar>foo</bar>') def test_predicate_false_function(self): xml = XML('<root><foo>bar</foo></root>') self._test_eval('*[false()]', input=xml, output='') def test_predicate_floor_function(self): xml = XML('<root><foo>bar</foo></root>') self._test_eval('*[floor("4.5")=4]', input=xml, output='<foo>bar</foo>') def test_predicate_normalize_space_function(self): xml = XML('<root><foo>bar</foo></root>') self._test_eval('*[normalize-space(" foo bar ")="foo bar"]', input=xml, output='<foo>bar</foo>') def test_predicate_number_function(self): xml = XML('<root><foo>bar</foo></root>') self._test_eval('*[number("3.0")=3]', input=xml, output='<foo>bar</foo>') self._test_eval('*[number("3.0")=3.0]', input=xml, output='<foo>bar</foo>') self._test_eval('*[number("0.1")=.1]', input=xml, output='<foo>bar</foo>') def test_predicate_round_function(self): xml = XML('<root><foo>bar</foo></root>') self._test_eval('*[round("4.4")=4]', input=xml, output='<foo>bar</foo>') self._test_eval('*[round("4.6")=5]', input=xml, output='<foo>bar</foo>') def test_predicate_starts_with_function(self): xml = XML('<root><foo>bar</foo></root>') self._test_eval('*[starts-with(name(), "f")]', input=xml, output='<foo>bar</foo>') self._test_eval('*[starts-with(name(), "b")]', input=xml, output='') def test_predicate_string_length_function(self): xml = XML('<root><foo>bar</foo></root>') self._test_eval('*[string-length(name())=3]', input=xml, output='<foo>bar</foo>') def test_predicate_substring_function(self): xml = XML('<root><foo>bar</foo></root>') self._test_eval('*[substring(name(), 1)="oo"]', input=xml, output='<foo>bar</foo>') self._test_eval('*[substring(name(), 1, 1)="o"]', input=xml, output='<foo>bar</foo>') def test_predicate_substring_after_function(self): xml = XML('<root><foo>bar</foo></root>') self._test_eval('*[substring-after(name(), "f")="oo"]', input=xml, output='<foo>bar</foo>') def 
test_predicate_substring_before_function(self): xml = XML('<root><foo>bar</foo></root>') self._test_eval('*[substring-before(name(), "oo")="f"]', input=xml, output='<foo>bar</foo>') def test_predicate_translate_function(self): xml = XML('<root><foo>bar</foo></root>') self._test_eval('*[translate(name(), "fo", "ba")="baa"]', input=xml, output='<foo>bar</foo>') def test_predicate_true_function(self): xml = XML('<root><foo>bar</foo></root>') self._test_eval('*[true()]', input=xml, output='<foo>bar</foo>') def test_predicate_variable(self): xml = XML('<root><foo>bar</foo></root>') self._test_eval( path = '*[name()=$bar]', input = xml, output = '<foo>bar</foo>', variables = {'bar': 'foo'} ) def test_predicate_position(self): xml = XML('<root><foo id="a1"/><foo id="a2"/><foo id="a3"/></root>') self._test_eval('*[2]', input=xml, output='<foo id="a2"/>') def test_predicate_attr_and_position(self): xml = XML('<root><foo/><foo id="a1"/><foo id="a2"/></root>') self._test_eval('*[@id][2]', input=xml, output='<foo id="a2"/>') def test_predicate_position_and_attr(self): xml = XML('<root><foo/><foo id="a1"/><foo id="a2"/></root>') self._test_eval('*[1][@id]', input=xml, output='') self._test_eval('*[2][@id]', input=xml, output='<foo id="a1"/>') def test_predicate_advanced_position(self): xml = XML('<root><a><b><c><d><e/></d></c></b></a></root>') self._test_eval( 'descendant-or-self::*/' 'descendant-or-self::*/' 'descendant-or-self::*[2]/' 'self::*/descendant::*[3]', input=xml, output='<d><e/></d>') def test_predicate_child_position(self): xml = XML('\ <root><a><b>1</b><b>2</b><b>3</b></a><a><b>4</b><b>5</b></a></root>') self._test_eval('//a/b[2]', input=xml, output='<b>2</b><b>5</b>') self._test_eval('//a/b[3]', input=xml, output='<b>3</b>') def test_name_with_namespace(self): xml = XML('<root xmlns:f="FOO"><f:foo>bar</f:foo></root>') self._test_eval( path = 'f:foo', equiv = '<Path "child::f:foo">', input = xml, output = '<foo xmlns="FOO">bar</foo>', namespaces = {'f': 'FOO'} ) 
def test_wildcard_with_namespace(self): xml = XML('<root xmlns:f="FOO"><f:foo>bar</f:foo></root>') self._test_eval( path = 'f:*', equiv = '<Path "child::f:*">', input = xml, output = '<foo xmlns="FOO">bar</foo>', namespaces = {'f': 'FOO'} ) def test_predicate_termination(self): """ Verify that a patch matching the self axis with a predicate doesn't cause an infinite loop. See <http://genshi.edgewall.org/ticket/82>. """ xml = XML('<ul flag="1"><li>a</li><li>b</li></ul>') self._test_eval('.[@flag="1"]/*', input=xml, output='<li>a</li><li>b</li>') xml = XML('<ul flag="1"><li>a</li><li>b</li></ul>') self._test_eval('.[@flag="0"]/*', input=xml, output='') def test_attrname_with_namespace(self): xml = XML('<root xmlns:f="FOO"><foo f:bar="baz"/></root>') self._test_eval('foo[@f:bar]', input=xml, output='<foo xmlns:ns1="FOO" ns1:bar="baz"/>', namespaces={'f': 'FOO'}) def test_attrwildcard_with_namespace(self): xml = XML('<root xmlns:f="FOO"><foo f:bar="baz"/></root>') self._test_eval('foo[@f:*]', input=xml, output='<foo xmlns:ns1="FOO" ns1:bar="baz"/>', namespaces={'f': 'FOO'}) def test_self_and_descendant(self): xml = XML('<root><foo/></root>') self._test_eval('self::root', input=xml, output='<root><foo/></root>') self._test_eval('self::foo', input=xml, output='') self._test_eval('descendant::root', input=xml, output='') self._test_eval('descendant::foo', input=xml, output='<foo/>') self._test_eval('descendant-or-self::root', input=xml, output='<root><foo/></root>') self._test_eval('descendant-or-self::foo', input=xml, output='<foo/>') def test_long_simple_paths(self): xml = XML('<root><a><b><a><d><a><b><a><b><a><b><a><c>!' 
'</c></a></b></a></b></a></b></a></d></a></b></a></root>') self._test_eval('//a/b/a/b/a/c', input=xml, output='<c>!</c>') self._test_eval('//a/b/a/c', input=xml, output='<c>!</c>') self._test_eval('//a/c', input=xml, output='<c>!</c>') self._test_eval('//c', input=xml, output='<c>!</c>') # Please note that a//b is NOT the same as a/descendant::b # it is a/descendant-or-self::node()/b, which SimplePathStrategy # does NOT support self._test_eval('a/b/descendant::a/c', input=xml, output='<c>!</c>') self._test_eval('a/b/descendant::a/d/descendant::a/c', input=xml, output='<c>!</c>') self._test_eval('a/b/descendant::a/d/a/c', input=xml, output='') self._test_eval('//d/descendant::b/descendant::b/descendant::b' '/descendant::c', input=xml, output='<c>!</c>') self._test_eval('//d/descendant::b/descendant::b/descendant::b' '/descendant::b/descendant::c', input=xml, output='') def _test_support(self, strategy_class, text): path = PathParser(text, None, -1).parse()[0] return strategy_class.supports(path) def test_simple_strategy_support(self): self.assert_(self._test_support(SimplePathStrategy, 'a/b')) self.assert_(self._test_support(SimplePathStrategy, 'self::a/b')) self.assert_(self._test_support(SimplePathStrategy, 'descendant::a/b')) self.assert_(self._test_support(SimplePathStrategy, 'descendant-or-self::a/b')) self.assert_(self._test_support(SimplePathStrategy, '//a/b')) self.assert_(self._test_support(SimplePathStrategy, 'a/@b')) self.assert_(self._test_support(SimplePathStrategy, 'a/text()')) # a//b is a/descendant-or-self::node()/b self.assert_(not self._test_support(SimplePathStrategy, 'a//b')) self.assert_(not self._test_support(SimplePathStrategy, 'node()/@a')) self.assert_(not self._test_support(SimplePathStrategy, '@a')) self.assert_(not self._test_support(SimplePathStrategy, 'foo:bar')) self.assert_(not self._test_support(SimplePathStrategy, 'a/@foo:bar')) def _test_strategies(self, input, path, output, namespaces=None, variables=None): for strategy in 
self.strategies: if not strategy.supports(path): continue s = strategy(path) rendered = FakePath(s).select(input, namespaces=namespaces, variables=variables) \ .render(encoding=None) msg = 'Bad render using %s strategy' % str(strategy) msg += '\nExpected:\t%r' % output msg += '\nRendered:\t%r' % rendered self.assertEqual(output, rendered, msg) def _test_eval(self, path, equiv=None, input=None, output='', namespaces=None, variables=None): path = Path(path) if equiv is not None: self.assertEqual(equiv, repr(path)) if input is None: return rendered = path.select(input, namespaces=namespaces, variables=variables).render(encoding=None) msg = 'Bad output using whole path' msg += '\nExpected:\t%r' % output msg += '\nRendered:\t%r' % rendered self.assertEqual(output, rendered, msg) if len(path.paths) == 1: self._test_strategies(input, path.paths[0], output, namespaces=namespaces, variables=variables) def suite(): suite = unittest.TestSuite() suite.addTest(doctest.DocTestSuite(Path.__module__)) suite.addTest(unittest.makeSuite(PathTestCase, 'test')) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
[ [ 1, 0, 0.0201, 0.0014, 0, 0.66, 0, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0215, 0.0014, 0, 0.66, 0.1429, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 1, 0, 0.0244, 0.0014, 0, 0....
[ "import doctest", "import unittest", "from genshi.input import XML", "from genshi.path import Path, PathParser, PathSyntaxError, GenericStrategy, \\\n SingleStepStrategy, SimplePathStrategy", "class FakePath(Path):\n def __init__(self, strategy):\n self.strategy = strategy\n...
# -*- coding: utf-8 -*- # # Copyright (C) 2006-2009 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. import doctest from StringIO import StringIO import sys import unittest from genshi.core import Attrs, Stream from genshi.input import XMLParser, HTMLParser, ParseError class XMLParserTestCase(unittest.TestCase): def test_text_node_pos_single_line(self): text = '<elem>foo bar</elem>' events = list(XMLParser(StringIO(text))) kind, data, pos = events[1] self.assertEqual(Stream.TEXT, kind) self.assertEqual('foo bar', data) self.assertEqual((None, 1, 6), pos) def test_text_node_pos_multi_line(self): text = '''<elem>foo bar</elem>''' events = list(XMLParser(StringIO(text))) kind, data, pos = events[1] self.assertEqual(Stream.TEXT, kind) self.assertEqual('foo\nbar', data) self.assertEqual((None, 1, -1), pos) def test_element_attribute_order(self): text = '<elem title="baz" id="foo" class="bar" />' events = list(XMLParser(StringIO(text))) kind, data, pos = events[0] self.assertEqual(Stream.START, kind) tag, attrib = data self.assertEqual('elem', tag) self.assertEqual(('title', 'baz'), attrib[0]) self.assertEqual(('id', 'foo'), attrib[1]) self.assertEqual(('class', 'bar'), attrib[2]) def test_unicode_input(self): text = u'<div>\u2013</div>' events = list(XMLParser(StringIO(text))) kind, data, pos = events[1] self.assertEqual(Stream.TEXT, kind) self.assertEqual(u'\u2013', data) def test_latin1_encoded(self): text = u'<div>\xf6</div>'.encode('iso-8859-1') events = list(XMLParser(StringIO(text), encoding='iso-8859-1')) kind, data, pos = events[1] self.assertEqual(Stream.TEXT, kind) 
self.assertEqual(u'\xf6', data) def test_latin1_encoded_xmldecl(self): text = u"""<?xml version="1.0" encoding="iso-8859-1" ?> <div>\xf6</div> """.encode('iso-8859-1') events = list(XMLParser(StringIO(text))) kind, data, pos = events[2] self.assertEqual(Stream.TEXT, kind) self.assertEqual(u'\xf6', data) def test_html_entity_with_dtd(self): text = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html>&nbsp;</html> """ events = list(XMLParser(StringIO(text))) kind, data, pos = events[2] self.assertEqual(Stream.TEXT, kind) self.assertEqual(u'\xa0', data) def test_html_entity_without_dtd(self): text = '<html>&nbsp;</html>' events = list(XMLParser(StringIO(text))) kind, data, pos = events[1] self.assertEqual(Stream.TEXT, kind) self.assertEqual(u'\xa0', data) def test_html_entity_in_attribute(self): text = '<p title="&nbsp;"/>' events = list(XMLParser(StringIO(text))) kind, data, pos = events[0] self.assertEqual(Stream.START, kind) self.assertEqual(u'\xa0', data[1].get('title')) kind, data, pos = events[1] self.assertEqual(Stream.END, kind) def test_undefined_entity_with_dtd(self): text = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html>&junk;</html> """ events = XMLParser(StringIO(text)) self.assertRaises(ParseError, list, events) def test_undefined_entity_without_dtd(self): text = '<html>&junk;</html>' events = XMLParser(StringIO(text)) self.assertRaises(ParseError, list, events) class HTMLParserTestCase(unittest.TestCase): def test_text_node_pos_single_line(self): text = '<elem>foo bar</elem>' events = list(HTMLParser(StringIO(text))) kind, data, pos = events[1] self.assertEqual(Stream.TEXT, kind) self.assertEqual('foo bar', data) self.assertEqual((None, 1, 6), pos) def test_text_node_pos_multi_line(self): text = '''<elem>foo bar</elem>''' events = list(HTMLParser(StringIO(text))) kind, data, pos = events[1] 
self.assertEqual(Stream.TEXT, kind) self.assertEqual('foo\nbar', data) self.assertEqual((None, 1, 6), pos) def test_input_encoding_text(self): text = u'<div>\xf6</div>'.encode('iso-8859-1') events = list(HTMLParser(StringIO(text), encoding='iso-8859-1')) kind, data, pos = events[1] self.assertEqual(Stream.TEXT, kind) self.assertEqual(u'\xf6', data) def test_input_encoding_attribute(self): text = u'<div title="\xf6"></div>'.encode('iso-8859-1') events = list(HTMLParser(StringIO(text), encoding='iso-8859-1')) kind, (tag, attrib), pos = events[0] self.assertEqual(Stream.START, kind) self.assertEqual(u'\xf6', attrib.get('title')) def test_unicode_input(self): text = u'<div>\u2013</div>' events = list(HTMLParser(StringIO(text))) kind, data, pos = events[1] self.assertEqual(Stream.TEXT, kind) self.assertEqual(u'\u2013', data) def test_html_entity_in_attribute(self): text = '<p title="&nbsp;"></p>' events = list(HTMLParser(StringIO(text))) kind, data, pos = events[0] self.assertEqual(Stream.START, kind) self.assertEqual(u'\xa0', data[1].get('title')) kind, data, pos = events[1] self.assertEqual(Stream.END, kind) def test_html_entity_in_text(self): text = '<p>&nbsp;</p>' events = list(HTMLParser(StringIO(text))) kind, data, pos = events[1] self.assertEqual(Stream.TEXT, kind) self.assertEqual(u'\xa0', data) def test_processing_instruction(self): text = '<?php echo "Foobar" ?>' events = list(HTMLParser(StringIO(text))) kind, (target, data), pos = events[0] self.assertEqual(Stream.PI, kind) self.assertEqual('php', target) self.assertEqual('echo "Foobar"', data) def test_xmldecl(self): text = '<?xml version="1.0" ?><root />' events = list(XMLParser(StringIO(text))) kind, (version, encoding, standalone), pos = events[0] self.assertEqual(Stream.XML_DECL, kind) self.assertEqual('1.0', version) self.assertEqual(None, encoding) self.assertEqual(-1, standalone) def test_xmldecl_encoding(self): text = '<?xml version="1.0" encoding="utf-8" ?><root />' events = 
list(XMLParser(StringIO(text))) kind, (version, encoding, standalone), pos = events[0] self.assertEqual(Stream.XML_DECL, kind) self.assertEqual('1.0', version) self.assertEqual('utf-8', encoding) self.assertEqual(-1, standalone) def test_xmldecl_standalone(self): text = '<?xml version="1.0" standalone="yes" ?><root />' events = list(XMLParser(StringIO(text))) kind, (version, encoding, standalone), pos = events[0] self.assertEqual(Stream.XML_DECL, kind) self.assertEqual('1.0', version) self.assertEqual(None, encoding) self.assertEqual(1, standalone) def test_processing_instruction_trailing_qmark(self): text = '<?php echo "Foobar" ??>' events = list(HTMLParser(StringIO(text))) kind, (target, data), pos = events[0] self.assertEqual(Stream.PI, kind) self.assertEqual('php', target) self.assertEqual('echo "Foobar" ?', data) def test_out_of_order_tags1(self): text = '<span><b>Foobar</span></b>' events = list(HTMLParser(StringIO(text))) self.assertEqual(5, len(events)) self.assertEqual((Stream.START, ('span', ())), events[0][:2]) self.assertEqual((Stream.START, ('b', ())), events[1][:2]) self.assertEqual((Stream.TEXT, 'Foobar'), events[2][:2]) self.assertEqual((Stream.END, 'b'), events[3][:2]) self.assertEqual((Stream.END, 'span'), events[4][:2]) def test_out_of_order_tags2(self): text = '<span class="baz"><b><i>Foobar</span></b></i>' events = list(HTMLParser(StringIO(text))) self.assertEqual(7, len(events)) self.assertEqual((Stream.START, ('span', Attrs([('class', 'baz')]))), events[0][:2]) self.assertEqual((Stream.START, ('b', ())), events[1][:2]) self.assertEqual((Stream.START, ('i', ())), events[2][:2]) self.assertEqual((Stream.TEXT, 'Foobar'), events[3][:2]) self.assertEqual((Stream.END, 'i'), events[4][:2]) self.assertEqual((Stream.END, 'b'), events[5][:2]) self.assertEqual((Stream.END, 'span'), events[6][:2]) def test_out_of_order_tags3(self): text = '<span><b>Foobar</i>' events = list(HTMLParser(StringIO(text))) self.assertEqual(5, len(events)) 
self.assertEqual((Stream.START, ('span', ())), events[0][:2]) self.assertEqual((Stream.START, ('b', ())), events[1][:2]) self.assertEqual((Stream.TEXT, 'Foobar'), events[2][:2]) self.assertEqual((Stream.END, 'b'), events[3][:2]) self.assertEqual((Stream.END, 'span'), events[4][:2]) def test_hex_charref(self): text = '<span>&#x27;</span>' events = list(HTMLParser(StringIO(text))) self.assertEqual(3, len(events)) self.assertEqual((Stream.START, ('span', ())), events[0][:2]) self.assertEqual((Stream.TEXT, "'"), events[1][:2]) self.assertEqual((Stream.END, 'span'), events[2][:2]) def suite(): suite = unittest.TestSuite() suite.addTest(doctest.DocTestSuite(XMLParser.__module__)) suite.addTest(unittest.makeSuite(XMLParserTestCase, 'test')) suite.addTest(unittest.makeSuite(HTMLParserTestCase, 'test')) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
[ [ 1, 0, 0.0528, 0.0038, 0, 0.66, 0, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0566, 0.0038, 0, 0.66, 0.1111, 609, 0, 1, 0, 0, 609, 0, 0 ], [ 1, 0, 0.0604, 0.0038, 0, ...
[ "import doctest", "from StringIO import StringIO", "import sys", "import unittest", "from genshi.core import Attrs, Stream", "from genshi.input import XMLParser, HTMLParser, ParseError", "class XMLParserTestCase(unittest.TestCase):\n\n def test_text_node_pos_single_line(self):\n text = '<elem>...
# -*- coding: utf-8 -*- # # Copyright (C) 2006 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. import unittest def suite(): import genshi from genshi.tests import builder, core, input, output, path, util from genshi.filters import tests as filters from genshi.template import tests as template suite = unittest.TestSuite() suite.addTest(builder.suite()) suite.addTest(core.suite()) suite.addTest(filters.suite()) suite.addTest(input.suite()) suite.addTest(output.suite()) suite.addTest(path.suite()) suite.addTest(template.suite()) suite.addTest(util.suite()) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
[ [ 1, 0, 0.4118, 0.0294, 0, 0.66, 0, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 2, 0, 0.6912, 0.4706, 0, 0.66, 0.5, 425, 0, 0, 1, 0, 0, 0, 17 ], [ 1, 1, 0.5, 0.0294, 1, 0.65, ...
[ "import unittest", "def suite():\n import genshi\n from genshi.tests import builder, core, input, output, path, util\n from genshi.filters import tests as filters\n from genshi.template import tests as template\n\n suite = unittest.TestSuite()\n suite.addTest(builder.suite())", " import gen...
# -*- coding: utf-8 -*- # # Copyright (C) 2006 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. import doctest import unittest from genshi.builder import Element, tag from genshi.core import Attrs, Markup, Stream from genshi.input import XML class ElementFactoryTestCase(unittest.TestCase): def test_link(self): link = tag.a(href='#', title='Foo', accesskey=None)('Bar') events = list(link.generate()) self.assertEqual((Stream.START, ('a', Attrs([('href', "#"), ('title', "Foo")])), (None, -1, -1)), events[0]) self.assertEqual((Stream.TEXT, 'Bar', (None, -1, -1)), events[1]) self.assertEqual((Stream.END, 'a', (None, -1, -1)), events[2]) def test_nonstring_attributes(self): """ Verify that if an attribute value is given as an int (or some other non-string type), it is coverted to a string when the stream is generated. 
""" events = list(tag.foo(id=3)) self.assertEqual((Stream.START, ('foo', Attrs([('id', '3')])), (None, -1, -1)), events[0]) def test_duplicate_attributes(self): link = tag.a(href='#1', href_='#2')('Bar') events = list(link.generate()) self.assertEqual((Stream.START, ('a', Attrs([('href', "#1")])), (None, -1, -1)), events[0]) self.assertEqual((Stream.TEXT, 'Bar', (None, -1, -1)), events[1]) self.assertEqual((Stream.END, 'a', (None, -1, -1)), events[2]) def test_stream_as_child(self): events = list(tag.span(XML('<b>Foo</b>')).generate()) self.assertEqual(5, len(events)) self.assertEqual((Stream.START, ('span', ())), events[0][:2]) self.assertEqual((Stream.START, ('b', ())), events[1][:2]) self.assertEqual((Stream.TEXT, 'Foo'), events[2][:2]) self.assertEqual((Stream.END, 'b'), events[3][:2]) self.assertEqual((Stream.END, 'span'), events[4][:2]) def test_markup_escape(self): m = Markup('See %s') % tag.a('genshi', href='http://genshi.edgwall.org') self.assertEqual(m, Markup('See <a href="http://genshi.edgwall.org">' 'genshi</a>')) def suite(): suite = unittest.TestSuite() suite.addTest(doctest.DocTestSuite(Element.__module__)) suite.addTest(unittest.makeSuite(ElementFactoryTestCase, 'test')) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
[ [ 1, 0, 0.1867, 0.0133, 0, 0.66, 0, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.2, 0.0133, 0, 0.66, 0.1429, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 1, 0, 0.2267, 0.0133, 0, 0.66,...
[ "import doctest", "import unittest", "from genshi.builder import Element, tag", "from genshi.core import Attrs, Markup, Stream", "from genshi.input import XML", "class ElementFactoryTestCase(unittest.TestCase):\n\n def test_link(self):\n link = tag.a(href='#', title='Foo', accesskey=None)('Bar')...
# -*- coding: utf-8 -*- # # Copyright (C) 2006-2009 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. """This module provides different kinds of serialization methods for XML event streams. """ from itertools import chain import re from genshi.core import escape, Attrs, Markup, Namespace, QName, StreamEventKind from genshi.core import START, END, TEXT, XML_DECL, DOCTYPE, START_NS, END_NS, \ START_CDATA, END_CDATA, PI, COMMENT, XML_NAMESPACE __all__ = ['encode', 'get_serializer', 'DocType', 'XMLSerializer', 'XHTMLSerializer', 'HTMLSerializer', 'TextSerializer'] __docformat__ = 'restructuredtext en' def encode(iterator, method='xml', encoding='utf-8', out=None): """Encode serializer output into a string. 
:param iterator: the iterator returned from serializing a stream (basically any iterator that yields unicode objects) :param method: the serialization method; determines how characters not representable in the specified encoding are treated :param encoding: how the output string should be encoded; if set to `None`, this method returns a `unicode` object :param out: a file-like object that the output should be written to instead of being returned as one big string; note that if this is a file or socket (or similar), the `encoding` must not be `None` (that is, the output must be encoded) :return: a `str` or `unicode` object (depending on the `encoding` parameter), or `None` if the `out` parameter is provided :since: version 0.4.1 :note: Changed in 0.5: added the `out` parameter """ if encoding is not None: errors = 'replace' if method != 'text' and not isinstance(method, TextSerializer): errors = 'xmlcharrefreplace' _encode = lambda string: string.encode(encoding, errors) else: _encode = lambda string: string if out is None: return _encode(''.join(list(iterator))) for chunk in iterator: out.write(_encode(chunk)) def get_serializer(method='xml', **kwargs): """Return a serializer object for the given method. :param method: the serialization method; can be either "xml", "xhtml", "html", "text", or a custom serializer class Any additional keyword arguments are passed to the serializer, and thus depend on the `method` parameter value. 
:see: `XMLSerializer`, `XHTMLSerializer`, `HTMLSerializer`, `TextSerializer` :since: version 0.4.1 """ if isinstance(method, basestring): method = {'xml': XMLSerializer, 'xhtml': XHTMLSerializer, 'html': HTMLSerializer, 'text': TextSerializer}[method.lower()] return method(**kwargs) class DocType(object): """Defines a number of commonly used DOCTYPE declarations as constants.""" HTML_STRICT = ( 'html', '-//W3C//DTD HTML 4.01//EN', 'http://www.w3.org/TR/html4/strict.dtd' ) HTML_TRANSITIONAL = ( 'html', '-//W3C//DTD HTML 4.01 Transitional//EN', 'http://www.w3.org/TR/html4/loose.dtd' ) HTML_FRAMESET = ( 'html', '-//W3C//DTD HTML 4.01 Frameset//EN', 'http://www.w3.org/TR/html4/frameset.dtd' ) HTML = HTML_STRICT HTML5 = ('html', None, None) XHTML_STRICT = ( 'html', '-//W3C//DTD XHTML 1.0 Strict//EN', 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd' ) XHTML_TRANSITIONAL = ( 'html', '-//W3C//DTD XHTML 1.0 Transitional//EN', 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd' ) XHTML_FRAMESET = ( 'html', '-//W3C//DTD XHTML 1.0 Frameset//EN', 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd' ) XHTML = XHTML_STRICT XHTML11 = ( 'html', '-//W3C//DTD XHTML 1.1//EN', 'http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd' ) SVG_FULL = ( 'svg', '-//W3C//DTD SVG 1.1//EN', 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd' ) SVG_BASIC = ( 'svg', '-//W3C//DTD SVG Basic 1.1//EN', 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-basic.dtd' ) SVG_TINY = ( 'svg', '-//W3C//DTD SVG Tiny 1.1//EN', 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-tiny.dtd' ) SVG = SVG_FULL @classmethod def get(cls, name): """Return the ``(name, pubid, sysid)`` tuple of the ``DOCTYPE`` declaration for the specified name. 
The following names are recognized in this version: * "html" or "html-strict" for the HTML 4.01 strict DTD * "html-transitional" for the HTML 4.01 transitional DTD * "html-frameset" for the HTML 4.01 frameset DTD * "html5" for the ``DOCTYPE`` proposed for HTML5 * "xhtml" or "xhtml-strict" for the XHTML 1.0 strict DTD * "xhtml-transitional" for the XHTML 1.0 transitional DTD * "xhtml-frameset" for the XHTML 1.0 frameset DTD * "xhtml11" for the XHTML 1.1 DTD * "svg" or "svg-full" for the SVG 1.1 DTD * "svg-basic" for the SVG Basic 1.1 DTD * "svg-tiny" for the SVG Tiny 1.1 DTD :param name: the name of the ``DOCTYPE`` :return: the ``(name, pubid, sysid)`` tuple for the requested ``DOCTYPE``, or ``None`` if the name is not recognized :since: version 0.4.1 """ return { 'html': cls.HTML, 'html-strict': cls.HTML_STRICT, 'html-transitional': DocType.HTML_TRANSITIONAL, 'html-frameset': DocType.HTML_FRAMESET, 'html5': cls.HTML5, 'xhtml': cls.XHTML, 'xhtml-strict': cls.XHTML_STRICT, 'xhtml-transitional': cls.XHTML_TRANSITIONAL, 'xhtml-frameset': cls.XHTML_FRAMESET, 'xhtml11': cls.XHTML11, 'svg': cls.SVG, 'svg-full': cls.SVG_FULL, 'svg-basic': cls.SVG_BASIC, 'svg-tiny': cls.SVG_TINY }.get(name.lower()) class XMLSerializer(object): """Produces XML text from an event stream. >>> from genshi.builder import tag >>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True)) >>> print(''.join(XMLSerializer()(elem.generate()))) <div><a href="foo"/><br/><hr noshade="True"/></div> """ _PRESERVE_SPACE = frozenset() def __init__(self, doctype=None, strip_whitespace=True, namespace_prefixes=None, cache=True): """Initialize the XML serializer. 
:param doctype: a ``(name, pubid, sysid)`` tuple that represents the DOCTYPE declaration that should be included at the top of the generated output, or the name of a DOCTYPE as defined in `DocType.get` :param strip_whitespace: whether extraneous whitespace should be stripped from the output :param cache: whether to cache the text output per event, which improves performance for repetitive markup :note: Changed in 0.4.2: The `doctype` parameter can now be a string. :note: Changed in 0.6: The `cache` parameter was added """ self.filters = [EmptyTagFilter()] if strip_whitespace: self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE)) self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes, cache=cache)) if doctype: self.filters.append(DocTypeInserter(doctype)) self.cache = cache def __call__(self, stream): have_decl = have_doctype = False in_cdata = False cache = {} cache_get = cache.get if self.cache: def _emit(kind, input, output): cache[kind, input] = output return output else: def _emit(kind, input, output): return output for filter_ in self.filters: stream = filter_(stream) for kind, data, pos in stream: cached = cache_get((kind, data)) if cached is not None: yield cached elif kind is START or kind is EMPTY: tag, attrib = data buf = ['<', tag] for attr, value in attrib: buf += [' ', attr, '="', escape(value), '"'] buf.append(kind is EMPTY and '/>' or '>') yield _emit(kind, data, Markup(''.join(buf))) elif kind is END: yield _emit(kind, data, Markup('</%s>' % data)) elif kind is TEXT: if in_cdata: yield _emit(kind, data, data) else: yield _emit(kind, data, escape(data, quotes=False)) elif kind is COMMENT: yield _emit(kind, data, Markup('<!--%s-->' % data)) elif kind is XML_DECL and not have_decl: version, encoding, standalone = data buf = ['<?xml version="%s"' % version] if encoding: buf.append(' encoding="%s"' % encoding) if standalone != -1: standalone = standalone and 'yes' or 'no' buf.append(' standalone="%s"' % standalone) buf.append('?>\n') 
yield Markup(''.join(buf)) have_decl = True elif kind is DOCTYPE and not have_doctype: name, pubid, sysid = data buf = ['<!DOCTYPE %s'] if pubid: buf.append(' PUBLIC "%s"') elif sysid: buf.append(' SYSTEM') if sysid: buf.append(' "%s"') buf.append('>\n') yield Markup(''.join(buf)) % tuple([p for p in data if p]) have_doctype = True elif kind is START_CDATA: yield Markup('<![CDATA[') in_cdata = True elif kind is END_CDATA: yield Markup(']]>') in_cdata = False elif kind is PI: yield _emit(kind, data, Markup('<?%s %s?>' % data)) class XHTMLSerializer(XMLSerializer): """Produces XHTML text from an event stream. >>> from genshi.builder import tag >>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True)) >>> print(''.join(XHTMLSerializer()(elem.generate()))) <div><a href="foo"></a><br /><hr noshade="noshade" /></div> """ _EMPTY_ELEMS = frozenset(['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr', 'img', 'input', 'isindex', 'link', 'meta', 'param']) _BOOLEAN_ATTRS = frozenset(['selected', 'checked', 'compact', 'declare', 'defer', 'disabled', 'ismap', 'multiple', 'nohref', 'noresize', 'noshade', 'nowrap']) _PRESERVE_SPACE = frozenset([ QName('pre'), QName('http://www.w3.org/1999/xhtml}pre'), QName('textarea'), QName('http://www.w3.org/1999/xhtml}textarea') ]) def __init__(self, doctype=None, strip_whitespace=True, namespace_prefixes=None, drop_xml_decl=True, cache=True): super(XHTMLSerializer, self).__init__(doctype, False) self.filters = [EmptyTagFilter()] if strip_whitespace: self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE)) namespace_prefixes = namespace_prefixes or {} namespace_prefixes['http://www.w3.org/1999/xhtml'] = '' self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes, cache=cache)) if doctype: self.filters.append(DocTypeInserter(doctype)) self.drop_xml_decl = drop_xml_decl self.cache = cache def __call__(self, stream): boolean_attrs = self._BOOLEAN_ATTRS empty_elems = self._EMPTY_ELEMS drop_xml_decl = self.drop_xml_decl 
have_decl = have_doctype = False in_cdata = False cache = {} cache_get = cache.get if self.cache: def _emit(kind, input, output): cache[kind, input] = output return output else: def _emit(kind, input, output): return output for filter_ in self.filters: stream = filter_(stream) for kind, data, pos in stream: cached = cache_get((kind, data)) if cached is not None: yield cached elif kind is START or kind is EMPTY: tag, attrib = data buf = ['<', tag] for attr, value in attrib: if attr in boolean_attrs: value = attr elif attr == 'xml:lang' and 'lang' not in attrib: buf += [' lang="', escape(value), '"'] elif attr == 'xml:space': continue buf += [' ', attr, '="', escape(value), '"'] if kind is EMPTY: if tag in empty_elems: buf.append(' />') else: buf.append('></%s>' % tag) else: buf.append('>') yield _emit(kind, data, Markup(''.join(buf))) elif kind is END: yield _emit(kind, data, Markup('</%s>' % data)) elif kind is TEXT: if in_cdata: yield _emit(kind, data, data) else: yield _emit(kind, data, escape(data, quotes=False)) elif kind is COMMENT: yield _emit(kind, data, Markup('<!--%s-->' % data)) elif kind is DOCTYPE and not have_doctype: name, pubid, sysid = data buf = ['<!DOCTYPE %s'] if pubid: buf.append(' PUBLIC "%s"') elif sysid: buf.append(' SYSTEM') if sysid: buf.append(' "%s"') buf.append('>\n') yield Markup(''.join(buf)) % tuple([p for p in data if p]) have_doctype = True elif kind is XML_DECL and not have_decl and not drop_xml_decl: version, encoding, standalone = data buf = ['<?xml version="%s"' % version] if encoding: buf.append(' encoding="%s"' % encoding) if standalone != -1: standalone = standalone and 'yes' or 'no' buf.append(' standalone="%s"' % standalone) buf.append('?>\n') yield Markup(''.join(buf)) have_decl = True elif kind is START_CDATA: yield Markup('<![CDATA[') in_cdata = True elif kind is END_CDATA: yield Markup(']]>') in_cdata = False elif kind is PI: yield _emit(kind, data, Markup('<?%s %s?>' % data)) class HTMLSerializer(XHTMLSerializer): 
"""Produces HTML text from an event stream. >>> from genshi.builder import tag >>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True)) >>> print(''.join(HTMLSerializer()(elem.generate()))) <div><a href="foo"></a><br><hr noshade></div> """ _NOESCAPE_ELEMS = frozenset([ QName('script'), QName('http://www.w3.org/1999/xhtml}script'), QName('style'), QName('http://www.w3.org/1999/xhtml}style') ]) def __init__(self, doctype=None, strip_whitespace=True, cache=True): """Initialize the HTML serializer. :param doctype: a ``(name, pubid, sysid)`` tuple that represents the DOCTYPE declaration that should be included at the top of the generated output :param strip_whitespace: whether extraneous whitespace should be stripped from the output :param cache: whether to cache the text output per event, which improves performance for repetitive markup :note: Changed in 0.6: The `cache` parameter was added """ super(HTMLSerializer, self).__init__(doctype, False) self.filters = [EmptyTagFilter()] if strip_whitespace: self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE, self._NOESCAPE_ELEMS)) self.filters.append(NamespaceFlattener(prefixes={ 'http://www.w3.org/1999/xhtml': '' }, cache=cache)) if doctype: self.filters.append(DocTypeInserter(doctype)) self.cache = True def __call__(self, stream): boolean_attrs = self._BOOLEAN_ATTRS empty_elems = self._EMPTY_ELEMS noescape_elems = self._NOESCAPE_ELEMS have_doctype = False noescape = False cache = {} cache_get = cache.get if self.cache: def _emit(kind, input, output): cache[kind, input] = output return output else: def _emit(kind, input, output): return output for filter_ in self.filters: stream = filter_(stream) for kind, data, _ in stream: output = cache_get((kind, data)) if output is not None: yield output if (kind is START or kind is EMPTY) \ and data[0] in noescape_elems: noescape = True elif kind is END: noescape = False elif kind is START or kind is EMPTY: tag, attrib = data buf = ['<', tag] for attr, value in 
attrib: if attr in boolean_attrs: if value: buf += [' ', attr] elif ':' in attr: if attr == 'xml:lang' and 'lang' not in attrib: buf += [' lang="', escape(value), '"'] elif attr != 'xmlns': buf += [' ', attr, '="', escape(value), '"'] buf.append('>') if kind is EMPTY: if tag not in empty_elems: buf.append('</%s>' % tag) yield _emit(kind, data, Markup(''.join(buf))) if tag in noescape_elems: noescape = True elif kind is END: yield _emit(kind, data, Markup('</%s>' % data)) noescape = False elif kind is TEXT: if noescape: yield _emit(kind, data, data) else: yield _emit(kind, data, escape(data, quotes=False)) elif kind is COMMENT: yield _emit(kind, data, Markup('<!--%s-->' % data)) elif kind is DOCTYPE and not have_doctype: name, pubid, sysid = data buf = ['<!DOCTYPE %s'] if pubid: buf.append(' PUBLIC "%s"') elif sysid: buf.append(' SYSTEM') if sysid: buf.append(' "%s"') buf.append('>\n') yield Markup(''.join(buf)) % tuple([p for p in data if p]) have_doctype = True elif kind is PI: yield _emit(kind, data, Markup('<?%s %s?>' % data)) class TextSerializer(object): """Produces plain text from an event stream. Only text events are included in the output. 
Unlike the other serializer, special XML characters are not escaped: >>> from genshi.builder import tag >>> elem = tag.div(tag.a('<Hello!>', href='foo'), tag.br) >>> print(elem) <div><a href="foo">&lt;Hello!&gt;</a><br/></div> >>> print(''.join(TextSerializer()(elem.generate()))) <Hello!> If text events contain literal markup (instances of the `Markup` class), that markup is by default passed through unchanged: >>> elem = tag.div(Markup('<a href="foo">Hello &amp; Bye!</a><br/>')) >>> print(elem.generate().render(TextSerializer, encoding=None)) <a href="foo">Hello &amp; Bye!</a><br/> You can use the ``strip_markup`` to change this behavior, so that tags and entities are stripped from the output (or in the case of entities, replaced with the equivalent character): >>> print(elem.generate().render(TextSerializer, strip_markup=True, ... encoding=None)) Hello & Bye! """ def __init__(self, strip_markup=False): """Create the serializer. :param strip_markup: whether markup (tags and encoded characters) found in the text should be removed """ self.strip_markup = strip_markup def __call__(self, stream): strip_markup = self.strip_markup for event in stream: if event[0] is TEXT: data = event[1] if strip_markup and type(data) is Markup: data = data.striptags().stripentities() yield unicode(data) class EmptyTagFilter(object): """Combines `START` and `STOP` events into `EMPTY` events for elements that have no contents. """ EMPTY = StreamEventKind('EMPTY') def __call__(self, stream): prev = (None, None, None) for ev in stream: if prev[0] is START: if ev[0] is END: prev = EMPTY, prev[1], prev[2] yield prev continue else: yield prev if ev[0] is not START: yield ev prev = ev EMPTY = EmptyTagFilter.EMPTY class NamespaceFlattener(object): r"""Output stream filter that removes namespace information from the stream, instead adding namespace attributes and prefixes as needed. 
:param prefixes: optional mapping of namespace URIs to prefixes >>> from genshi.input import XML >>> xml = XML('''<doc xmlns="NS1" xmlns:two="NS2"> ... <two:item/> ... </doc>''') >>> for kind, data, pos in NamespaceFlattener()(xml): ... print('%s %r' % (kind, data)) START (u'doc', Attrs([('xmlns', u'NS1'), (u'xmlns:two', u'NS2')])) TEXT u'\n ' START (u'two:item', Attrs()) END u'two:item' TEXT u'\n' END u'doc' """ def __init__(self, prefixes=None, cache=True): self.prefixes = {XML_NAMESPACE.uri: 'xml'} if prefixes is not None: self.prefixes.update(prefixes) self.cache = cache def __call__(self, stream): cache = {} cache_get = cache.get if self.cache: def _emit(kind, input, output, pos): cache[kind, input] = output return kind, output, pos else: def _emit(kind, input, output, pos): return output prefixes = dict([(v, [k]) for k, v in self.prefixes.items()]) namespaces = {XML_NAMESPACE.uri: ['xml']} def _push_ns(prefix, uri): namespaces.setdefault(uri, []).append(prefix) prefixes.setdefault(prefix, []).append(uri) cache.clear() def _pop_ns(prefix): uris = prefixes.get(prefix) uri = uris.pop() if not uris: del prefixes[prefix] if uri not in uris or uri != uris[-1]: uri_prefixes = namespaces[uri] uri_prefixes.pop() if not uri_prefixes: del namespaces[uri] cache.clear() return uri ns_attrs = [] _push_ns_attr = ns_attrs.append def _make_ns_attr(prefix, uri): return 'xmlns%s' % (prefix and ':%s' % prefix or ''), uri def _gen_prefix(): val = 0 while 1: val += 1 yield 'ns%d' % val _gen_prefix = _gen_prefix().next for kind, data, pos in stream: output = cache_get((kind, data)) if output is not None: yield kind, output, pos elif kind is START or kind is EMPTY: tag, attrs = data tagname = tag.localname tagns = tag.namespace if tagns: if tagns in namespaces: prefix = namespaces[tagns][-1] if prefix: tagname = '%s:%s' % (prefix, tagname) else: _push_ns_attr(('xmlns', tagns)) _push_ns('', tagns) new_attrs = [] for attr, value in attrs: attrname = attr.localname attrns = 
attr.namespace if attrns: if attrns not in namespaces: prefix = _gen_prefix() _push_ns(prefix, attrns) _push_ns_attr(('xmlns:%s' % prefix, attrns)) else: prefix = namespaces[attrns][-1] if prefix: attrname = '%s:%s' % (prefix, attrname) new_attrs.append((attrname, value)) yield _emit(kind, data, (tagname, Attrs(ns_attrs + new_attrs)), pos) del ns_attrs[:] elif kind is END: tagname = data.localname tagns = data.namespace if tagns: prefix = namespaces[tagns][-1] if prefix: tagname = '%s:%s' % (prefix, tagname) yield _emit(kind, data, tagname, pos) elif kind is START_NS: prefix, uri = data if uri not in namespaces: prefix = prefixes.get(uri, [prefix])[-1] _push_ns_attr(_make_ns_attr(prefix, uri)) _push_ns(prefix, uri) elif kind is END_NS: if data in prefixes: uri = _pop_ns(data) if ns_attrs: attr = _make_ns_attr(data, uri) if attr in ns_attrs: ns_attrs.remove(attr) else: yield kind, data, pos class WhitespaceFilter(object): """A filter that removes extraneous ignorable white space from the stream. """ def __init__(self, preserve=None, noescape=None): """Initialize the filter. :param preserve: a set or sequence of tag names for which white-space should be preserved :param noescape: a set or sequence of tag names for which text content should not be escaped The `noescape` set is expected to refer to elements that cannot contain further child elements (such as ``<style>`` or ``<script>`` in HTML documents). 
""" if preserve is None: preserve = [] self.preserve = frozenset(preserve) if noescape is None: noescape = [] self.noescape = frozenset(noescape) def __call__(self, stream, ctxt=None, space=XML_NAMESPACE['space'], trim_trailing_space=re.compile('[ \t]+(?=\n)').sub, collapse_lines=re.compile('\n{2,}').sub): mjoin = Markup('').join preserve_elems = self.preserve preserve = 0 noescape_elems = self.noescape noescape = False textbuf = [] push_text = textbuf.append pop_text = textbuf.pop for kind, data, pos in chain(stream, [(None, None, None)]): if kind is TEXT: if noescape: data = Markup(data) push_text(data) else: if textbuf: if len(textbuf) > 1: text = mjoin(textbuf, escape_quotes=False) del textbuf[:] else: text = escape(pop_text(), quotes=False) if not preserve: text = collapse_lines('\n', trim_trailing_space('', text)) yield TEXT, Markup(text), pos if kind is START: tag, attrs = data if preserve or (tag in preserve_elems or attrs.get(space) == 'preserve'): preserve += 1 if not noescape and tag in noescape_elems: noescape = True elif kind is END: noescape = False if preserve: preserve -= 1 elif kind is START_CDATA: noescape = True elif kind is END_CDATA: noescape = False if kind: yield kind, data, pos class DocTypeInserter(object): """A filter that inserts the DOCTYPE declaration in the correct location, after the XML declaration. """ def __init__(self, doctype): """Initialize the filter. :param doctype: DOCTYPE as a string or DocType object. """ if isinstance(doctype, basestring): doctype = DocType.get(doctype) self.doctype_event = (DOCTYPE, doctype, (None, -1, -1)) def __call__(self, stream): doctype_inserted = False for kind, data, pos in stream: if not doctype_inserted: doctype_inserted = True if kind is XML_DECL: yield (kind, data, pos) yield self.doctype_event continue yield self.doctype_event yield (kind, data, pos) if not doctype_inserted: yield self.doctype_event
[ [ 8, 0, 0.0179, 0.0036, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0215, 0.0012, 0, 0.66, 0.0556, 808, 0, 1, 0, 0, 808, 0, 0 ], [ 1, 0, 0.0227, 0.0012, 0, 0.66...
[ "\"\"\"This module provides different kinds of serialization methods for XML event\nstreams.\n\"\"\"", "from itertools import chain", "import re", "from genshi.core import escape, Attrs, Markup, Namespace, QName, StreamEventKind", "from genshi.core import START, END, TEXT, XML_DECL, DOCTYPE, START_NS, END_N...
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.

"""Various utility classes and functions."""

import htmlentitydefs as entities
import re

__docformat__ = 'restructuredtext en'


class LRUCache(dict):
    """A dictionary-like object that stores only a certain number of items, and
    discards its least recently used item when full.

    >>> cache = LRUCache(3)
    >>> cache['A'] = 0
    >>> cache['B'] = 1
    >>> cache['C'] = 2
    >>> len(cache)
    3

    >>> cache['A']
    0

    Adding new items to the cache does not increase its size. Instead, the least
    recently used item is dropped:

    >>> cache['D'] = 3
    >>> len(cache)
    3
    >>> 'B' in cache
    False

    Iterating over the cache returns the keys, starting with the most recently
    used:

    >>> for key in cache:
    ...     print(key)
    D
    A
    C

    This code is based on the LRUCache class from ``myghtyutils.util``, written
    by Mike Bayer and released under the MIT license. See:

      http://svn.myghty.org/myghtyutils/trunk/lib/myghtyutils/util.py
    """

    class _Item(object):
        # Doubly-linked-list node: `prv`/`nxt` order the items from most
        # recently used (head) to least recently used (tail).
        def __init__(self, key, value):
            self.prv = self.nxt = None
            self.key = key
            self.value = value

        def __repr__(self):
            return repr(self.value)

    def __init__(self, capacity):
        # _dict maps key -> _Item; the linked list tracks recency.
        self._dict = dict()
        self.capacity = capacity
        self.head = None
        self.tail = None

    def __contains__(self, key):
        return key in self._dict

    def __iter__(self):
        # Walk the recency list from most to least recently used.
        cur = self.head
        while cur:
            yield cur.key
            cur = cur.nxt

    def __len__(self):
        return len(self._dict)

    def __getitem__(self, key):
        # A successful lookup counts as a "use": promote the item to head.
        item = self._dict[key]
        self._update_item(item)
        return item.value

    def __setitem__(self, key, value):
        item = self._dict.get(key)
        if item is None:
            # New key: create a node, link it at the head, and (via
            # _insert_item) evict the tail if over capacity.
            item = self._Item(key, value)
            self._dict[key] = item
            self._insert_item(item)
        else:
            # Existing key: overwrite the value and promote the node.
            item.value = value
            self._update_item(item)
            self._manage_size()

    def __repr__(self):
        return repr(self._dict)

    def _insert_item(self, item):
        # Link `item` in as the new head of the recency list.
        item.prv = None
        item.nxt = self.head
        if self.head is not None:
            self.head.prv = item
        else:
            self.tail = item
        self.head = item
        self._manage_size()

    def _manage_size(self):
        # Evict least-recently-used items (from the tail) until the cache is
        # back within capacity.
        while len(self._dict) > self.capacity:
            del self._dict[self.tail.key]
            if self.tail != self.head:
                self.tail = self.tail.prv
                self.tail.nxt = None
            else:
                self.head = self.tail = None

    def _update_item(self, item):
        # Move `item` to the head of the recency list (most recently used).
        if self.head == item:
            return
        # Unlink from its current position ...
        prv = item.prv
        prv.nxt = item.nxt
        if item.nxt is not None:
            item.nxt.prv = prv
        else:
            self.tail = prv
        # ... and relink at the head.
        item.prv = None
        item.nxt = self.head
        self.head.prv = self.head = item


def flatten(items):
    """Flattens a potentially nested sequence into a flat list.

    :param items: the sequence to flatten

    >>> flatten((1, 2))
    [1, 2]
    >>> flatten([1, (2, 3), 4])
    [1, 2, 3, 4]
    >>> flatten([1, (2, [3, 4]), 5])
    [1, 2, 3, 4, 5]
    """
    retval = []
    for item in items:
        if isinstance(item, (frozenset, list, set, tuple)):
            retval += flatten(item)
        else:
            retval.append(item)
    return retval


def plaintext(text, keeplinebreaks=True):
    """Return the text with all entities and tags removed.

    >>> plaintext('<b>1 &lt; 2</b>')
    u'1 < 2'

    The `keeplinebreaks` parameter can be set to ``False`` to replace any line
    breaks by simple spaces:

    >>> plaintext('''<b>1
    ... &lt;
    ... 2</b>''', keeplinebreaks=False)
    u'1 < 2'

    :param text: the text to convert to plain text
    :param keeplinebreaks: whether line breaks in the text should be kept intact
    :return: the text with tags and entities removed
    """
    # Strip tags first so entity references inside attribute values vanish
    # along with their tags; then decode the remaining entities.
    text = stripentities(striptags(text))
    if not keeplinebreaks:
        text = text.replace('\n', ' ')
    return text


_STRIPENTITIES_RE = re.compile(r'&(?:#((?:\d+)|(?:[xX][0-9a-fA-F]+));?|(\w+);)')


def stripentities(text, keepxmlentities=False):
    """Return a copy of the given text with any character or numeric entities
    replaced by the equivalent UTF-8 characters.

    >>> stripentities('1 &lt; 2')
    u'1 < 2'
    >>> stripentities('more &hellip;')
    u'more \u2026'
    >>> stripentities('&#8230;')
    u'\u2026'
    >>> stripentities('&#x2026;')
    u'\u2026'

    If the `keepxmlentities` parameter is provided and is a truth value, the
    core XML entities (&amp;, &apos;, &gt;, &lt; and &quot;) are left intact.

    >>> stripentities('1 &lt; 2 &hellip;', keepxmlentities=True)
    u'1 &lt; 2 \u2026'
    """
    def _replace_entity(match):
        if match.group(1): # numeric entity
            ref = match.group(1)
            # Fix: the regex accepts both '&#x...;' and '&#X...;', but the
            # previous check only recognized lowercase 'x', so an uppercase
            # hex reference fell through to int(ref, 10) and raised
            # ValueError.  Treat either case as hexadecimal.
            if ref[0] in 'xX':
                ref = int(ref[1:], 16)
            else:
                ref = int(ref, 10)
            return unichr(ref)
        else: # character entity
            ref = match.group(2)
            if keepxmlentities and ref in ('amp', 'apos', 'gt', 'lt', 'quot'):
                return '&%s;' % ref
            try:
                return unichr(entities.name2codepoint[ref])
            except KeyError:
                # Unknown entity name: either re-escape it (when keeping XML
                # entities) or pass the bare name through unchanged.
                if keepxmlentities:
                    return '&amp;%s;' % ref
                else:
                    return ref
    return _STRIPENTITIES_RE.sub(_replace_entity, text)


_STRIPTAGS_RE = re.compile(r'(<!--.*?-->|<[^>]*>)')


def striptags(text):
    """Return a copy of the text with any XML/HTML tags removed.

    >>> striptags('<span>Foo</span> bar')
    'Foo bar'
    >>> striptags('<span class="bar">Foo</span>')
    'Foo'
    >>> striptags('Foo<br />')
    'Foo'

    HTML/XML comments are stripped, too:

    >>> striptags('<!-- <blub>hehe</blah> -->test')
    'test'

    :param text: the string to remove tags from
    :return: the text with tags removed
    """
    return _STRIPTAGS_RE.sub('', text)


def stringrepr(string):
    """Return a quoted repr-style form of *string*, prefixed with ``u`` when
    the ASCII encoding had to backslash-escape characters (i.e. the input
    contained non-ASCII text).
    """
    # NOTE(review): relies on Python 2 str/unicode semantics — the length
    # comparison detects whether 'backslashreplace' expanded any character.
    ascii = string.encode('ascii', 'backslashreplace')
    quoted = "'" + ascii.replace("'", "\\'") + "'"
    if len(ascii) > len(string):
        return 'u' + quoted
    return quoted


# Compatibility fallback implementations for older Python versions
# (any()/all() are builtins only from Python 2.5 on).

try:
    all = all
    any = any
except NameError:
    def any(S):
        for x in S:
            if x:
                return True
        return False

    def all(S):
        for x in S:
            if not x:
                return False
        return True
[ [ 8, 0, 0.0511, 0.0036, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0584, 0.0036, 0, 0.66, 0.0833, 744, 0, 1, 0, 0, 744, 0, 0 ], [ 1, 0, 0.062, 0.0036, 0, 0.66,...
[ "\"\"\"Various utility classes and functions.\"\"\"", "import htmlentitydefs as entities", "import re", "__docformat__ = 'restructuredtext en'", "class LRUCache(dict):\n \"\"\"A dictionary-like object that stores only a certain number of items, and\n discards its least recently used item when full.\n ...
# -*- coding: utf-8 -*- # # Copyright (C) 2006-2009 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. """Basic support for evaluating XPath expressions against streams. >>> from genshi.input import XML >>> doc = XML('''<doc> ... <items count="4"> ... <item status="new"> ... <summary>Foo</summary> ... </item> ... <item status="closed"> ... <summary>Bar</summary> ... </item> ... <item status="closed" resolution="invalid"> ... <summary>Baz</summary> ... </item> ... <item status="closed" resolution="fixed"> ... <summary>Waz</summary> ... </item> ... </items> ... </doc>''') >>> print(doc.select('items/item[@status="closed" and ' ... '(@resolution="invalid" or not(@resolution))]/summary/text()')) BarBaz Because the XPath engine operates on markup streams (as opposed to tree structures), it only implements a subset of the full XPath 1.0 language. 
""" from collections import deque try: reduce # builtin in Python < 3 except NameError: from functools import reduce from math import ceil, floor import operator import re from itertools import chain from genshi.core import Stream, Attrs, Namespace, QName from genshi.core import START, END, TEXT, START_NS, END_NS, COMMENT, PI, \ START_CDATA, END_CDATA __all__ = ['Path', 'PathSyntaxError'] __docformat__ = 'restructuredtext en' class Axis(object): """Defines constants for the various supported XPath axes.""" ATTRIBUTE = 'attribute' CHILD = 'child' DESCENDANT = 'descendant' DESCENDANT_OR_SELF = 'descendant-or-self' SELF = 'self' @classmethod def forname(cls, name): """Return the axis constant for the given name, or `None` if no such axis was defined. """ return getattr(cls, name.upper().replace('-', '_'), None) ATTRIBUTE = Axis.ATTRIBUTE CHILD = Axis.CHILD DESCENDANT = Axis.DESCENDANT DESCENDANT_OR_SELF = Axis.DESCENDANT_OR_SELF SELF = Axis.SELF class GenericStrategy(object): @classmethod def supports(cls, path): return True def __init__(self, path): self.path = path def test(self, ignore_context): p = self.path if ignore_context: if p[0][0] is ATTRIBUTE: steps = [_DOTSLASHSLASH] + p else: steps = [(DESCENDANT_OR_SELF, p[0][1], p[0][2])] + p[1:] elif p[0][0] is CHILD or p[0][0] is ATTRIBUTE \ or p[0][0] is DESCENDANT: steps = [_DOTSLASH] + p else: steps = p # for node it contains all positions of xpath expression # where its child should start checking for matches # with list of corresponding context counters # there can be many of them, because position that is from # descendant-like axis can be achieved from different nodes # for example <a><a><b/></a></a> should match both //a//b[1] # and //a//b[2] # positions always form increasing sequence (invariant) stack = [[(0, [[]])]] def _test(event, namespaces, variables, updateonly=False): kind, data, pos = event[:3] retval = None # Manage the stack that tells us "where we are" in the stream if kind is END: if stack: 
stack.pop() return None if kind is START_NS or kind is END_NS \ or kind is START_CDATA or kind is END_CDATA: # should we make namespaces work? return None pos_queue = deque([(pos, cou, []) for pos, cou in stack[-1]]) next_pos = [] # length of real part of path - we omit attribute axis real_len = len(steps) - ((steps[-1][0] == ATTRIBUTE) or 1 and 0) last_checked = -1 # places where we have to check for match, are these # provided by parent while pos_queue: x, pcou, mcou = pos_queue.popleft() axis, nodetest, predicates = steps[x] # we need to push descendant-like positions from parent # further if (axis is DESCENDANT or axis is DESCENDANT_OR_SELF) and pcou: if next_pos and next_pos[-1][0] == x: next_pos[-1][1].extend(pcou) else: next_pos.append((x, pcou)) # nodetest first if not nodetest(kind, data, pos, namespaces, variables): continue # counters packs that were already bad missed = set() counters_len = len(pcou) + len(mcou) # number of counters - we have to create one # for every context position based predicate cnum = 0 # tells if we have match with position x matched = True if predicates: for predicate in predicates: pretval = predicate(kind, data, pos, namespaces, variables) if type(pretval) is float: # FIXME <- need to check # this for other types that # can be coerced to float # each counter pack needs to be checked for i, cou in enumerate(chain(pcou, mcou)): # it was bad before if i in missed: continue if len(cou) < cnum + 1: cou.append(0) cou[cnum] += 1 # it is bad now if cou[cnum] != int(pretval): missed.add(i) # none of counters pack was good if len(missed) == counters_len: pretval = False cnum += 1 if not pretval: matched = False break if not matched: continue # counter for next position with current node as context node child_counter = [] if x + 1 == real_len: # we reached end of expression, because x + 1 # is equal to the length of expression matched = True axis, nodetest, predicates = steps[-1] if axis is ATTRIBUTE: matched = nodetest(kind, data, pos, 
namespaces, variables) if matched: retval = matched else: next_axis = steps[x + 1][0] # if next axis allows matching self we have # to add next position to our queue if next_axis is DESCENDANT_OR_SELF or next_axis is SELF: if not pos_queue or pos_queue[0][0] > x + 1: pos_queue.appendleft((x + 1, [], [child_counter])) else: pos_queue[0][2].append(child_counter) # if axis is not self we have to add it to child's list if next_axis is not SELF: next_pos.append((x + 1, [child_counter])) if kind is START: stack.append(next_pos) return retval return _test class SimplePathStrategy(object): """Strategy for path with only local names, attributes and text nodes.""" @classmethod def supports(cls, path): if path[0][0] is ATTRIBUTE: return False allowed_tests = (LocalNameTest, CommentNodeTest, TextNodeTest) for _, nodetest, predicates in path: if predicates: return False if not isinstance(nodetest, allowed_tests): return False return True def __init__(self, path): # fragments is list of tuples (fragment, pi, attr, self_beginning) # fragment is list of nodetests for fragment of path with only # child:: axes between # pi is KMP partial match table for this fragment # attr is attribute nodetest if fragment ends with @ and None otherwise # self_beginning is True if axis for first fragment element # was self (first fragment) or descendant-or-self (farther fragment) self.fragments = [] self_beginning = False fragment = [] def nodes_equal(node1, node2): """Tests if two node tests are equal""" if type(node1) is not type(node2): return False if type(node1) == LocalNameTest: return node1.name == node2.name return True def calculate_pi(f): """KMP prefix calculation for table""" # the indexes in prefix table are shifted by one # in comparision with common implementations # pi[i] = NORMAL_PI[i + 1] if len(f) == 0: return [] pi = [0] s = 0 for i in range(1, len(f)): while s > 0 and not nodes_equal(f[s], f[i]): s = pi[s-1] if nodes_equal(f[s], f[i]): s += 1 pi.append(s) return pi for axis in 
path: if axis[0] is SELF: if len(fragment) != 0: # if element is not first in fragment it has to be # the same as previous one # for example child::a/self::b is always wrong if axis[1] != fragment[-1][1]: self.fragments = None return else: self_beginning = True fragment.append(axis[1]) elif axis[0] is CHILD: fragment.append(axis[1]) elif axis[0] is ATTRIBUTE: pi = calculate_pi(fragment) self.fragments.append((fragment, pi, axis[1], self_beginning)) # attribute has always to be at the end, so we can jump out return else: pi = calculate_pi(fragment) self.fragments.append((fragment, pi, None, self_beginning)) fragment = [axis[1]] if axis[0] is DESCENDANT: self_beginning = False else: # DESCENDANT_OR_SELF self_beginning = True pi = calculate_pi(fragment) self.fragments.append((fragment, pi, None, self_beginning)) def test(self, ignore_context): # stack of triples (fid, p, ic) # fid is index of current fragment # p is position in this fragment # ic is if we ignore context in this fragment stack = [] stack_push = stack.append stack_pop = stack.pop frags = self.fragments frags_len = len(frags) def _test(event, namespaces, variables, updateonly=False): # expression found impossible during init if frags is None: return None kind, data, pos = event[:3] # skip events we don't care about if kind is END: if stack: stack_pop() return None if kind is START_NS or kind is END_NS \ or kind is START_CDATA or kind is END_CDATA: return None if not stack: # root node, nothing on stack, special case fid = 0 # skip empty fragments (there can be actually only one) while not frags[fid][0]: fid += 1 p = 0 # empty fragment means descendant node at beginning ic = ignore_context or (fid > 0) # expression can match first node, if first axis is self::, # descendant-or-self:: or if ignore_context is True and # axis is not descendant:: if not frags[fid][3] and (not ignore_context or fid > 0): # axis is not self-beggining, we have to skip this node stack_push((fid, p, ic)) return None else: # take 
position of parent fid, p, ic = stack[-1] if fid is not None and not ic: # fragment not ignoring context - we can't jump back frag, pi, attrib, _ = frags[fid] frag_len = len(frag) if p == frag_len: # that probably means empty first fragment pass elif frag[p](kind, data, pos, namespaces, variables): # match, so we can go further p += 1 else: # not matched, so there will be no match in subtree fid, p = None, None if p == frag_len and fid + 1 != frags_len: # we made it to end of fragment, we can go to following fid += 1 p = 0 ic = True if fid is None: # there was no match in fragment not ignoring context if kind is START: stack_push((fid, p, ic)) return None if ic: # we are in fragment ignoring context while True: frag, pi, attrib, _ = frags[fid] frag_len = len(frag) # KMP new "character" while p > 0 and (p >= frag_len or not \ frag[p](kind, data, pos, namespaces, variables)): p = pi[p-1] if frag[p](kind, data, pos, namespaces, variables): p += 1 if p == frag_len: # end of fragment reached if fid + 1 == frags_len: # that was last fragment break else: fid += 1 p = 0 ic = True if not frags[fid][3]: # next fragment not self-beginning break else: break if kind is START: # we have to put new position on stack, for children if not ic and fid + 1 == frags_len and p == frag_len: # it is end of the only, not context ignoring fragment # so there will be no matches in subtree stack_push((None, None, ic)) else: stack_push((fid, p, ic)) # have we reached the end of the last fragment? 
if fid + 1 == frags_len and p == frag_len: if attrib: # attribute ended path, return value return attrib(kind, data, pos, namespaces, variables) return True return None return _test class SingleStepStrategy(object): @classmethod def supports(cls, path): return len(path) == 1 def __init__(self, path): self.path = path def test(self, ignore_context): steps = self.path if steps[0][0] is ATTRIBUTE: steps = [_DOTSLASH] + steps select_attr = steps[-1][0] is ATTRIBUTE and steps[-1][1] or None # for every position in expression stores counters' list # it is used for position based predicates counters = [] depth = [0] def _test(event, namespaces, variables, updateonly=False): kind, data, pos = event[:3] # Manage the stack that tells us "where we are" in the stream if kind is END: if not ignore_context: depth[0] -= 1 return None elif kind is START_NS or kind is END_NS \ or kind is START_CDATA or kind is END_CDATA: # should we make namespaces work? return None if not ignore_context: outside = (steps[0][0] is SELF and depth[0] != 0) \ or (steps[0][0] is CHILD and depth[0] != 1) \ or (steps[0][0] is DESCENDANT and depth[0] < 1) if kind is START: depth[0] += 1 if outside: return None axis, nodetest, predicates = steps[0] if not nodetest(kind, data, pos, namespaces, variables): return None if predicates: cnum = 0 for predicate in predicates: pretval = predicate(kind, data, pos, namespaces, variables) if type(pretval) is float: # FIXME <- need to check this # for other types that can be # coerced to float if len(counters) < cnum + 1: counters.append(0) counters[cnum] += 1 if counters[cnum] != int(pretval): pretval = False cnum += 1 if not pretval: return None if select_attr: return select_attr(kind, data, pos, namespaces, variables) return True return _test class Path(object): """Implements basic XPath support on streams. 
Instances of this class represent a "compiled" XPath expression, and provide methods for testing the path against a stream, as well as extracting a substream matching that path. """ STRATEGIES = (SingleStepStrategy, SimplePathStrategy, GenericStrategy) def __init__(self, text, filename=None, lineno=-1): """Create the path object from a string. :param text: the path expression :param filename: the name of the file in which the path expression was found (used in error messages) :param lineno: the line on which the expression was found """ self.source = text self.paths = PathParser(text, filename, lineno).parse() self.strategies = [] for path in self.paths: for strategy_class in self.STRATEGIES: if strategy_class.supports(path): self.strategies.append(strategy_class(path)) break else: raise NotImplemented('No strategy found for path') def __repr__(self): paths = [] for path in self.paths: steps = [] for axis, nodetest, predicates in path: steps.append('%s::%s' % (axis, nodetest)) for predicate in predicates: steps[-1] += '[%s]' % predicate paths.append('/'.join(steps)) return '<%s "%s">' % (type(self).__name__, '|'.join(paths)) def select(self, stream, namespaces=None, variables=None): """Returns a substream of the given stream that matches the path. If there are no matches, this method returns an empty stream. 
>>> from genshi.input import XML >>> xml = XML('<root><elem><child>Text</child></elem></root>') >>> print(Path('.//child').select(xml)) <child>Text</child> >>> print(Path('.//child/text()').select(xml)) Text :param stream: the stream to select from :param namespaces: (optional) a mapping of namespace prefixes to URIs :param variables: (optional) a mapping of variable names to values :return: the substream matching the path, or an empty stream :rtype: `Stream` """ if namespaces is None: namespaces = {} if variables is None: variables = {} stream = iter(stream) def _generate(stream=stream, ns=namespaces, vs=variables): next = stream.next test = self.test() for event in stream: result = test(event, ns, vs) if result is True: yield event if event[0] is START: depth = 1 while depth > 0: subevent = next() if subevent[0] is START: depth += 1 elif subevent[0] is END: depth -= 1 yield subevent test(subevent, ns, vs, updateonly=True) elif result: yield result return Stream(_generate(), serializer=getattr(stream, 'serializer', None)) def test(self, ignore_context=False): """Returns a function that can be used to track whether the path matches a specific stream event. The function returned expects the positional arguments ``event``, ``namespaces`` and ``variables``. The first is a stream event, while the latter two are a mapping of namespace prefixes to URIs, and a mapping of variable names to values, respectively. In addition, the function accepts an ``updateonly`` keyword argument that default to ``False``. If it is set to ``True``, the function only updates its internal state, but does not perform any tests or return a result. If the path matches the event, the function returns the match (for example, a `START` or `TEXT` event.) Otherwise, it returns ``None``. >>> from genshi.input import XML >>> xml = XML('<root><elem><child id="1"/></elem><child id="2"/></root>') >>> test = Path('child').test() >>> namespaces, variables = {}, {} >>> for event in xml: ... 
if test(event, namespaces, variables): ... print('%s %r' % (event[0], event[1])) START (QName('child'), Attrs([(QName('id'), u'2')])) :param ignore_context: if `True`, the path is interpreted like a pattern in XSLT, meaning for example that it will match at any depth :return: a function that can be used to test individual events in a stream against the path :rtype: ``function`` """ tests = [s.test(ignore_context) for s in self.strategies] if len(tests) == 1: return tests[0] def _multi(event, namespaces, variables, updateonly=False): retval = None for test in tests: val = test(event, namespaces, variables, updateonly=updateonly) if retval is None: retval = val return retval return _multi class PathSyntaxError(Exception): """Exception raised when an XPath expression is syntactically incorrect.""" def __init__(self, message, filename=None, lineno=-1, offset=-1): if filename: message = '%s (%s, line %d)' % (message, filename, lineno) Exception.__init__(self, message) self.filename = filename self.lineno = lineno self.offset = offset class PathParser(object): """Tokenizes and parses an XPath expression.""" _QUOTES = (("'", "'"), ('"', '"')) _TOKENS = ('::', ':', '..', '.', '//', '/', '[', ']', '()', '(', ')', '@', '=', '!=', '!', '|', ',', '>=', '>', '<=', '<', '$') _tokenize = re.compile('("[^"]*")|(\'[^\']*\')|((?:\d+)?\.\d+)|(%s)|([^%s\s]+)|\s+' % ( '|'.join([re.escape(t) for t in _TOKENS]), ''.join([re.escape(t[0]) for t in _TOKENS]))).findall def __init__(self, text, filename=None, lineno=-1): self.filename = filename self.lineno = lineno self.tokens = [t for t in [dqstr or sqstr or number or token or name for dqstr, sqstr, number, token, name in self._tokenize(text)] if t] self.pos = 0 # Tokenizer @property def at_end(self): return self.pos == len(self.tokens) - 1 @property def cur_token(self): return self.tokens[self.pos] def next_token(self): self.pos += 1 return self.tokens[self.pos] def peek_token(self): if not self.at_end: return self.tokens[self.pos + 1] 
return None # Recursive descent parser def parse(self): """Parses the XPath expression and returns a list of location path tests. For union expressions (such as `*|text()`), this function returns one test for each operand in the union. For patch expressions that don't use the union operator, the function always returns a list of size 1. Each path test in turn is a sequence of tests that correspond to the location steps, each tuples of the form `(axis, testfunc, predicates)` """ paths = [self._location_path()] while self.cur_token == '|': self.next_token() paths.append(self._location_path()) if not self.at_end: raise PathSyntaxError('Unexpected token %r after end of expression' % self.cur_token, self.filename, self.lineno) return paths def _location_path(self): steps = [] while True: if self.cur_token.startswith('/'): if not steps: if self.cur_token == '//': # hack to make //* match every node - also root self.next_token() axis, nodetest, predicates = self._location_step() steps.append((DESCENDANT_OR_SELF, nodetest, predicates)) if self.at_end or not self.cur_token.startswith('/'): break continue else: raise PathSyntaxError('Absolute location paths not ' 'supported', self.filename, self.lineno) elif self.cur_token == '//': steps.append((DESCENDANT_OR_SELF, NodeTest(), [])) self.next_token() axis, nodetest, predicates = self._location_step() if not axis: axis = CHILD steps.append((axis, nodetest, predicates)) if self.at_end or not self.cur_token.startswith('/'): break return steps def _location_step(self): if self.cur_token == '@': axis = ATTRIBUTE self.next_token() elif self.cur_token == '.': axis = SELF elif self.cur_token == '..': raise PathSyntaxError('Unsupported axis "parent"', self.filename, self.lineno) elif self.peek_token() == '::': axis = Axis.forname(self.cur_token) if axis is None: raise PathSyntaxError('Unsupport axis "%s"' % axis, self.filename, self.lineno) self.next_token() self.next_token() else: axis = None nodetest = self._node_test(axis or CHILD) 
predicates = [] while self.cur_token == '[': predicates.append(self._predicate()) return axis, nodetest, predicates def _node_test(self, axis=None): test = prefix = None next_token = self.peek_token() if next_token in ('(', '()'): # Node type test test = self._node_type() elif next_token == ':': # Namespace prefix prefix = self.cur_token self.next_token() localname = self.next_token() if localname == '*': test = QualifiedPrincipalTypeTest(axis, prefix) else: test = QualifiedNameTest(axis, prefix, localname) else: # Name test if self.cur_token == '*': test = PrincipalTypeTest(axis) elif self.cur_token == '.': test = NodeTest() else: test = LocalNameTest(axis, self.cur_token) if not self.at_end: self.next_token() return test def _node_type(self): name = self.cur_token self.next_token() args = [] if self.cur_token != '()': # The processing-instruction() function optionally accepts the # name of the PI as argument, which must be a literal string self.next_token() # ( if self.cur_token != ')': string = self.cur_token if (string[0], string[-1]) in self._QUOTES: string = string[1:-1] args.append(string) cls = _nodetest_map.get(name) if not cls: raise PathSyntaxError('%s() not allowed here' % name, self.filename, self.lineno) return cls(*args) def _predicate(self): assert self.cur_token == '[' self.next_token() expr = self._or_expr() if self.cur_token != ']': raise PathSyntaxError('Expected "]" to close predicate, ' 'but found "%s"' % self.cur_token, self.filename, self.lineno) if not self.at_end: self.next_token() return expr def _or_expr(self): expr = self._and_expr() while self.cur_token == 'or': self.next_token() expr = OrOperator(expr, self._and_expr()) return expr def _and_expr(self): expr = self._equality_expr() while self.cur_token == 'and': self.next_token() expr = AndOperator(expr, self._equality_expr()) return expr def _equality_expr(self): expr = self._relational_expr() while self.cur_token in ('=', '!='): op = _operator_map[self.cur_token] self.next_token() 
expr = op(expr, self._relational_expr()) return expr def _relational_expr(self): expr = self._sub_expr() while self.cur_token in ('>', '>=', '<', '>='): op = _operator_map[self.cur_token] self.next_token() expr = op(expr, self._sub_expr()) return expr def _sub_expr(self): token = self.cur_token if token != '(': return self._primary_expr() self.next_token() expr = self._or_expr() if self.cur_token != ')': raise PathSyntaxError('Expected ")" to close sub-expression, ' 'but found "%s"' % self.cur_token, self.filename, self.lineno) self.next_token() return expr def _primary_expr(self): token = self.cur_token if len(token) > 1 and (token[0], token[-1]) in self._QUOTES: self.next_token() return StringLiteral(token[1:-1]) elif token[0].isdigit() or token[0] == '.': self.next_token() return NumberLiteral(as_float(token)) elif token == '$': token = self.next_token() self.next_token() return VariableReference(token) elif not self.at_end and self.peek_token().startswith('('): return self._function_call() else: axis = None if token == '@': axis = ATTRIBUTE self.next_token() return self._node_test(axis) def _function_call(self): name = self.cur_token if self.next_token() == '()': args = [] else: assert self.cur_token == '(' self.next_token() args = [self._or_expr()] while self.cur_token == ',': self.next_token() args.append(self._or_expr()) if not self.cur_token == ')': raise PathSyntaxError('Expected ")" to close function argument ' 'list, but found "%s"' % self.cur_token, self.filename, self.lineno) self.next_token() cls = _function_map.get(name) if not cls: raise PathSyntaxError('Unsupported function "%s"' % name, self.filename, self.lineno) return cls(*args) # Type coercion def as_scalar(value): """Convert value to a scalar. 
If a single element Attrs() object is passed the value of the single attribute will be returned.""" if isinstance(value, Attrs): assert len(value) == 1 return value[0][1] else: return value def as_float(value): # FIXME - if value is a bool it will be coerced to 0.0 and consequently # compared as a float. This is probably not ideal. return float(as_scalar(value)) def as_long(value): return long(as_scalar(value)) def as_string(value): value = as_scalar(value) if value is False: return '' return unicode(value) def as_bool(value): return bool(as_scalar(value)) # Node tests class PrincipalTypeTest(object): """Node test that matches any event with the given principal type.""" __slots__ = ['principal_type'] def __init__(self, principal_type): self.principal_type = principal_type def __call__(self, kind, data, pos, namespaces, variables): if kind is START: if self.principal_type is ATTRIBUTE: return data[1] or None else: return True def __repr__(self): return '*' class QualifiedPrincipalTypeTest(object): """Node test that matches any event with the given principal type in a specific namespace.""" __slots__ = ['principal_type', 'prefix'] def __init__(self, principal_type, prefix): self.principal_type = principal_type self.prefix = prefix def __call__(self, kind, data, pos, namespaces, variables): namespace = Namespace(namespaces.get(self.prefix)) if kind is START: if self.principal_type is ATTRIBUTE and data[1]: return Attrs([(name, value) for name, value in data[1] if name in namespace]) or None else: return data[0] in namespace def __repr__(self): return '%s:*' % self.prefix class LocalNameTest(object): """Node test that matches any event with the given principal type and local name. 
""" __slots__ = ['principal_type', 'name'] def __init__(self, principal_type, name): self.principal_type = principal_type self.name = name def __call__(self, kind, data, pos, namespaces, variables): if kind is START: if self.principal_type is ATTRIBUTE and self.name in data[1]: return Attrs([(self.name, data[1].get(self.name))]) else: return data[0].localname == self.name def __repr__(self): return self.name class QualifiedNameTest(object): """Node test that matches any event with the given principal type and qualified name. """ __slots__ = ['principal_type', 'prefix', 'name'] def __init__(self, principal_type, prefix, name): self.principal_type = principal_type self.prefix = prefix self.name = name def __call__(self, kind, data, pos, namespaces, variables): qname = QName('%s}%s' % (namespaces.get(self.prefix), self.name)) if kind is START: if self.principal_type is ATTRIBUTE and qname in data[1]: return Attrs([(self.name, data[1].get(self.name))]) else: return data[0] == qname def __repr__(self): return '%s:%s' % (self.prefix, self.name) class CommentNodeTest(object): """Node test that matches any comment events.""" __slots__ = [] def __call__(self, kind, data, pos, namespaces, variables): return kind is COMMENT def __repr__(self): return 'comment()' class NodeTest(object): """Node test that matches any node.""" __slots__ = [] def __call__(self, kind, data, pos, namespaces, variables): if kind is START: return True return kind, data, pos def __repr__(self): return 'node()' class ProcessingInstructionNodeTest(object): """Node test that matches any processing instruction event.""" __slots__ = ['target'] def __init__(self, target=None): self.target = target def __call__(self, kind, data, pos, namespaces, variables): return kind is PI and (not self.target or data[0] == self.target) def __repr__(self): arg = '' if self.target: arg = '"' + self.target + '"' return 'processing-instruction(%s)' % arg class TextNodeTest(object): """Node test that matches any text event.""" 
__slots__ = [] def __call__(self, kind, data, pos, namespaces, variables): return kind is TEXT def __repr__(self): return 'text()' _nodetest_map = {'comment': CommentNodeTest, 'node': NodeTest, 'processing-instruction': ProcessingInstructionNodeTest, 'text': TextNodeTest} # Functions class Function(object): """Base class for function nodes in XPath expressions.""" class BooleanFunction(Function): """The `boolean` function, which converts its argument to a boolean value. """ __slots__ = ['expr'] _return_type = bool def __init__(self, expr): self.expr = expr def __call__(self, kind, data, pos, namespaces, variables): val = self.expr(kind, data, pos, namespaces, variables) return as_bool(val) def __repr__(self): return 'boolean(%r)' % self.expr class CeilingFunction(Function): """The `ceiling` function, which returns the nearest lower integer number for the given number. """ __slots__ = ['number'] def __init__(self, number): self.number = number def __call__(self, kind, data, pos, namespaces, variables): number = self.number(kind, data, pos, namespaces, variables) return ceil(as_float(number)) def __repr__(self): return 'ceiling(%r)' % self.number class ConcatFunction(Function): """The `concat` function, which concatenates (joins) the variable number of strings it gets as arguments. """ __slots__ = ['exprs'] def __init__(self, *exprs): self.exprs = exprs def __call__(self, kind, data, pos, namespaces, variables): strings = [] for item in [expr(kind, data, pos, namespaces, variables) for expr in self.exprs]: strings.append(as_string(item)) return ''.join(strings) def __repr__(self): return 'concat(%s)' % ', '.join([repr(expr) for expr in self.exprs]) class ContainsFunction(Function): """The `contains` function, which returns whether a string contains a given substring. 
""" __slots__ = ['string1', 'string2'] def __init__(self, string1, string2): self.string1 = string1 self.string2 = string2 def __call__(self, kind, data, pos, namespaces, variables): string1 = self.string1(kind, data, pos, namespaces, variables) string2 = self.string2(kind, data, pos, namespaces, variables) return as_string(string2) in as_string(string1) def __repr__(self): return 'contains(%r, %r)' % (self.string1, self.string2) class MatchesFunction(Function): """The `matches` function, which returns whether a string matches a regular expression. """ __slots__ = ['string1', 'string2'] flag_mapping = {'s': re.S, 'm': re.M, 'i': re.I, 'x': re.X} def __init__(self, string1, string2, flags=''): self.string1 = string1 self.string2 = string2 self.flags = self._map_flags(flags) def __call__(self, kind, data, pos, namespaces, variables): string1 = as_string(self.string1(kind, data, pos, namespaces, variables)) string2 = as_string(self.string2(kind, data, pos, namespaces, variables)) return re.search(string2, string1, self.flags) def _map_flags(self, flags): return reduce(operator.or_, [self.flag_map[flag] for flag in flags], re.U) def __repr__(self): return 'contains(%r, %r)' % (self.string1, self.string2) class FalseFunction(Function): """The `false` function, which always returns the boolean `false` value.""" __slots__ = [] def __call__(self, kind, data, pos, namespaces, variables): return False def __repr__(self): return 'false()' class FloorFunction(Function): """The `ceiling` function, which returns the nearest higher integer number for the given number. """ __slots__ = ['number'] def __init__(self, number): self.number = number def __call__(self, kind, data, pos, namespaces, variables): number = self.number(kind, data, pos, namespaces, variables) return floor(as_float(number)) def __repr__(self): return 'floor(%r)' % self.number class LocalNameFunction(Function): """The `local-name` function, which returns the local name of the current element. 
""" __slots__ = [] def __call__(self, kind, data, pos, namespaces, variables): if kind is START: return data[0].localname def __repr__(self): return 'local-name()' class NameFunction(Function): """The `name` function, which returns the qualified name of the current element. """ __slots__ = [] def __call__(self, kind, data, pos, namespaces, variables): if kind is START: return data[0] def __repr__(self): return 'name()' class NamespaceUriFunction(Function): """The `namespace-uri` function, which returns the namespace URI of the current element. """ __slots__ = [] def __call__(self, kind, data, pos, namespaces, variables): if kind is START: return data[0].namespace def __repr__(self): return 'namespace-uri()' class NotFunction(Function): """The `not` function, which returns the negated boolean value of its argument. """ __slots__ = ['expr'] def __init__(self, expr): self.expr = expr def __call__(self, kind, data, pos, namespaces, variables): return not as_bool(self.expr(kind, data, pos, namespaces, variables)) def __repr__(self): return 'not(%s)' % self.expr class NormalizeSpaceFunction(Function): """The `normalize-space` function, which removes leading and trailing whitespace in the given string, and replaces multiple adjacent whitespace characters inside the string with a single space. 
""" __slots__ = ['expr'] _normalize = re.compile(r'\s{2,}').sub def __init__(self, expr): self.expr = expr def __call__(self, kind, data, pos, namespaces, variables): string = self.expr(kind, data, pos, namespaces, variables) return self._normalize(' ', as_string(string).strip()) def __repr__(self): return 'normalize-space(%s)' % repr(self.expr) class NumberFunction(Function): """The `number` function that converts its argument to a number.""" __slots__ = ['expr'] def __init__(self, expr): self.expr = expr def __call__(self, kind, data, pos, namespaces, variables): val = self.expr(kind, data, pos, namespaces, variables) return as_float(val) def __repr__(self): return 'number(%r)' % self.expr class RoundFunction(Function): """The `round` function, which returns the nearest integer number for the given number. """ __slots__ = ['number'] def __init__(self, number): self.number = number def __call__(self, kind, data, pos, namespaces, variables): number = self.number(kind, data, pos, namespaces, variables) return round(as_float(number)) def __repr__(self): return 'round(%r)' % self.number class StartsWithFunction(Function): """The `starts-with` function that returns whether one string starts with a given substring. """ __slots__ = ['string1', 'string2'] def __init__(self, string1, string2): self.string1 = string1 self.string2 = string2 def __call__(self, kind, data, pos, namespaces, variables): string1 = self.string1(kind, data, pos, namespaces, variables) string2 = self.string2(kind, data, pos, namespaces, variables) return as_string(string1).startswith(as_string(string2)) def __repr__(self): return 'starts-with(%r, %r)' % (self.string1, self.string2) class StringLengthFunction(Function): """The `string-length` function that returns the length of the given string. 
""" __slots__ = ['expr'] def __init__(self, expr): self.expr = expr def __call__(self, kind, data, pos, namespaces, variables): string = self.expr(kind, data, pos, namespaces, variables) return len(as_string(string)) def __repr__(self): return 'string-length(%r)' % self.expr class SubstringFunction(Function): """The `substring` function that returns the part of a string that starts at the given offset, and optionally limited to the given length. """ __slots__ = ['string', 'start', 'length'] def __init__(self, string, start, length=None): self.string = string self.start = start self.length = length def __call__(self, kind, data, pos, namespaces, variables): string = self.string(kind, data, pos, namespaces, variables) start = self.start(kind, data, pos, namespaces, variables) length = 0 if self.length is not None: length = self.length(kind, data, pos, namespaces, variables) return string[as_long(start):len(as_string(string)) - as_long(length)] def __repr__(self): if self.length is not None: return 'substring(%r, %r, %r)' % (self.string, self.start, self.length) else: return 'substring(%r, %r)' % (self.string, self.start) class SubstringAfterFunction(Function): """The `substring-after` function that returns the part of a string that is found after the given substring. """ __slots__ = ['string1', 'string2'] def __init__(self, string1, string2): self.string1 = string1 self.string2 = string2 def __call__(self, kind, data, pos, namespaces, variables): string1 = as_string(self.string1(kind, data, pos, namespaces, variables)) string2 = as_string(self.string2(kind, data, pos, namespaces, variables)) index = string1.find(string2) if index >= 0: return string1[index + len(string2):] return '' def __repr__(self): return 'substring-after(%r, %r)' % (self.string1, self.string2) class SubstringBeforeFunction(Function): """The `substring-before` function that returns the part of a string that is found before the given substring. 
""" __slots__ = ['string1', 'string2'] def __init__(self, string1, string2): self.string1 = string1 self.string2 = string2 def __call__(self, kind, data, pos, namespaces, variables): string1 = as_string(self.string1(kind, data, pos, namespaces, variables)) string2 = as_string(self.string2(kind, data, pos, namespaces, variables)) index = string1.find(string2) if index >= 0: return string1[:index] return '' def __repr__(self): return 'substring-after(%r, %r)' % (self.string1, self.string2) class TranslateFunction(Function): """The `translate` function that translates a set of characters in a string to target set of characters. """ __slots__ = ['string', 'fromchars', 'tochars'] def __init__(self, string, fromchars, tochars): self.string = string self.fromchars = fromchars self.tochars = tochars def __call__(self, kind, data, pos, namespaces, variables): string = as_string(self.string(kind, data, pos, namespaces, variables)) fromchars = as_string(self.fromchars(kind, data, pos, namespaces, variables)) tochars = as_string(self.tochars(kind, data, pos, namespaces, variables)) table = dict(zip([ord(c) for c in fromchars], [ord(c) for c in tochars])) return string.translate(table) def __repr__(self): return 'translate(%r, %r, %r)' % (self.string, self.fromchars, self.tochars) class TrueFunction(Function): """The `true` function, which always returns the boolean `true` value.""" __slots__ = [] def __call__(self, kind, data, pos, namespaces, variables): return True def __repr__(self): return 'true()' _function_map = {'boolean': BooleanFunction, 'ceiling': CeilingFunction, 'concat': ConcatFunction, 'contains': ContainsFunction, 'matches': MatchesFunction, 'false': FalseFunction, 'floor': FloorFunction, 'local-name': LocalNameFunction, 'name': NameFunction, 'namespace-uri': NamespaceUriFunction, 'normalize-space': NormalizeSpaceFunction, 'not': NotFunction, 'number': NumberFunction, 'round': RoundFunction, 'starts-with': StartsWithFunction, 'string-length': 
StringLengthFunction, 'substring': SubstringFunction, 'substring-after': SubstringAfterFunction, 'substring-before': SubstringBeforeFunction, 'translate': TranslateFunction, 'true': TrueFunction} # Literals & Variables class Literal(object): """Abstract base class for literal nodes.""" class StringLiteral(Literal): """A string literal node.""" __slots__ = ['text'] def __init__(self, text): self.text = text def __call__(self, kind, data, pos, namespaces, variables): return self.text def __repr__(self): return '"%s"' % self.text class NumberLiteral(Literal): """A number literal node.""" __slots__ = ['number'] def __init__(self, number): self.number = number def __call__(self, kind, data, pos, namespaces, variables): return self.number def __repr__(self): return str(self.number) class VariableReference(Literal): """A variable reference node.""" __slots__ = ['name'] def __init__(self, name): self.name = name def __call__(self, kind, data, pos, namespaces, variables): return variables.get(self.name) def __repr__(self): return str(self.name) # Operators class AndOperator(object): """The boolean operator `and`.""" __slots__ = ['lval', 'rval'] def __init__(self, lval, rval): self.lval = lval self.rval = rval def __call__(self, kind, data, pos, namespaces, variables): lval = as_bool(self.lval(kind, data, pos, namespaces, variables)) if not lval: return False rval = self.rval(kind, data, pos, namespaces, variables) return as_bool(rval) def __repr__(self): return '%s and %s' % (self.lval, self.rval) class EqualsOperator(object): """The equality operator `=`.""" __slots__ = ['lval', 'rval'] def __init__(self, lval, rval): self.lval = lval self.rval = rval def __call__(self, kind, data, pos, namespaces, variables): lval = as_scalar(self.lval(kind, data, pos, namespaces, variables)) rval = as_scalar(self.rval(kind, data, pos, namespaces, variables)) return lval == rval def __repr__(self): return '%s=%s' % (self.lval, self.rval) class NotEqualsOperator(object): """The equality 
operator `!=`.""" __slots__ = ['lval', 'rval'] def __init__(self, lval, rval): self.lval = lval self.rval = rval def __call__(self, kind, data, pos, namespaces, variables): lval = as_scalar(self.lval(kind, data, pos, namespaces, variables)) rval = as_scalar(self.rval(kind, data, pos, namespaces, variables)) return lval != rval def __repr__(self): return '%s!=%s' % (self.lval, self.rval) class OrOperator(object): """The boolean operator `or`.""" __slots__ = ['lval', 'rval'] def __init__(self, lval, rval): self.lval = lval self.rval = rval def __call__(self, kind, data, pos, namespaces, variables): lval = as_bool(self.lval(kind, data, pos, namespaces, variables)) if lval: return True rval = self.rval(kind, data, pos, namespaces, variables) return as_bool(rval) def __repr__(self): return '%s or %s' % (self.lval, self.rval) class GreaterThanOperator(object): """The relational operator `>` (greater than).""" __slots__ = ['lval', 'rval'] def __init__(self, lval, rval): self.lval = lval self.rval = rval def __call__(self, kind, data, pos, namespaces, variables): lval = self.lval(kind, data, pos, namespaces, variables) rval = self.rval(kind, data, pos, namespaces, variables) return as_float(lval) > as_float(rval) def __repr__(self): return '%s>%s' % (self.lval, self.rval) class GreaterThanOrEqualOperator(object): """The relational operator `>=` (greater than or equal).""" __slots__ = ['lval', 'rval'] def __init__(self, lval, rval): self.lval = lval self.rval = rval def __call__(self, kind, data, pos, namespaces, variables): lval = self.lval(kind, data, pos, namespaces, variables) rval = self.rval(kind, data, pos, namespaces, variables) return as_float(lval) >= as_float(rval) def __repr__(self): return '%s>=%s' % (self.lval, self.rval) class LessThanOperator(object): """The relational operator `<` (less than).""" __slots__ = ['lval', 'rval'] def __init__(self, lval, rval): self.lval = lval self.rval = rval def __call__(self, kind, data, pos, namespaces, variables): lval = 
self.lval(kind, data, pos, namespaces, variables) rval = self.rval(kind, data, pos, namespaces, variables) return as_float(lval) < as_float(rval) def __repr__(self): return '%s<%s' % (self.lval, self.rval) class LessThanOrEqualOperator(object): """The relational operator `<=` (less than or equal).""" __slots__ = ['lval', 'rval'] def __init__(self, lval, rval): self.lval = lval self.rval = rval def __call__(self, kind, data, pos, namespaces, variables): lval = self.lval(kind, data, pos, namespaces, variables) rval = self.rval(kind, data, pos, namespaces, variables) return as_float(lval) <= as_float(rval) def __repr__(self): return '%s<=%s' % (self.lval, self.rval) _operator_map = {'=': EqualsOperator, '!=': NotEqualsOperator, '>': GreaterThanOperator, '>=': GreaterThanOrEqualOperator, '<': LessThanOperator, '>=': LessThanOrEqualOperator} _DOTSLASHSLASH = (DESCENDANT_OR_SELF, PrincipalTypeTest(None), ()) _DOTSLASH = (SELF, PrincipalTypeTest(None), ())
[ [ 8, 0, 0.0173, 0.017, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0268, 0.0007, 0, 0.66, 0.0135, 193, 0, 1, 0, 0, 193, 0, 0 ], [ 7, 0, 0.0285, 0.0026, 0, 0.66,...
[ "\"\"\"Basic support for evaluating XPath expressions against streams.\n\n>>> from genshi.input import XML\n>>> doc = XML('''<doc>\n... <items count=\"4\">\n... <item status=\"new\">\n... <summary>Foo</summary>\n... </item>", "from collections import deque", "try:\n reduce # builtin in P...
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.

# NOTE(review): this chunk reached review with all newlines collapsed; the
# line breaks inside the triple-quoted template/expected strings below are
# behavior-significant and were reconstructed -- confirm against the test
# run before relying on exact whitespace.

import doctest
import os
import shutil
import tempfile
import unittest

from genshi.template.base import TemplateSyntaxError
from genshi.template.loader import TemplateLoader
from genshi.template.text import OldTextTemplate, NewTextTemplate


class OldTextTemplateTestCase(unittest.TestCase):
    """Tests for text template processing."""

    def setUp(self):
        # fresh scratch directory for the templates written by include tests
        self.dirname = tempfile.mkdtemp(suffix='markup_test')

    def tearDown(self):
        shutil.rmtree(self.dirname)

    def test_escaping(self):
        tmpl = OldTextTemplate('\\#escaped')
        self.assertEqual('#escaped', tmpl.generate().render(encoding=None))

    def test_comment(self):
        tmpl = OldTextTemplate('## a comment')
        self.assertEqual('', tmpl.generate().render(encoding=None))

    def test_comment_escaping(self):
        tmpl = OldTextTemplate('\\## escaped comment')
        self.assertEqual('## escaped comment',
                         tmpl.generate().render(encoding=None))

    def test_end_with_args(self):
        tmpl = OldTextTemplate("""
        #if foo
          bar
        #end 'if foo'""")
        self.assertEqual('\n', tmpl.generate(foo=False).render(encoding=None))

    def test_latin1_encoded(self):
        text = u'$foo\xf6$bar'.encode('iso-8859-1')
        tmpl = OldTextTemplate(text, encoding='iso-8859-1')
        self.assertEqual(u'x\xf6y',
                         tmpl.generate(foo='x', bar='y').render(encoding=None))

    def test_unicode_input(self):
        text = u'$foo\xf6$bar'
        tmpl = OldTextTemplate(text)
        self.assertEqual(u'x\xf6y',
                         tmpl.generate(foo='x', bar='y').render(encoding=None))

    def test_empty_lines1(self):
        tmpl = OldTextTemplate("""Your items:

        #for item in items
         * ${item}
        #end""")
        self.assertEqual("""Your items:

         * 0
         * 1
         * 2
""", tmpl.generate(items=range(3)).render(encoding=None))

    def test_empty_lines2(self):
        tmpl = OldTextTemplate("""Your items:

        #for item in items
         * ${item}

        #end""")
        self.assertEqual("""Your items:

         * 0

         * 1

         * 2

""", tmpl.generate(items=range(3)).render(encoding=None))

    def test_include(self):
        file1 = open(os.path.join(self.dirname, 'tmpl1.txt'), 'w')
        try:
            file1.write("Included\n")
        finally:
            file1.close()

        file2 = open(os.path.join(self.dirname, 'tmpl2.txt'), 'w')
        try:
            file2.write("""----- Included data below this line -----
#include tmpl1.txt
----- Included data above this line -----""")
        finally:
            file2.close()

        loader = TemplateLoader([self.dirname])
        tmpl = loader.load('tmpl2.txt', cls=OldTextTemplate)
        self.assertEqual("""----- Included data below this line -----
Included
----- Included data above this line -----""",
                         tmpl.generate().render(encoding=None))


class NewTextTemplateTestCase(unittest.TestCase):
    """Tests for text template processing."""

    def setUp(self):
        self.dirname = tempfile.mkdtemp(suffix='markup_test')

    def tearDown(self):
        shutil.rmtree(self.dirname)

    def test_escaping(self):
        tmpl = NewTextTemplate('\\{% escaped %}')
        self.assertEqual('{% escaped %}',
                         tmpl.generate().render(encoding=None))

    def test_comment(self):
        tmpl = NewTextTemplate('{# a comment #}')
        self.assertEqual('', tmpl.generate().render(encoding=None))

    def test_comment_escaping(self):
        tmpl = NewTextTemplate('\\{# escaped comment #}')
        self.assertEqual('{# escaped comment #}',
                         tmpl.generate().render(encoding=None))

    def test_end_with_args(self):
        tmpl = NewTextTemplate("""
        {% if foo %}
          bar
        {% end 'if foo' %}""")
        self.assertEqual('\n', tmpl.generate(foo=False).render(encoding=None))

    def test_latin1_encoded(self):
        text = u'$foo\xf6$bar'.encode('iso-8859-1')
        tmpl = NewTextTemplate(text, encoding='iso-8859-1')
        self.assertEqual(u'x\xf6y',
                         tmpl.generate(foo='x', bar='y').render(encoding=None))

    def test_unicode_input(self):
        text = u'$foo\xf6$bar'
        tmpl = NewTextTemplate(text)
        self.assertEqual(u'x\xf6y',
                         tmpl.generate(foo='x', bar='y').render(encoding=None))

    def test_empty_lines1(self):
        tmpl = NewTextTemplate("""Your items:

{% for item in items %}\
 * ${item}
{% end %}""")
        self.assertEqual("""Your items:

 * 0
 * 1
 * 2
""", tmpl.generate(items=range(3)).render(encoding=None))

    def test_empty_lines2(self):
        tmpl = NewTextTemplate("""Your items:

{% for item in items %}\
 * ${item}

{% end %}""")
        self.assertEqual("""Your items:

 * 0

 * 1

 * 2

""", tmpl.generate(items=range(3)).render(encoding=None))

    def test_exec_with_trailing_space(self):
        """
        Verify that a code block with trailing space does not cause a syntax
        error (see ticket #127).
        """
        NewTextTemplate("""
          {% python
            bar = 42
          $}
        """)

    def test_exec_import(self):
        tmpl = NewTextTemplate("""{% python from datetime import timedelta %}
${timedelta(days=2)}
""")
        self.assertEqual("""
2 days, 0:00:00
""", tmpl.generate().render(encoding=None))

    def test_exec_def(self):
        tmpl = NewTextTemplate("""{% python
def foo():
    return 42
%}
${foo()}
""")
        self.assertEqual("""
42
""", tmpl.generate().render(encoding=None))

    def test_include(self):
        file1 = open(os.path.join(self.dirname, 'tmpl1.txt'), 'w')
        try:
            file1.write("Included")
        finally:
            file1.close()

        file2 = open(os.path.join(self.dirname, 'tmpl2.txt'), 'w')
        try:
            file2.write("""----- Included data below this line -----
{% include tmpl1.txt %}
----- Included data above this line -----""")
        finally:
            file2.close()

        loader = TemplateLoader([self.dirname])
        tmpl = loader.load('tmpl2.txt', cls=NewTextTemplate)
        self.assertEqual("""----- Included data below this line -----
Included
----- Included data above this line -----""",
                         tmpl.generate().render(encoding=None))

    def test_include_expr(self):
        file1 = open(os.path.join(self.dirname, 'tmpl1.txt'), 'w')
        try:
            file1.write("Included")
        finally:
            file1.close()

        file2 = open(os.path.join(self.dirname, 'tmpl2.txt'), 'w')
        try:
            file2.write("""----- Included data below this line -----
{% include ${'%s.txt' % ('tmpl1',)} %}
----- Included data above this line -----""")
        finally:
            file2.close()

        loader = TemplateLoader([self.dirname])
        tmpl = loader.load('tmpl2.txt', cls=NewTextTemplate)
        self.assertEqual("""----- Included data below this line -----
Included
----- Included data above this line -----""",
                         tmpl.generate().render(encoding=None))


def suite():
    suite = unittest.TestSuite()
    suite.addTest(doctest.DocTestSuite(NewTextTemplate.__module__))
    suite.addTest(unittest.makeSuite(OldTextTemplateTestCase, 'test'))
    suite.addTest(unittest.makeSuite(NewTextTemplateTestCase, 'test'))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
[ [ 1, 0, 0.0511, 0.0036, 0, 0.66, 0, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0547, 0.0036, 0, 0.66, 0.0909, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0584, 0.0036, 0, ...
[ "import doctest", "import os", "import shutil", "import tempfile", "import unittest", "from genshi.template.base import TemplateSyntaxError", "from genshi.template.loader import TemplateLoader", "from genshi.template.text import OldTextTemplate, NewTextTemplate", "class OldTextTemplateTestCase(unitt...
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2007 Edgewall Software
# Copyright (C) 2006 Matthew Good
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.

# NOTE(review): this chunk reached review with all newlines collapsed; line
# breaks inside the triple-quoted expected-output strings were reconstructed
# from the rendered markup -- confirm against a test run before relying on
# exact whitespace.

import doctest
import os
import unittest

from genshi.core import Stream
from genshi.output import DocType
from genshi.template import MarkupTemplate, TextTemplate, NewTextTemplate
from genshi.template.plugin import ConfigurationError, \
                                   MarkupTemplateEnginePlugin, \
                                   TextTemplateEnginePlugin

# dotted package used to resolve the test template files shipped alongside
PACKAGE = 'genshi.template.tests'


class MarkupTemplateEnginePluginTestCase(unittest.TestCase):
    """Tests for the Buffet-style markup template engine plugin."""

    def test_init_no_options(self):
        plugin = MarkupTemplateEnginePlugin()
        self.assertEqual('utf-8', plugin.default_encoding)
        self.assertEqual('html', plugin.default_format)
        self.assertEqual(None, plugin.default_doctype)

        self.assertEqual([], plugin.loader.search_path)
        self.assertEqual(True, plugin.loader.auto_reload)
        self.assertEqual(25, plugin.loader._cache.capacity)

    def test_init_with_loader_options(self):
        plugin = MarkupTemplateEnginePlugin(options={
            'genshi.auto_reload': 'off',
            'genshi.max_cache_size': '100',
            'genshi.search_path': '/usr/share/tmpl:/usr/local/share/tmpl',
        })
        self.assertEqual(['/usr/share/tmpl', '/usr/local/share/tmpl'],
                         plugin.loader.search_path)
        self.assertEqual(False, plugin.loader.auto_reload)
        self.assertEqual(100, plugin.loader._cache.capacity)

    def test_init_with_invalid_cache_size(self):
        self.assertRaises(ConfigurationError, MarkupTemplateEnginePlugin,
                          options={'genshi.max_cache_size': 'thirty'})

    def test_init_with_output_options(self):
        plugin = MarkupTemplateEnginePlugin(options={
            'genshi.default_encoding': 'iso-8859-15',
            'genshi.default_format': 'xhtml',
            'genshi.default_doctype': 'xhtml-strict',
        })
        self.assertEqual('iso-8859-15', plugin.default_encoding)
        self.assertEqual('xhtml', plugin.default_format)
        self.assertEqual(DocType.XHTML, plugin.default_doctype)

    def test_init_with_invalid_output_format(self):
        self.assertRaises(ConfigurationError, MarkupTemplateEnginePlugin,
                          options={'genshi.default_format': 'foobar'})

    def test_init_with_invalid_doctype(self):
        self.assertRaises(ConfigurationError, MarkupTemplateEnginePlugin,
                          options={'genshi.default_doctype': 'foobar'})

    def test_load_template_from_file(self):
        plugin = MarkupTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        self.assertEqual('test.html', os.path.basename(tmpl.filename))
        assert isinstance(tmpl, MarkupTemplate)

    def test_load_template_from_string(self):
        plugin = MarkupTemplateEnginePlugin()
        tmpl = plugin.load_template(None, template_string="""<p>
          $message
        </p>""")
        self.assertEqual(None, tmpl.filename)
        assert isinstance(tmpl, MarkupTemplate)

    def test_transform_with_load(self):
        plugin = MarkupTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        stream = plugin.transform({'message': 'Hello'}, tmpl)
        assert isinstance(stream, Stream)

    def test_transform_without_load(self):
        plugin = MarkupTemplateEnginePlugin()
        stream = plugin.transform({'message': 'Hello'},
                                  PACKAGE + '.templates.test')
        assert isinstance(stream, Stream)

    def test_render(self):
        plugin = MarkupTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        output = plugin.render({'message': 'Hello'}, template=tmpl)
        self.assertEqual("""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html lang="en">
  <head>
    <title>Test</title>
  </head>
  <body>
    <h1>Test</h1>
    <p>Hello</p>
  </body>
</html>""", output)

    def test_render_with_format(self):
        plugin = MarkupTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        output = plugin.render({'message': 'Hello'}, format='xhtml',
                               template=tmpl)
        self.assertEqual("""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="en">
  <head>
    <title>Test</title>
  </head>
  <body>
    <h1>Test</h1>
    <p>Hello</p>
  </body>
</html>""", output)

    def test_render_with_doctype(self):
        plugin = MarkupTemplateEnginePlugin(options={
            'genshi.default_doctype': 'html-strict',
        })
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        output = plugin.render({'message': 'Hello'}, template=tmpl)
        self.assertEqual("""<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html lang="en">
  <head>
    <title>Test</title>
  </head>
  <body>
    <h1>Test</h1>
    <p>Hello</p>
  </body>
</html>""", output)

    def test_render_fragment_with_doctype(self):
        plugin = MarkupTemplateEnginePlugin(options={
            'genshi.default_doctype': 'html-strict',
        })
        tmpl = plugin.load_template(PACKAGE + '.templates.test_no_doctype')
        output = plugin.render({'message': 'Hello'}, template=tmpl,
                               fragment=True)
        self.assertEqual("""<html lang="en">
  <head>
    <title>Test</title>
  </head>
  <body>
    <h1>Test</h1>
    <p>Hello</p>
  </body>
</html>""", output)

    def test_helper_functions(self):
        plugin = MarkupTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.functions')
        output = plugin.render({'snippet': '<b>Foo</b>'}, template=tmpl)
        self.assertEqual("""<div>
False
bar
<b>Foo</b>
<b>Foo</b>
</div>""", output)


class TextTemplateEnginePluginTestCase(unittest.TestCase):
    """Tests for the Buffet-style text template engine plugin."""

    def test_init_no_options(self):
        plugin = TextTemplateEnginePlugin()
        self.assertEqual('utf-8', plugin.default_encoding)
        self.assertEqual('text', plugin.default_format)

        self.assertEqual([], plugin.loader.search_path)
        self.assertEqual(True, plugin.loader.auto_reload)
        self.assertEqual(25, plugin.loader._cache.capacity)

    def test_init_with_loader_options(self):
        plugin = TextTemplateEnginePlugin(options={
            'genshi.auto_reload': 'off',
            'genshi.max_cache_size': '100',
            'genshi.search_path': '/usr/share/tmpl:/usr/local/share/tmpl',
        })
        self.assertEqual(['/usr/share/tmpl', '/usr/local/share/tmpl'],
                         plugin.loader.search_path)
        self.assertEqual(False, plugin.loader.auto_reload)
        self.assertEqual(100, plugin.loader._cache.capacity)

    def test_init_with_output_options(self):
        plugin = TextTemplateEnginePlugin(options={
            'genshi.default_encoding': 'iso-8859-15',
        })
        self.assertEqual('iso-8859-15', plugin.default_encoding)

    def test_init_with_new_syntax(self):
        plugin = TextTemplateEnginePlugin(options={
            'genshi.new_text_syntax': 'yes',
        })
        self.assertEqual(NewTextTemplate, plugin.template_class)
        tmpl = plugin.load_template(PACKAGE + '.templates.new_syntax')
        output = plugin.render({'foo': True}, template=tmpl)
        self.assertEqual('bar', output)

    def test_load_template_from_file(self):
        plugin = TextTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        assert isinstance(tmpl, TextTemplate)
        self.assertEqual('test.txt', os.path.basename(tmpl.filename))

    def test_load_template_from_string(self):
        plugin = TextTemplateEnginePlugin()
        tmpl = plugin.load_template(None, template_string="$message")
        self.assertEqual(None, tmpl.filename)
        assert isinstance(tmpl, TextTemplate)

    def test_transform_without_load(self):
        plugin = TextTemplateEnginePlugin()
        stream = plugin.transform({'message': 'Hello'},
                                  PACKAGE + '.templates.test')
        assert isinstance(stream, Stream)

    def test_transform_with_load(self):
        plugin = TextTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        stream = plugin.transform({'message': 'Hello'}, tmpl)
        assert isinstance(stream, Stream)

    def test_render(self):
        plugin = TextTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        output = plugin.render({'message': 'Hello'}, template=tmpl)
        self.assertEqual("""Test
====

Hello

""", output)

    def test_helper_functions(self):
        plugin = TextTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.functions')
        output = plugin.render({}, template=tmpl)
        self.assertEqual("""False
bar
""", output)


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(MarkupTemplateEnginePluginTestCase,
                                     'test'))
    suite.addTest(unittest.makeSuite(TextTemplateEnginePluginTestCase,
                                     'test'))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
[ [ 1, 0, 0.0568, 0.0038, 0, 0.66, 0, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0606, 0.0038, 0, 0.66, 0.0909, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0644, 0.0038, 0, ...
[ "import doctest", "import os", "import unittest", "from genshi.core import Stream", "from genshi.output import DocType", "from genshi.template import MarkupTemplate, TextTemplate, NewTextTemplate", "from genshi.template.plugin import ConfigurationError, \\\n MarkupTempl...
# -*- coding: utf-8 -*- # # Copyright (C) 2006-2007 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. import doctest import unittest from genshi.template.base import Template def suite(): suite = unittest.TestSuite() suite.addTest(doctest.DocTestSuite(Template.__module__)) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
[ [ 1, 0, 0.56, 0.04, 0, 0.66, 0, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.6, 0.04, 0, 0.66, 0.25, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 1, 0, 0.68, 0.04, 0, 0.66, 0.5, ...
[ "import doctest", "import unittest", "from genshi.template.base import Template", "def suite():\n suite = unittest.TestSuite()\n suite.addTest(doctest.DocTestSuite(Template.__module__))\n return suite", " suite = unittest.TestSuite()", " suite.addTest(doctest.DocTestSuite(Template.__module_...
# -*- coding: utf-8 -*- # # Copyright (C) 2006-2007 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. import doctest import unittest def suite(): from genshi.template.tests import base, directives, eval, interpolation, \ loader, markup, plugin, text suite = unittest.TestSuite() suite.addTest(base.suite()) suite.addTest(directives.suite()) suite.addTest(eval.suite()) suite.addTest(interpolation.suite()) suite.addTest(loader.suite()) suite.addTest(markup.suite()) suite.addTest(plugin.suite()) suite.addTest(text.suite()) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
[ [ 1, 0, 0.4375, 0.0312, 0, 0.66, 0, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.4688, 0.0312, 0, 0.66, 0.3333, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 2, 0, 0.7188, 0.4062, 0, 0....
[ "import doctest", "import unittest", "def suite():\n from genshi.template.tests import base, directives, eval, interpolation, \\\n loader, markup, plugin, text\n suite = unittest.TestSuite()\n suite.addTest(base.suite())\n suite.addTest(directives.suite())\n s...
# -*- coding: utf-8 -*- # # Copyright (C) 2006-2008 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. import doctest import os import shutil import tempfile import unittest from genshi.core import TEXT from genshi.template.loader import TemplateLoader from genshi.template.markup import MarkupTemplate class TemplateLoaderTestCase(unittest.TestCase): """Tests for the template loader.""" def setUp(self): self.dirname = tempfile.mkdtemp(suffix='markup_test') def tearDown(self): shutil.rmtree(self.dirname) def test_search_path_empty(self): loader = TemplateLoader() self.assertEqual([], loader.search_path) def test_search_path_as_string(self): loader = TemplateLoader(self.dirname) self.assertEqual([self.dirname], loader.search_path) def test_relative_include_samedir(self): file1 = open(os.path.join(self.dirname, 'tmpl1.html'), 'w') try: file1.write("""<div>Included</div>""") finally: file1.close() file2 = open(os.path.join(self.dirname, 'tmpl2.html'), 'w') try: file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"> <xi:include href="tmpl1.html" /> </html>""") finally: file2.close() loader = TemplateLoader([self.dirname]) tmpl = loader.load('tmpl2.html') self.assertEqual("""<html> <div>Included</div> </html>""", tmpl.generate().render(encoding=None)) def test_relative_include_subdir(self): os.mkdir(os.path.join(self.dirname, 'sub')) file1 = open(os.path.join(self.dirname, 'sub', 'tmpl1.html'), 'w') try: file1.write("""<div>Included</div>""") finally: file1.close() file2 = open(os.path.join(self.dirname, 'tmpl2.html'), 'w') try: file2.write("""<html 
xmlns:xi="http://www.w3.org/2001/XInclude"> <xi:include href="sub/tmpl1.html" /> </html>""") finally: file2.close() loader = TemplateLoader([self.dirname]) tmpl = loader.load('tmpl2.html') self.assertEqual("""<html> <div>Included</div> </html>""", tmpl.generate().render(encoding=None)) def test_relative_include_parentdir(self): file1 = open(os.path.join(self.dirname, 'tmpl1.html'), 'w') try: file1.write("""<div>Included</div>""") finally: file1.close() os.mkdir(os.path.join(self.dirname, 'sub')) file2 = open(os.path.join(self.dirname, 'sub', 'tmpl2.html'), 'w') try: file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"> <xi:include href="../tmpl1.html" /> </html>""") finally: file2.close() loader = TemplateLoader([self.dirname]) tmpl = loader.load('sub/tmpl2.html') self.assertEqual("""<html> <div>Included</div> </html>""", tmpl.generate().render(encoding=None)) def test_relative_include_samesubdir(self): file1 = open(os.path.join(self.dirname, 'tmpl1.html'), 'w') try: file1.write("""<div>Included tmpl1.html</div>""") finally: file1.close() os.mkdir(os.path.join(self.dirname, 'sub')) file2 = open(os.path.join(self.dirname, 'sub', 'tmpl1.html'), 'w') try: file2.write("""<div>Included sub/tmpl1.html</div>""") finally: file2.close() file3 = open(os.path.join(self.dirname, 'sub', 'tmpl2.html'), 'w') try: file3.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"> <xi:include href="tmpl1.html" /> </html>""") finally: file3.close() loader = TemplateLoader([self.dirname]) tmpl = loader.load('sub/tmpl2.html') self.assertEqual("""<html> <div>Included sub/tmpl1.html</div> </html>""", tmpl.generate().render(encoding=None)) def test_relative_include_without_search_path(self): file1 = open(os.path.join(self.dirname, 'tmpl1.html'), 'w') try: file1.write("""<div>Included</div>""") finally: file1.close() file2 = open(os.path.join(self.dirname, 'tmpl2.html'), 'w') try: file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"> <xi:include href="tmpl1.html" /> 
</html>""") finally: file2.close() loader = TemplateLoader() tmpl = loader.load(os.path.join(self.dirname, 'tmpl2.html')) self.assertEqual("""<html> <div>Included</div> </html>""", tmpl.generate().render(encoding=None)) def test_relative_include_without_search_path_nested(self): file1 = open(os.path.join(self.dirname, 'tmpl1.html'), 'w') try: file1.write("""<div>Included</div>""") finally: file1.close() file2 = open(os.path.join(self.dirname, 'tmpl2.html'), 'w') try: file2.write("""<div xmlns:xi="http://www.w3.org/2001/XInclude"> <xi:include href="tmpl1.html" /> </div>""") finally: file2.close() file3 = open(os.path.join(self.dirname, 'tmpl3.html'), 'w') try: file3.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"> <xi:include href="tmpl2.html" /> </html>""") finally: file3.close() loader = TemplateLoader() tmpl = loader.load(os.path.join(self.dirname, 'tmpl3.html')) self.assertEqual("""<html> <div> <div>Included</div> </div> </html>""", tmpl.generate().render(encoding=None)) def test_relative_include_from_inmemory_template(self): file1 = open(os.path.join(self.dirname, 'tmpl1.html'), 'w') try: file1.write("""<div>Included</div>""") finally: file1.close() loader = TemplateLoader([self.dirname]) tmpl2 = MarkupTemplate("""<html xmlns:xi="http://www.w3.org/2001/XInclude"> <xi:include href="../tmpl1.html" /> </html>""", filename='subdir/tmpl2.html', loader=loader) self.assertEqual("""<html> <div>Included</div> </html>""", tmpl2.generate().render(encoding=None)) def test_relative_absolute_template_preferred(self): file1 = open(os.path.join(self.dirname, 'tmpl1.html'), 'w') try: file1.write("""<div>Included</div>""") finally: file1.close() os.mkdir(os.path.join(self.dirname, 'sub')) file2 = open(os.path.join(self.dirname, 'sub', 'tmpl1.html'), 'w') try: file2.write("""<div>Included from sub</div>""") finally: file2.close() file3 = open(os.path.join(self.dirname, 'sub', 'tmpl2.html'), 'w') try: file3.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"> 
<xi:include href="tmpl1.html" /> </html>""") finally: file3.close() loader = TemplateLoader() tmpl = loader.load(os.path.abspath(os.path.join(self.dirname, 'sub', 'tmpl2.html'))) self.assertEqual("""<html> <div>Included from sub</div> </html>""", tmpl.generate().render(encoding=None)) def test_abspath_caching(self): abspath = os.path.join(self.dirname, 'abs') os.mkdir(abspath) file1 = open(os.path.join(abspath, 'tmpl1.html'), 'w') try: file1.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"> <xi:include href="tmpl2.html" /> </html>""") finally: file1.close() file2 = open(os.path.join(abspath, 'tmpl2.html'), 'w') try: file2.write("""<div>Included from abspath.</div>""") finally: file2.close() searchpath = os.path.join(self.dirname, 'searchpath') os.mkdir(searchpath) file3 = open(os.path.join(searchpath, 'tmpl2.html'), 'w') try: file3.write("""<div>Included from searchpath.</div>""") finally: file3.close() loader = TemplateLoader(searchpath) tmpl1 = loader.load(os.path.join(abspath, 'tmpl1.html')) self.assertEqual("""<html> <div>Included from searchpath.</div> </html>""", tmpl1.generate().render(encoding=None)) assert 'tmpl2.html' in loader._cache def test_abspath_include_caching_without_search_path(self): file1 = open(os.path.join(self.dirname, 'tmpl1.html'), 'w') try: file1.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"> <xi:include href="tmpl2.html" /> </html>""") finally: file1.close() file2 = open(os.path.join(self.dirname, 'tmpl2.html'), 'w') try: file2.write("""<div>Included</div>""") finally: file2.close() os.mkdir(os.path.join(self.dirname, 'sub')) file3 = open(os.path.join(self.dirname, 'sub', 'tmpl1.html'), 'w') try: file3.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"> <xi:include href="tmpl2.html" /> </html>""") finally: file3.close() file4 = open(os.path.join(self.dirname, 'sub', 'tmpl2.html'), 'w') try: file4.write("""<div>Included from sub</div>""") finally: file4.close() loader = TemplateLoader() tmpl1 = 
loader.load(os.path.join(self.dirname, 'tmpl1.html')) self.assertEqual("""<html> <div>Included</div> </html>""", tmpl1.generate().render(encoding=None)) tmpl2 = loader.load(os.path.join(self.dirname, 'sub', 'tmpl1.html')) self.assertEqual("""<html> <div>Included from sub</div> </html>""", tmpl2.generate().render(encoding=None)) assert 'tmpl2.html' not in loader._cache def test_load_with_default_encoding(self): f = open(os.path.join(self.dirname, 'tmpl.html'), 'w') try: f.write(u'<div>\xf6</div>'.encode('iso-8859-1')) finally: f.close() loader = TemplateLoader([self.dirname], default_encoding='iso-8859-1') loader.load('tmpl.html') def test_load_with_explicit_encoding(self): f = open(os.path.join(self.dirname, 'tmpl.html'), 'w') try: f.write(u'<div>\xf6</div>'.encode('iso-8859-1')) finally: f.close() loader = TemplateLoader([self.dirname], default_encoding='utf-8') loader.load('tmpl.html', encoding='iso-8859-1') def test_load_with_callback(self): fileobj = open(os.path.join(self.dirname, 'tmpl.html'), 'w') try: fileobj.write("""<html> <p>Hello</p> </html>""") finally: fileobj.close() def template_loaded(template): def my_filter(stream, ctxt): for kind, data, pos in stream: if kind is TEXT and data.strip(): data = ', '.join([data, data.lower()]) yield kind, data, pos template.filters.insert(0, my_filter) loader = TemplateLoader([self.dirname], callback=template_loaded) tmpl = loader.load('tmpl.html') self.assertEqual("""<html> <p>Hello, hello</p> </html>""", tmpl.generate().render(encoding=None)) # Make sure the filter is only added once tmpl = loader.load('tmpl.html') self.assertEqual("""<html> <p>Hello, hello</p> </html>""", tmpl.generate().render(encoding=None)) def test_prefix_delegation_to_directories(self): """ Test prefix delegation with the following layout: templates/foo.html sub1/templates/tmpl1.html sub2/templates/tmpl2.html Where sub1 and sub2 are prefixes, and both tmpl1.html and tmpl2.html incldue foo.html. 
""" dir1 = os.path.join(self.dirname, 'templates') os.mkdir(dir1) file1 = open(os.path.join(dir1, 'foo.html'), 'w') try: file1.write("""<div>Included foo</div>""") finally: file1.close() dir2 = os.path.join(self.dirname, 'sub1', 'templates') os.makedirs(dir2) file2 = open(os.path.join(dir2, 'tmpl1.html'), 'w') try: file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"> <xi:include href="../foo.html" /> from sub1 </html>""") finally: file2.close() dir3 = os.path.join(self.dirname, 'sub2', 'templates') os.makedirs(dir3) file3 = open(os.path.join(dir3, 'tmpl2.html'), 'w') try: file3.write("""<div>tmpl2</div>""") finally: file3.close() loader = TemplateLoader([dir1, TemplateLoader.prefixed( sub1 = dir2, sub2 = dir3 )]) tmpl = loader.load('sub1/tmpl1.html') self.assertEqual("""<html> <div>Included foo</div> from sub1 </html>""", tmpl.generate().render(encoding=None)) def test_prefix_delegation_to_directories_with_subdirs(self): """ Test prefix delegation with the following layout: templates/foo.html sub1/templates/tmpl1.html sub1/templates/tmpl2.html sub1/templates/bar/tmpl3.html Where sub1 is a prefix, and tmpl1.html includes all the others. 
""" dir1 = os.path.join(self.dirname, 'templates') os.mkdir(dir1) file1 = open(os.path.join(dir1, 'foo.html'), 'w') try: file1.write("""<div>Included foo</div>""") finally: file1.close() dir2 = os.path.join(self.dirname, 'sub1', 'templates') os.makedirs(dir2) file2 = open(os.path.join(dir2, 'tmpl1.html'), 'w') try: file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"> <xi:include href="../foo.html" /> from sub1 <xi:include href="tmpl2.html" /> from sub1 <xi:include href="bar/tmpl3.html" /> from sub1 </html>""") finally: file2.close() file3 = open(os.path.join(dir2, 'tmpl2.html'), 'w') try: file3.write("""<div>tmpl2</div>""") finally: file3.close() dir3 = os.path.join(self.dirname, 'sub1', 'templates', 'bar') os.makedirs(dir3) file4 = open(os.path.join(dir3, 'tmpl3.html'), 'w') try: file4.write("""<div>bar/tmpl3</div>""") finally: file4.close() loader = TemplateLoader([dir1, TemplateLoader.prefixed( sub1 = os.path.join(dir2), sub2 = os.path.join(dir3) )]) tmpl = loader.load('sub1/tmpl1.html') self.assertEqual("""<html> <div>Included foo</div> from sub1 <div>tmpl2</div> from sub1 <div>bar/tmpl3</div> from sub1 </html>""", tmpl.generate().render(encoding=None)) def suite(): suite = unittest.TestSuite() suite.addTest(doctest.DocTestSuite(TemplateLoader.__module__)) suite.addTest(unittest.makeSuite(TemplateLoaderTestCase, 'test')) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
[ [ 1, 0, 0.0303, 0.0022, 0, 0.66, 0, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0325, 0.0022, 0, 0.66, 0.1, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0346, 0.0022, 0, 0.6...
[ "import doctest", "import os", "import shutil", "import tempfile", "import unittest", "from genshi.core import TEXT", "from genshi.template.loader import TemplateLoader", "from genshi.template.markup import MarkupTemplate", "class TemplateLoaderTestCase(unittest.TestCase):\n \"\"\"Tests for the t...
# -*- coding: utf-8 -*- # # Copyright (C) 2006-2009 Edgewall Software # Copyright (C) 2006 Matthew Good # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. """Basic support for the template engine plugin API used by TurboGears and CherryPy/Buffet. """ from genshi.input import ET, HTML, XML from genshi.output import DocType from genshi.template.base import Template from genshi.template.loader import TemplateLoader from genshi.template.markup import MarkupTemplate from genshi.template.text import TextTemplate, NewTextTemplate __all__ = ['ConfigurationError', 'AbstractTemplateEnginePlugin', 'MarkupTemplateEnginePlugin', 'TextTemplateEnginePlugin'] __docformat__ = 'restructuredtext en' class ConfigurationError(ValueError): """Exception raised when invalid plugin options are encountered.""" class AbstractTemplateEnginePlugin(object): """Implementation of the plugin API.""" template_class = None extension = None def __init__(self, extra_vars_func=None, options=None): self.get_extra_vars = extra_vars_func if options is None: options = {} self.options = options self.default_encoding = options.get('genshi.default_encoding', 'utf-8') auto_reload = options.get('genshi.auto_reload', '1') if isinstance(auto_reload, basestring): auto_reload = auto_reload.lower() in ('1', 'on', 'yes', 'true') search_path = [p for p in options.get('genshi.search_path', '').split(':') if p] self.use_package_naming = not search_path try: max_cache_size = int(options.get('genshi.max_cache_size', 25)) except ValueError: raise ConfigurationError('Invalid value for max_cache_size: "%s"' % options.get('genshi.max_cache_size')) 
loader_callback = options.get('genshi.loader_callback', None) if loader_callback and not hasattr(loader_callback, '__call__'): raise ConfigurationError('loader callback must be a function') lookup_errors = options.get('genshi.lookup_errors', 'strict') if lookup_errors not in ('lenient', 'strict'): raise ConfigurationError('Unknown lookup errors mode "%s"' % lookup_errors) try: allow_exec = bool(options.get('genshi.allow_exec', True)) except ValueError: raise ConfigurationError('Invalid value for allow_exec "%s"' % options.get('genshi.allow_exec')) self.loader = TemplateLoader([p for p in search_path if p], auto_reload=auto_reload, max_cache_size=max_cache_size, default_class=self.template_class, variable_lookup=lookup_errors, allow_exec=allow_exec, callback=loader_callback) def load_template(self, templatename, template_string=None): """Find a template specified in python 'dot' notation, or load one from a string. """ if template_string is not None: return self.template_class(template_string) if self.use_package_naming: divider = templatename.rfind('.') if divider >= 0: from pkg_resources import resource_filename package = templatename[:divider] basename = templatename[divider + 1:] + self.extension templatename = resource_filename(package, basename) return self.loader.load(templatename) def _get_render_options(self, format=None, fragment=False): if format is None: format = self.default_format kwargs = {'method': format} if self.default_encoding: kwargs['encoding'] = self.default_encoding return kwargs def render(self, info, format=None, fragment=False, template=None): """Render the template to a string using the provided info.""" kwargs = self._get_render_options(format=format, fragment=fragment) return self.transform(info, template).render(**kwargs) def transform(self, info, template): """Render the output to an event stream.""" if not isinstance(template, Template): template = self.load_template(template) return template.generate(**info) class 
MarkupTemplateEnginePlugin(AbstractTemplateEnginePlugin): """Implementation of the plugin API for markup templates.""" template_class = MarkupTemplate extension = '.html' def __init__(self, extra_vars_func=None, options=None): AbstractTemplateEnginePlugin.__init__(self, extra_vars_func, options) default_doctype = self.options.get('genshi.default_doctype') if default_doctype: doctype = DocType.get(default_doctype) if doctype is None: raise ConfigurationError('Unknown doctype %r' % default_doctype) self.default_doctype = doctype else: self.default_doctype = None format = self.options.get('genshi.default_format', 'html').lower() if format not in ('html', 'xhtml', 'xml', 'text'): raise ConfigurationError('Unknown output format %r' % format) self.default_format = format def _get_render_options(self, format=None, fragment=False): kwargs = super(MarkupTemplateEnginePlugin, self)._get_render_options(format, fragment) if self.default_doctype and not fragment: kwargs['doctype'] = self.default_doctype return kwargs def transform(self, info, template): """Render the output to an event stream.""" data = {'ET': ET, 'HTML': HTML, 'XML': XML} if self.get_extra_vars: data.update(self.get_extra_vars()) data.update(info) return super(MarkupTemplateEnginePlugin, self).transform(data, template) class TextTemplateEnginePlugin(AbstractTemplateEnginePlugin): """Implementation of the plugin API for text templates.""" template_class = TextTemplate extension = '.txt' default_format = 'text' def __init__(self, extra_vars_func=None, options=None): if options is None: options = {} new_syntax = options.get('genshi.new_text_syntax') if isinstance(new_syntax, basestring): new_syntax = new_syntax.lower() in ('1', 'on', 'yes', 'true') if new_syntax: self.template_class = NewTextTemplate AbstractTemplateEnginePlugin.__init__(self, extra_vars_func, options)
[ [ 8, 0, 0.0909, 0.017, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.108, 0.0057, 0, 0.66, 0.0833, 329, 0, 3, 0, 0, 329, 0, 0 ], [ 1, 0, 0.1136, 0.0057, 0, 0.66, ...
[ "\"\"\"Basic support for the template engine plugin API used by TurboGears and\nCherryPy/Buffet.\n\"\"\"", "from genshi.input import ET, HTML, XML", "from genshi.output import DocType", "from genshi.template.base import Template", "from genshi.template.loader import TemplateLoader", "from genshi.template....
# -*- coding: utf-8 -*- # # Copyright (C) 2008-2009 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. """Emulation of the proper abstract syntax tree API for Python 2.4.""" import compiler import compiler.ast from genshi.template import _ast24 as _ast __all__ = ['_ast', 'parse'] __docformat__ = 'restructuredtext en' def _new(cls, *args, **kwargs): ret = cls() if ret._fields: for attr, value in zip(ret._fields, args): if attr in kwargs: raise ValueError('Field set both in args and kwargs') setattr(ret, attr, value) for attr in kwargs: if (getattr(ret, '_fields', None) and attr in ret._fields) \ or (getattr(ret, '_attributes', None) and attr in ret._attributes): setattr(ret, attr, kwargs[attr]) return ret class ASTUpgrader(object): """Transformer changing structure of Python 2.4 ASTs to Python 2.5 ones. Transforms ``compiler.ast`` Abstract Syntax Tree to builtin ``_ast``. It can use fake`` _ast`` classes and this way allow ``_ast`` emulation in Python 2.4. 
""" def __init__(self): self.out_flags = None self.lines = [-1] def _new(self, *args, **kwargs): return _new(lineno = self.lines[-1], *args, **kwargs) def visit(self, node): if node is None: return None if type(node) is tuple: return tuple([self.visit(n) for n in node]) lno = getattr(node, 'lineno', None) if lno is not None: self.lines.append(lno) visitor = getattr(self, 'visit_%s' % node.__class__.__name__, None) if visitor is None: raise Exception('Unhandled node type %r' % type(node)) retval = visitor(node) if lno is not None: self.lines.pop() return retval def visit_Module(self, node): body = self.visit(node.node) if node.doc: body = [self._new(_ast.Expr, self._new(_ast.Str, node.doc))] + body return self._new(_ast.Module, body) def visit_Expression(self, node): return self._new(_ast.Expression, self.visit(node.node)) def _extract_args(self, node): tab = node.argnames[:] if node.flags & compiler.ast.CO_VARKEYWORDS: kwarg = tab[-1] tab = tab[:-1] else: kwarg = None if node.flags & compiler.ast.CO_VARARGS: vararg = tab[-1] tab = tab[:-1] else: vararg = None def _tup(t): if isinstance(t, str): return self._new(_ast.Name, t, _ast.Store()) elif isinstance(t, tuple): elts = [_tup(x) for x in t] return self._new(_ast.Tuple, elts, _ast.Store()) else: raise NotImplemented args = [] for arg in tab: if isinstance(arg, str): args.append(self._new(_ast.Name, arg, _ast.Param())) elif isinstance(arg, tuple): args.append(_tup(arg)) else: assert False, node.__class__ defaults = [self.visit(d) for d in node.defaults] return self._new(_ast.arguments, args, vararg, kwarg, defaults) def visit_Function(self, node): if getattr(node, 'decorators', ()): decorators = [self.visit(d) for d in node.decorators.nodes] else: decorators = [] args = self._extract_args(node) body = self.visit(node.code) if node.doc: body = [self._new(_ast.Expr, self._new(_ast.Str, node.doc))] + body return self._new(_ast.FunctionDef, node.name, args, body, decorators) def visit_Class(self, node): 
#self.name_types.append(_ast.Load) bases = [self.visit(b) for b in node.bases] #self.name_types.pop() body = self.visit(node.code) if node.doc: body = [self._new(_ast.Expr, self._new(_ast.Str, node.doc))] + body return self._new(_ast.ClassDef, node.name, bases, body) def visit_Return(self, node): return self._new(_ast.Return, self.visit(node.value)) def visit_Assign(self, node): #self.name_types.append(_ast.Store) targets = [self.visit(t) for t in node.nodes] #self.name_types.pop() return self._new(_ast.Assign, targets, self.visit(node.expr)) aug_operators = { '+=': _ast.Add, '/=': _ast.Div, '//=': _ast.FloorDiv, '<<=': _ast.LShift, '%=': _ast.Mod, '*=': _ast.Mult, '**=': _ast.Pow, '>>=': _ast.RShift, '-=': _ast.Sub, } def visit_AugAssign(self, node): target = self.visit(node.node) # Because it's AugAssign target can't be list nor tuple # so we only have to change context of one node target.ctx = _ast.Store() op = self.aug_operators[node.op]() return self._new(_ast.AugAssign, target, op, self.visit(node.expr)) def _visit_Print(nl): def _visit(self, node): values = [self.visit(v) for v in node.nodes] return self._new(_ast.Print, self.visit(node.dest), values, nl) return _visit visit_Print = _visit_Print(False) visit_Printnl = _visit_Print(True) del _visit_Print def visit_For(self, node): return self._new(_ast.For, self.visit(node.assign), self.visit(node.list), self.visit(node.body), self.visit(node.else_)) def visit_While(self, node): return self._new(_ast.While, self.visit(node.test), self.visit(node.body), self.visit(node.else_)) def visit_If(self, node): def _level(tests, else_): test = self.visit(tests[0][0]) body = self.visit(tests[0][1]) if len(tests) == 1: orelse = self.visit(else_) else: orelse = [_level(tests[1:], else_)] return self._new(_ast.If, test, body, orelse) return _level(node.tests, node.else_) def visit_With(self, node): return self._new(_ast.With, self.visit(node.expr), self.visit(node.vars), self.visit(node.body)) def visit_Raise(self, node): 
return self._new(_ast.Raise, self.visit(node.expr1), self.visit(node.expr2), self.visit(node.expr3)) def visit_TryExcept(self, node): handlers = [] for type, name, body in node.handlers: handlers.append(self._new(_ast.excepthandler, self.visit(type), self.visit(name), self.visit(body))) return self._new(_ast.TryExcept, self.visit(node.body), handlers, self.visit(node.else_)) def visit_TryFinally(self, node): return self._new(_ast.TryFinally, self.visit(node.body), self.visit(node.final)) def visit_Assert(self, node): return self._new(_ast.Assert, self.visit(node.test), self.visit(node.fail)) def visit_Import(self, node): names = [self._new(_ast.alias, n[0], n[1]) for n in node.names] return self._new(_ast.Import, names) def visit_From(self, node): names = [self._new(_ast.alias, n[0], n[1]) for n in node.names] return self._new(_ast.ImportFrom, node.modname, names, 0) def visit_Exec(self, node): return self._new(_ast.Exec, self.visit(node.expr), self.visit(node.locals), self.visit(node.globals)) def visit_Global(self, node): return self._new(_ast.Global, node.names[:]) def visit_Discard(self, node): return self._new(_ast.Expr, self.visit(node.expr)) def _map_class(to): def _visit(self, node): return self._new(to) return _visit visit_Pass = _map_class(_ast.Pass) visit_Break = _map_class(_ast.Break) visit_Continue = _map_class(_ast.Continue) def _visit_BinOperator(opcls): def _visit(self, node): return self._new(_ast.BinOp, self.visit(node.left), opcls(), self.visit(node.right)) return _visit visit_Add = _visit_BinOperator(_ast.Add) visit_Div = _visit_BinOperator(_ast.Div) visit_FloorDiv = _visit_BinOperator(_ast.FloorDiv) visit_LeftShift = _visit_BinOperator(_ast.LShift) visit_Mod = _visit_BinOperator(_ast.Mod) visit_Mul = _visit_BinOperator(_ast.Mult) visit_Power = _visit_BinOperator(_ast.Pow) visit_RightShift = _visit_BinOperator(_ast.RShift) visit_Sub = _visit_BinOperator(_ast.Sub) del _visit_BinOperator def _visit_BitOperator(opcls): def _visit(self, node): def 
_make(nodes): if len(nodes) == 1: return self.visit(nodes[0]) left = _make(nodes[:-1]) right = self.visit(nodes[-1]) return self._new(_ast.BinOp, left, opcls(), right) return _make(node.nodes) return _visit visit_Bitand = _visit_BitOperator(_ast.BitAnd) visit_Bitor = _visit_BitOperator(_ast.BitOr) visit_Bitxor = _visit_BitOperator(_ast.BitXor) del _visit_BitOperator def _visit_UnaryOperator(opcls): def _visit(self, node): return self._new(_ast.UnaryOp, opcls(), self.visit(node.expr)) return _visit visit_Invert = _visit_UnaryOperator(_ast.Invert) visit_Not = _visit_UnaryOperator(_ast.Not) visit_UnaryAdd = _visit_UnaryOperator(_ast.UAdd) visit_UnarySub = _visit_UnaryOperator(_ast.USub) del _visit_UnaryOperator def _visit_BoolOperator(opcls): def _visit(self, node): values = [self.visit(n) for n in node.nodes] return self._new(_ast.BoolOp, opcls(), values) return _visit visit_And = _visit_BoolOperator(_ast.And) visit_Or = _visit_BoolOperator(_ast.Or) del _visit_BoolOperator cmp_operators = { '==': _ast.Eq, '!=': _ast.NotEq, '<': _ast.Lt, '<=': _ast.LtE, '>': _ast.Gt, '>=': _ast.GtE, 'is': _ast.Is, 'is not': _ast.IsNot, 'in': _ast.In, 'not in': _ast.NotIn, } def visit_Compare(self, node): left = self.visit(node.expr) ops = [] comparators = [] for optype, expr in node.ops: ops.append(self.cmp_operators[optype]()) comparators.append(self.visit(expr)) return self._new(_ast.Compare, left, ops, comparators) def visit_Lambda(self, node): args = self._extract_args(node) body = self.visit(node.code) return self._new(_ast.Lambda, args, body) def visit_IfExp(self, node): return self._new(_ast.IfExp, self.visit(node.test), self.visit(node.then), self.visit(node.else_)) def visit_Dict(self, node): keys = [self.visit(x[0]) for x in node.items] values = [self.visit(x[1]) for x in node.items] return self._new(_ast.Dict, keys, values) def visit_ListComp(self, node): generators = [self.visit(q) for q in node.quals] return self._new(_ast.ListComp, self.visit(node.expr), generators) def 
visit_GenExprInner(self, node): generators = [self.visit(q) for q in node.quals] return self._new(_ast.GeneratorExp, self.visit(node.expr), generators) def visit_GenExpr(self, node): return self.visit(node.code) def visit_GenExprFor(self, node): ifs = [self.visit(i) for i in node.ifs] return self._new(_ast.comprehension, self.visit(node.assign), self.visit(node.iter), ifs) def visit_ListCompFor(self, node): ifs = [self.visit(i) for i in node.ifs] return self._new(_ast.comprehension, self.visit(node.assign), self.visit(node.list), ifs) def visit_GenExprIf(self, node): return self.visit(node.test) visit_ListCompIf = visit_GenExprIf def visit_Yield(self, node): return self._new(_ast.Yield, self.visit(node.value)) def visit_CallFunc(self, node): args = [] keywords = [] for arg in node.args: if isinstance(arg, compiler.ast.Keyword): keywords.append(self._new(_ast.keyword, arg.name, self.visit(arg.expr))) else: args.append(self.visit(arg)) return self._new(_ast.Call, self.visit(node.node), args, keywords, self.visit(node.star_args), self.visit(node.dstar_args)) def visit_Backquote(self, node): return self._new(_ast.Repr, self.visit(node.expr)) def visit_Const(self, node): if node.value is None: # appears in slices return None elif isinstance(node.value, basestring): return self._new(_ast.Str, node.value) else: return self._new(_ast.Num, node.value) def visit_Name(self, node): return self._new(_ast.Name, node.name, _ast.Load()) def visit_Getattr(self, node): return self._new(_ast.Attribute, self.visit(node.expr), node.attrname, _ast.Load()) def visit_Tuple(self, node): nodes = [self.visit(n) for n in node.nodes] return self._new(_ast.Tuple, nodes, _ast.Load()) def visit_List(self, node): nodes = [self.visit(n) for n in node.nodes] return self._new(_ast.List, nodes, _ast.Load()) def get_ctx(self, flags): if flags == 'OP_DELETE': return _ast.Del() elif flags == 'OP_APPLY': return _ast.Load() elif flags == 'OP_ASSIGN': return _ast.Store() else: # FIXME Exception here assert 
False, repr(flags) def visit_AssName(self, node): self.out_flags = node.flags ctx = self.get_ctx(node.flags) return self._new(_ast.Name, node.name, ctx) def visit_AssAttr(self, node): self.out_flags = node.flags ctx = self.get_ctx(node.flags) return self._new(_ast.Attribute, self.visit(node.expr), node.attrname, ctx) def _visit_AssCollection(cls): def _visit(self, node): flags = None elts = [] for n in node.nodes: elts.append(self.visit(n)) if flags is None: flags = self.out_flags else: assert flags == self.out_flags self.out_flags = flags ctx = self.get_ctx(flags) return self._new(cls, elts, ctx) return _visit visit_AssList = _visit_AssCollection(_ast.List) visit_AssTuple = _visit_AssCollection(_ast.Tuple) del _visit_AssCollection def visit_Slice(self, node): lower = self.visit(node.lower) upper = self.visit(node.upper) ctx = self.get_ctx(node.flags) self.out_flags = node.flags return self._new(_ast.Subscript, self.visit(node.expr), self._new(_ast.Slice, lower, upper, None), ctx) def visit_Subscript(self, node): ctx = self.get_ctx(node.flags) subs = [self.visit(s) for s in node.subs] advanced = (_ast.Slice, _ast.Ellipsis) slices = [] nonindex = False for sub in subs: if isinstance(sub, advanced): nonindex = True slices.append(sub) else: slices.append(self._new(_ast.Index, sub)) if len(slices) == 1: slice = slices[0] elif nonindex: slice = self._new(_ast.ExtSlice, slices) else: slice = self._new(_ast.Tuple, slices, _ast.Load()) self.out_flags = node.flags return self._new(_ast.Subscript, self.visit(node.expr), slice, ctx) def visit_Sliceobj(self, node): a = [self.visit(n) for n in node.nodes + [None]*(3 - len(node.nodes))] return self._new(_ast.Slice, a[0], a[1], a[2]) def visit_Ellipsis(self, node): return self._new(_ast.Ellipsis) def visit_Stmt(self, node): def _check_del(n): # del x is just AssName('x', 'OP_DELETE') # we want to transform it to Delete([Name('x', Del())]) dcls = (_ast.Name, _ast.List, _ast.Subscript, _ast.Attribute) if isinstance(n, dcls) and 
isinstance(n.ctx, _ast.Del): return self._new(_ast.Delete, [n]) elif isinstance(n, _ast.Tuple) and isinstance(n.ctx, _ast.Del): # unpack last tuple to avoid making del (x, y, z,); # out of del x, y, z; (there's no difference between # this two in compiler.ast) return self._new(_ast.Delete, n.elts) else: return n def _keep(n): if isinstance(n, _ast.Expr) and n.value is None: return False else: return True return [s for s in [_check_del(self.visit(n)) for n in node.nodes] if _keep(s)] def parse(source, mode): node = compiler.parse(source, mode) return ASTUpgrader().visit(node)
[ [ 8, 0, 0.0277, 0.002, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0317, 0.002, 0, 0.66, 0.125, 738, 0, 1, 0, 0, 738, 0, 0 ], [ 1, 0, 0.0337, 0.002, 0, 0.66, ...
[ "\"\"\"Emulation of the proper abstract syntax tree API for Python 2.4.\"\"\"", "import compiler", "import compiler.ast", "from genshi.template import _ast24 as _ast", "__all__ = ['_ast', 'parse']", "__docformat__ = 'restructuredtext en'", "def _new(cls, *args, **kwargs):\n ret = cls()\n if ret._f...
# -*- coding: utf-8 -*- # # Copyright (C) 2006-2007 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. """Implementation of the template engine.""" from genshi.template.base import Context, Template, TemplateError, \ TemplateRuntimeError, TemplateSyntaxError, \ BadDirectiveError from genshi.template.loader import TemplateLoader, TemplateNotFound from genshi.template.markup import MarkupTemplate from genshi.template.text import TextTemplate, OldTextTemplate, NewTextTemplate __docformat__ = 'restructuredtext en'
[ [ 8, 0, 0.6087, 0.0435, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.7391, 0.1304, 0, 0.66, 0.2, 152, 0, 6, 0, 0, 152, 0, 0 ], [ 1, 0, 0.8261, 0.0435, 0, 0.66, ...
[ "\"\"\"Implementation of the template engine.\"\"\"", "from genshi.template.base import Context, Template, TemplateError, \\\n TemplateRuntimeError, TemplateSyntaxError, \\\n BadDirectiveError", "from genshi.template.loader import TemplateLoader, T...
# -*- coding: utf-8 -*- # # Copyright (C) 2008-2009 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. """Support classes for generating code from abstract syntax trees.""" try: import _ast except ImportError: from genshi.template.ast24 import _ast, parse else: def parse(source, mode): return compile(source, '', mode, _ast.PyCF_ONLY_AST) __docformat__ = 'restructuredtext en' class ASTCodeGenerator(object): """General purpose base class for AST transformations. Every visitor method can be overridden to return an AST node that has been altered or replaced in some way. """ def __init__(self, tree): self.lines_info = [] self.line_info = None self.code = '' self.line = None self.last = None self.indent = 0 self.blame_stack = [] self.visit(tree) if self.line.strip(): self.code += self.line + '\n' self.lines_info.append(self.line_info) self.line = None self.line_info = None def _change_indent(self, delta): self.indent += delta def _new_line(self): if self.line is not None: self.code += self.line + '\n' self.lines_info.append(self.line_info) self.line = ' '*4*self.indent if len(self.blame_stack) == 0: self.line_info = [] self.last = None else: self.line_info = [(0, self.blame_stack[-1],)] self.last = self.blame_stack[-1] def _write(self, s): if len(s) == 0: return if len(self.blame_stack) == 0: if self.last is not None: self.last = None self.line_info.append((len(self.line), self.last)) else: if self.last != self.blame_stack[-1]: self.last = self.blame_stack[-1] self.line_info.append((len(self.line), self.last)) self.line += s def visit(self, node): if node is None: return None if 
type(node) is tuple: return tuple([self.visit(n) for n in node]) try: self.blame_stack.append((node.lineno, node.col_offset,)) info = True except AttributeError: info = False visitor = getattr(self, 'visit_%s' % node.__class__.__name__, None) if visitor is None: raise Exception('Unhandled node type %r' % type(node)) ret = visitor(node) if info: self.blame_stack.pop() return ret def visit_Module(self, node): for n in node.body: self.visit(n) visit_Interactive = visit_Module visit_Suite = visit_Module def visit_Expression(self, node): self._new_line() return self.visit(node.body) # arguments = (expr* args, identifier? vararg, # identifier? kwarg, expr* defaults) def visit_arguments(self, node): first = True no_default_count = len(node.args) - len(node.defaults) for i, arg in enumerate(node.args): if not first: self._write(', ') else: first = False self.visit(arg) if i >= no_default_count: self._write('=') self.visit(node.defaults[i - no_default_count]) if getattr(node, 'vararg', None): if not first: self._write(', ') else: first = False self._write('*' + node.vararg) if getattr(node, 'kwarg', None): if not first: self._write(', ') else: first = False self._write('**' + node.kwarg) # FunctionDef(identifier name, arguments args, # stmt* body, expr* decorators) def visit_FunctionDef(self, node): for decorator in getattr(node, 'decorators', ()): self._new_line() self._write('@') self.visit(decorator) self._new_line() self._write('def ' + node.name + '(') self.visit(node.args) self._write('):') self._change_indent(1) for statement in node.body: self.visit(statement) self._change_indent(-1) # ClassDef(identifier name, expr* bases, stmt* body) def visit_ClassDef(self, node): self._new_line() self._write('class ' + node.name) if node.bases: self._write('(') self.visit(node.bases[0]) for base in node.bases[1:]: self._write(', ') self.visit(base) self._write(')') self._write(':') self._change_indent(1) for statement in node.body: self.visit(statement) self._change_indent(-1) # 
Return(expr? value) def visit_Return(self, node): self._new_line() self._write('return') if getattr(node, 'value', None): self._write(' ') self.visit(node.value) # Delete(expr* targets) def visit_Delete(self, node): self._new_line() self._write('del ') self.visit(node.targets[0]) for target in node.targets[1:]: self._write(', ') self.visit(target) # Assign(expr* targets, expr value) def visit_Assign(self, node): self._new_line() for target in node.targets: self.visit(target) self._write(' = ') self.visit(node.value) # AugAssign(expr target, operator op, expr value) def visit_AugAssign(self, node): self._new_line() self.visit(node.target) self._write(' ' + self.binary_operators[node.op.__class__] + '= ') self.visit(node.value) # Print(expr? dest, expr* values, bool nl) def visit_Print(self, node): self._new_line() self._write('print') if getattr(node, 'dest', None): self._write(' >> ') self.visit(node.dest) if getattr(node, 'values', None): self._write(', ') else: self._write(' ') if getattr(node, 'values', None): self.visit(node.values[0]) for value in node.values[1:]: self._write(', ') self.visit(value) if not node.nl: self._write(',') # For(expr target, expr iter, stmt* body, stmt* orelse) def visit_For(self, node): self._new_line() self._write('for ') self.visit(node.target) self._write(' in ') self.visit(node.iter) self._write(':') self._change_indent(1) for statement in node.body: self.visit(statement) self._change_indent(-1) if getattr(node, 'orelse', None): self._new_line() self._write('else:') self._change_indent(1) for statement in node.orelse: self.visit(statement) self._change_indent(-1) # While(expr test, stmt* body, stmt* orelse) def visit_While(self, node): self._new_line() self._write('while ') self.visit(node.test) self._write(':') self._change_indent(1) for statement in node.body: self.visit(statement) self._change_indent(-1) if getattr(node, 'orelse', None): self._new_line() self._write('else:') self._change_indent(1) for statement in node.orelse: 
self.visit(statement) self._change_indent(-1) # If(expr test, stmt* body, stmt* orelse) def visit_If(self, node): self._new_line() self._write('if ') self.visit(node.test) self._write(':') self._change_indent(1) for statement in node.body: self.visit(statement) self._change_indent(-1) if getattr(node, 'orelse', None): self._new_line() self._write('else:') self._change_indent(1) for statement in node.orelse: self.visit(statement) self._change_indent(-1) # With(expr context_expr, expr? optional_vars, stmt* body) def visit_With(self, node): self._new_line() self._write('with ') self.visit(node.context_expr) if getattr(node, 'optional_vars', None): self._write(' as ') self.visit(node.optional_vars) self._write(':') self._change_indent(1) for statement in node.body: self.visit(statement) self._change_indent(-1) # Raise(expr? type, expr? inst, expr? tback) def visit_Raise(self, node): self._new_line() self._write('raise') if not node.type: return self._write(' ') self.visit(node.type) if not node.inst: return self._write(', ') self.visit(node.inst) if not node.tback: return self._write(', ') self.visit(node.tback) # TryExcept(stmt* body, excepthandler* handlers, stmt* orelse) def visit_TryExcept(self, node): self._new_line() self._write('try:') self._change_indent(1) for statement in node.body: self.visit(statement) self._change_indent(-1) if getattr(node, 'handlers', None): for handler in node.handlers: self.visit(handler) self._new_line() if getattr(node, 'orelse', None): self._write('else:') self._change_indent(1) for statement in node.orelse: self.visit(statement) self._change_indent(-1) # excepthandler = (expr? type, expr? 
name, stmt* body) def visit_ExceptHandler(self, node): self._new_line() self._write('except') if getattr(node, 'type', None): self._write(' ') self.visit(node.type) if getattr(node, 'name', None): self._write(', ') self.visit(node.name) self._write(':') self._change_indent(1) for statement in node.body: self.visit(statement) self._change_indent(-1) visit_excepthandler = visit_ExceptHandler # TryFinally(stmt* body, stmt* finalbody) def visit_TryFinally(self, node): self._new_line() self._write('try:') self._change_indent(1) for statement in node.body: self.visit(statement) self._change_indent(-1) if getattr(node, 'finalbody', None): self._new_line() self._write('finally:') self._change_indent(1) for statement in node.finalbody: self.visit(statement) self._change_indent(-1) # Assert(expr test, expr? msg) def visit_Assert(self, node): self._new_line() self._write('assert ') self.visit(node.test) if getattr(node, 'msg', None): self._write(', ') self.visit(node.msg) def visit_alias(self, node): self._write(node.name) if getattr(node, 'asname', None): self._write(' as ') self._write(node.asname) # Import(alias* names) def visit_Import(self, node): self._new_line() self._write('import ') self.visit(node.names[0]) for name in node.names[1:]: self._write(', ') self.visit(name) # ImportFrom(identifier module, alias* names, int? level) def visit_ImportFrom(self, node): self._new_line() self._write('from ') if node.level: self._write('.' * node.level) self._write(node.module) self._write(' import ') self.visit(node.names[0]) for name in node.names[1:]: self._write(', ') self.visit(name) # Exec(expr body, expr? globals, expr? 
locals) def visit_Exec(self, node): self._new_line() self._write('exec ') self.visit(node.body) if not node.globals: return self._write(', ') self.visit(node.globals) if not node.locals: return self._write(', ') self.visit(node.locals) # Global(identifier* names) def visit_Global(self, node): self._new_line() self._write('global ') self.visit(node.names[0]) for name in node.names[1:]: self._write(', ') self.visit(name) # Expr(expr value) def visit_Expr(self, node): self._new_line() self.visit(node.value) # Pass def visit_Pass(self, node): self._new_line() self._write('pass') # Break def visit_Break(self, node): self._new_line() self._write('break') # Continue def visit_Continue(self, node): self._new_line() self._write('continue') ### EXPRESSIONS def with_parens(f): def _f(self, node): self._write('(') f(self, node) self._write(')') return _f bool_operators = {_ast.And: 'and', _ast.Or: 'or'} # BoolOp(boolop op, expr* values) @with_parens def visit_BoolOp(self, node): joiner = ' ' + self.bool_operators[node.op.__class__] + ' ' self.visit(node.values[0]) for value in node.values[1:]: self._write(joiner) self.visit(value) binary_operators = { _ast.Add: '+', _ast.Sub: '-', _ast.Mult: '*', _ast.Div: '/', _ast.Mod: '%', _ast.Pow: '**', _ast.LShift: '<<', _ast.RShift: '>>', _ast.BitOr: '|', _ast.BitXor: '^', _ast.BitAnd: '&', _ast.FloorDiv: '//' } # BinOp(expr left, operator op, expr right) @with_parens def visit_BinOp(self, node): self.visit(node.left) self._write(' ' + self.binary_operators[node.op.__class__] + ' ') self.visit(node.right) unary_operators = { _ast.Invert: '~', _ast.Not: 'not', _ast.UAdd: '+', _ast.USub: '-', } # UnaryOp(unaryop op, expr operand) def visit_UnaryOp(self, node): self._write(self.unary_operators[node.op.__class__] + ' ') self.visit(node.operand) # Lambda(arguments args, expr body) @with_parens def visit_Lambda(self, node): self._write('lambda ') self.visit(node.args) self._write(': ') self.visit(node.body) # IfExp(expr test, expr body, expr 
orelse) @with_parens def visit_IfExp(self, node): self.visit(node.body) self._write(' if ') self.visit(node.test) self._write(' else ') self.visit(node.orelse) # Dict(expr* keys, expr* values) def visit_Dict(self, node): self._write('{') for key, value in zip(node.keys, node.values): self.visit(key) self._write(': ') self.visit(value) self._write(', ') self._write('}') # ListComp(expr elt, comprehension* generators) def visit_ListComp(self, node): self._write('[') self.visit(node.elt) for generator in node.generators: # comprehension = (expr target, expr iter, expr* ifs) self._write(' for ') self.visit(generator.target) self._write(' in ') self.visit(generator.iter) for ifexpr in generator.ifs: self._write(' if ') self.visit(ifexpr) self._write(']') # GeneratorExp(expr elt, comprehension* generators) def visit_GeneratorExp(self, node): self._write('(') self.visit(node.elt) for generator in node.generators: # comprehension = (expr target, expr iter, expr* ifs) self._write(' for ') self.visit(generator.target) self._write(' in ') self.visit(generator.iter) for ifexpr in generator.ifs: self._write(' if ') self.visit(ifexpr) self._write(')') # Yield(expr? value) def visit_Yield(self, node): self._write('yield') if getattr(node, 'value', None): self._write(' ') self.visit(node.value) comparision_operators = { _ast.Eq: '==', _ast.NotEq: '!=', _ast.Lt: '<', _ast.LtE: '<=', _ast.Gt: '>', _ast.GtE: '>=', _ast.Is: 'is', _ast.IsNot: 'is not', _ast.In: 'in', _ast.NotIn: 'not in', } # Compare(expr left, cmpop* ops, expr* comparators) @with_parens def visit_Compare(self, node): self.visit(node.left) for op, comparator in zip(node.ops, node.comparators): self._write(' ' + self.comparision_operators[op.__class__] + ' ') self.visit(comparator) # Call(expr func, expr* args, keyword* keywords, # expr? starargs, expr? 
kwargs) def visit_Call(self, node): self.visit(node.func) self._write('(') first = True for arg in node.args: if not first: self._write(', ') first = False self.visit(arg) for keyword in node.keywords: if not first: self._write(', ') first = False # keyword = (identifier arg, expr value) self._write(keyword.arg) self._write('=') self.visit(keyword.value) if getattr(node, 'starargs', None): if not first: self._write(', ') first = False self._write('*') self.visit(node.starargs) if getattr(node, 'kwargs', None): if not first: self._write(', ') first = False self._write('**') self.visit(node.kwargs) self._write(')') # Repr(expr value) def visit_Repr(self, node): self._write('`') self.visit(node.value) self._write('`') # Num(object n) def visit_Num(self, node): self._write(repr(node.n)) # Str(string s) def visit_Str(self, node): self._write(repr(node.s)) # Attribute(expr value, identifier attr, expr_context ctx) def visit_Attribute(self, node): self.visit(node.value) self._write('.') self._write(node.attr) # Subscript(expr value, slice slice, expr_context ctx) def visit_Subscript(self, node): self.visit(node.value) self._write('[') def _process_slice(node): if isinstance(node, _ast.Ellipsis): self._write('...') elif isinstance(node, _ast.Slice): if getattr(node, 'lower', 'None'): self.visit(node.lower) self._write(':') if getattr(node, 'upper', None): self.visit(node.upper) if getattr(node, 'step', None): self._write(':') self.visit(node.step) elif isinstance(node, _ast.Index): self.visit(node.value) elif isinstance(node, _ast.ExtSlice): self.visit(node.dims[0]) for dim in node.dims[1:]: self._write(', ') self.visit(dim) else: raise NotImplemented('Slice type not implemented') _process_slice(node.slice) self._write(']') # Name(identifier id, expr_context ctx) def visit_Name(self, node): self._write(node.id) # List(expr* elts, expr_context ctx) def visit_List(self, node): self._write('[') for elt in node.elts: self.visit(elt) self._write(', ') self._write(']') # 
# Tuple(expr* elts, expr_context ctx)
def visit_Tuple(self, node):
    # Emit a tuple display.  The unconditional trailing comma after every
    # element keeps one-element tuples syntactically valid, e.g. "(x, )".
    self._write('(')
    for elt in node.elts:
        self.visit(elt)
        self._write(', ')
    self._write(')')


class ASTTransformer(object):
    """General purpose base class for AST transformations.

    Every visitor method can be overridden to return an AST node that has
    been altered or replaced in some way.
    """

    def visit(self, node):
        """Dispatch ``node`` to its ``visit_<ClassName>`` handler.

        ``None`` passes through unchanged, tuples are visited element-wise,
        and nodes without a matching handler are returned untransformed.
        """
        if node is None:
            return None
        if type(node) is tuple:
            return tuple([self.visit(n) for n in node])
        visitor = getattr(self, 'visit_%s' % node.__class__.__name__, None)
        if visitor is None:
            return node
        return visitor(node)

    def _clone(self, node):
        """Return a copy of ``node`` with every child field visited.

        Attributes listed in ``_attributes`` (e.g. ``lineno``) are copied
        verbatim; fields listed in ``_fields`` are recursively run through
        :meth:`visit` so subclasses can rewrite nested nodes.
        """
        clone = node.__class__()
        for name in getattr(clone, '_attributes', ()):
            # BUG FIX: the original called setattr(clone, 'name', ...) with
            # the literal string 'name' instead of the iterated attribute,
            # so lineno/col_offset were never propagated to clones.
            try:
                setattr(clone, name, getattr(node, name))
            except AttributeError:
                pass
        for name in clone._fields:
            try:
                value = getattr(node, name)
            except AttributeError:
                # Field not present on this particular instance; leave unset.
                pass
            else:
                if value is None:
                    pass
                elif isinstance(value, list):
                    value = [self.visit(x) for x in value]
                elif isinstance(value, tuple):
                    value = tuple(self.visit(x) for x in value)
                else:
                    value = self.visit(value)
                setattr(clone, name, value)
        return clone

    visit_Module = _clone
    visit_Interactive = _clone
    visit_Expression = _clone
    visit_Suite = _clone

    visit_FunctionDef = _clone
    visit_ClassDef = _clone
    visit_Return = _clone
    visit_Delete = _clone
    visit_Assign = _clone
    visit_AugAssign = _clone
    visit_Print = _clone
    visit_For = _clone
    visit_While = _clone
    visit_If = _clone
    visit_With = _clone
    visit_Raise = _clone
    visit_TryExcept = _clone
    visit_TryFinally = _clone
    visit_Assert = _clone
    visit_Import = _clone
    visit_ImportFrom = _clone
    visit_Exec = _clone
    visit_Global = _clone
    visit_Expr = _clone
    # Pass, Break, Continue don't need to be copied

    visit_BoolOp = _clone
    visit_BinOp = _clone
    visit_UnaryOp = _clone
    visit_Lambda = _clone
    visit_IfExp = _clone
    visit_Dict = _clone
    visit_ListComp = _clone
    visit_GeneratorExp = _clone
    visit_Yield = _clone
    visit_Compare = _clone
    visit_Call = _clone
    visit_Repr = _clone
    # Num, Str don't need to be copied

    visit_Attribute = _clone
    visit_Subscript = _clone
    visit_Name = _clone
    visit_List = _clone
    visit_Tuple = _clone

    visit_comprehension = _clone
    visit_excepthandler = _clone
    visit_arguments = _clone
    visit_keyword = _clone
    visit_alias = _clone

    visit_Slice = _clone
    visit_ExtSlice = _clone
    visit_Index = _clone

    del _clone
[ [ 8, 0, 0.018, 0.0013, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 7, 0, 0.0244, 0.009, 0, 0.66, 0.25, 0, 0, 1, 0, 0, 0, 0, 1 ], [ 1, 1, 0.0219, 0.0013, 1, 0.58, 0,...
[ "\"\"\"Support classes for generating code from abstract syntax trees.\"\"\"", "try:\n import _ast\nexcept ImportError:\n from genshi.template.ast24 import _ast, parse\nelse:\n def parse(source, mode):\n return compile(source, '', mode, _ast.PyCF_ONLY_AST)", " import _ast", " from genshi...
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.

"""Template loading and caching."""

import os
try:
    import threading
except ImportError:
    # Fall back to the no-op lock implementation on platforms without threads
    import dummy_threading as threading

from genshi.template.base import TemplateError
from genshi.util import LRUCache

__all__ = ['TemplateLoader', 'TemplateNotFound', 'directory', 'package',
           'prefixed']
__docformat__ = 'restructuredtext en'


class TemplateNotFound(TemplateError):
    """Exception raised when a specific template file could not be found."""

    def __init__(self, name, search_path):
        """Create the exception.

        :param name: the filename of the template
        :param search_path: the search path used to lookup the template
        """
        TemplateError.__init__(self, 'Template "%s" not found' % name)
        self.search_path = search_path


class TemplateLoader(object):
    """Responsible for loading templates from files on the specified search
    path.

    >>> import tempfile
    >>> fd, path = tempfile.mkstemp(suffix='.html', prefix='template')
    >>> os.write(fd, '<p>$var</p>')
    11
    >>> os.close(fd)

    The template loader accepts a list of directory paths that are then used
    when searching for template files, in the given order:

    >>> loader = TemplateLoader([os.path.dirname(path)])

    The `load()` method first checks the template cache whether the requested
    template has already been loaded. If not, it attempts to locate the
    template file, and returns the corresponding `Template` object:

    >>> from genshi.template import MarkupTemplate
    >>> template = loader.load(os.path.basename(path))
    >>> isinstance(template, MarkupTemplate)
    True

    Template instances are cached: requesting a template with the same name
    results in the same instance being returned:

    >>> loader.load(os.path.basename(path)) is template
    True

    The `auto_reload` option can be used to control whether a template should
    be automatically reloaded when the file it was loaded from has been
    changed. Disable this automatic reloading to improve performance.

    >>> os.remove(path)
    """

    def __init__(self, search_path=None, auto_reload=False,
                 default_encoding=None, max_cache_size=25, default_class=None,
                 variable_lookup='strict', allow_exec=True, callback=None):
        """Create the template loader.

        :param search_path: a list of absolute path names that should be
                            searched for template files, or a string containing
                            a single absolute path; alternatively, any item on
                            the list may be a ''load function'' that is passed
                            a filename and returns a file-like object and some
                            metadata
        :param auto_reload: whether to check the last modification time of
                            template files, and reload them if they have
                            changed
        :param default_encoding: the default encoding to assume when loading
                                 templates; defaults to UTF-8
        :param max_cache_size: the maximum number of templates to keep in the
                               cache
        :param default_class: the default `Template` subclass to use when
                              instantiating templates
        :param variable_lookup: the variable lookup mechanism; either "strict"
                                (the default), "lenient", or a custom lookup
                                class
        :param allow_exec: whether to allow Python code blocks in templates
        :param callback: (optional) a callback function that is invoked after
                         a template was initialized by this loader; the
                         function is passed the template object as only
                         argument. This callback can be used for example to
                         add any desired filters to the template
        :see: `LenientLookup`, `StrictLookup`
        :note: Changed in 0.5: Added the `allow_exec` argument
        """
        # Imported here rather than at module level, presumably to avoid a
        # circular import between loader and markup — TODO confirm
        from genshi.template.markup import MarkupTemplate

        # Normalize search_path: None -> empty list, single path -> one-item
        # list, so the rest of the code can treat it uniformly as a sequence
        self.search_path = search_path
        if self.search_path is None:
            self.search_path = []
        elif not isinstance(self.search_path, (list, tuple)):
            self.search_path = [self.search_path]

        self.auto_reload = auto_reload
        """Whether templates should be reloaded when the underlying file is
        changed"""

        self.default_encoding = default_encoding
        self.default_class = default_class or MarkupTemplate
        self.variable_lookup = variable_lookup
        self.allow_exec = allow_exec
        # Reject non-callable callbacks eagerly instead of failing later at
        # template-load time
        if callback is not None and not hasattr(callback, '__call__'):
            raise TypeError('The "callback" parameter needs to be callable')
        self.callback = callback
        # LRU cache of parsed templates, keyed by normalized filename, plus a
        # parallel map of "is the cached entry still fresh?" check functions
        self._cache = LRUCache(max_cache_size)
        self._uptodate = {}
        # Serializes cache access and template instantiation across threads
        self._lock = threading.RLock()

    def load(self, filename, relative_to=None, cls=None, encoding=None):
        """Load the template with the given name.

        If the `filename` parameter is relative, this method searches the
        search path trying to locate a template matching the given name. If
        the file name is an absolute path, the search path is ignored.

        If the requested template is not found, a `TemplateNotFound` exception
        is raised. Otherwise, a `Template` object is returned that represents
        the parsed template.

        Template instances are cached to avoid having to parse the same
        template file more than once. Thus, subsequent calls of this method
        with the same template file name will return the same `Template`
        object (unless the ``auto_reload`` option is enabled and the file was
        changed since the last parse.)

        If the `relative_to` parameter is provided, the `filename` is
        interpreted as being relative to that path.

        :param filename: the relative path of the template file to load
        :param relative_to: the filename of the template from which the new
                            template is being loaded, or ``None`` if the
                            template is being loaded directly
        :param cls: the class of the template object to instantiate
        :param encoding: the encoding of the template to load; defaults to the
                         ``default_encoding`` of the loader instance
        :return: the loaded `Template` instance
        :raises TemplateNotFound: if a template with the given name could not
                                  be found
        """
        if cls is None:
            cls = self.default_class
        search_path = self.search_path

        # Make the filename relative to the template file its being loaded
        # from, but only if that file is specified as a relative path, or no
        # search path has been set up
        if relative_to and (not search_path or not os.path.isabs(relative_to)):
            filename = os.path.join(os.path.dirname(relative_to), filename)

        filename = os.path.normpath(filename)
        cachekey = filename

        self._lock.acquire()
        try:
            # First check the cache to avoid reparsing the same file
            try:
                tmpl = self._cache[cachekey]
                if not self.auto_reload:
                    return tmpl
                uptodate = self._uptodate[cachekey]
                if uptodate is not None and uptodate():
                    return tmpl
            # OSError may come from the uptodate() check (e.g. the file was
            # deleted); in either case fall through and reload from disk
            except (KeyError, OSError):
                pass

            isabs = False

            if os.path.isabs(filename):
                # Bypass the search path if the requested filename is absolute
                search_path = [os.path.dirname(filename)]
                isabs = True

            elif relative_to and os.path.isabs(relative_to):
                # Make sure that the directory containing the including
                # template is on the search path
                dirname = os.path.dirname(relative_to)
                if dirname not in search_path:
                    search_path = list(search_path) + [dirname]
                isabs = True

            elif not search_path:
                # Uh oh, don't know where to look for the template
                raise TemplateError('Search path for templates not configured')

            for loadfunc in search_path:
                # Plain string entries are shorthand for directory loaders
                if isinstance(loadfunc, basestring):
                    loadfunc = directory(loadfunc)
                try:
                    filepath, filename, fileobj, uptodate = loadfunc(filename)
                except IOError:
                    # Not found via this loader; try the next search path entry
                    continue
                else:
                    try:
                        if isabs:
                            # If the filename of either the included or the
                            # including template is absolute, make sure the
                            # included template gets an absolute path, too,
                            # so that nested includes work properly without a
                            # search path
                            filename = filepath
                        tmpl = self._instantiate(cls, fileobj, filepath,
                                                 filename, encoding=encoding)
                        if self.callback:
                            self.callback(tmpl)
                        self._cache[cachekey] = tmpl
                        self._uptodate[cachekey] = uptodate
                    finally:
                        # Always release the file handle, even if parsing or
                        # the callback raised
                        if hasattr(fileobj, 'close'):
                            fileobj.close()
                    return tmpl

            raise TemplateNotFound(filename, search_path)

        finally:
            self._lock.release()

    def _instantiate(self, cls, fileobj, filepath, filename, encoding=None):
        """Instantiate and return the `Template` object based on the given
        class and parameters.

        This function is intended for subclasses to override if they need to
        implement special template instantiation logic. Code that just uses
        the `TemplateLoader` should use the `load` method instead.

        :param cls: the class of the template object to instantiate
        :param fileobj: a readable file-like object containing the template
                        source
        :param filepath: the absolute path to the template file
        :param filename: the path to the template file relative to the search
                         path
        :param encoding: the encoding of the template to load; defaults to the
                         ``default_encoding`` of the loader instance
        :return: the loaded `Template` instance
        :rtype: `Template`
        """
        if encoding is None:
            encoding = self.default_encoding
        return cls(fileobj, filepath=filepath, filename=filename, loader=self,
                   encoding=encoding, lookup=self.variable_lookup,
                   allow_exec=self.allow_exec)

    @staticmethod
    def directory(path):
        """Loader factory for loading templates from a local directory.

        :param path: the path to the local directory containing the templates
        :return: the loader function to load templates from the given
                 directory
        :rtype: ``function``
        """
        def _load_from_directory(filename):
            filepath = os.path.join(path, filename)
            # 'U' = universal-newlines mode (Python 2 idiom)
            fileobj = open(filepath, 'U')
            # Capture the mtime at load time; the closure below reports the
            # template as up-to-date while the mtime is unchanged
            mtime = os.path.getmtime(filepath)
            def _uptodate():
                return mtime == os.path.getmtime(filepath)
            return filepath, filename, fileobj, _uptodate
        return _load_from_directory

    @staticmethod
    def package(name, path):
        """Loader factory for loading templates from egg package data.

        :param name: the name of the package containing the resources
        :param path: the path inside the package data
        :return: the loader function to load templates from the given package
        :rtype: ``function``
        """
        from pkg_resources import resource_stream
        def _load_from_package(filename):
            filepath = os.path.join(path, filename)
            # No uptodate function: package resources are assumed immutable
            return filepath, filename, resource_stream(name, filepath), None
        return _load_from_package

    @staticmethod
    def prefixed(**delegates):
        """Factory for a load function that delegates to other loaders
        depending on the prefix of the requested template path.

        The prefix is stripped from the filename when passing on the load
        request to the delegate.

        >>> load = prefixed(
        ...     app1 = lambda filename: ('app1', filename, None, None),
        ...     app2 = lambda filename: ('app2', filename, None, None)
        ... )
        >>> print(load('app1/foo.html'))
        ('app1', 'app1/foo.html', None, None)
        >>> print(load('app2/bar.html'))
        ('app2', 'app2/bar.html', None, None)

        :param delegates: mapping of path prefixes to loader functions
        :return: the loader function
        :rtype: ``function``
        """
        def _dispatch_by_prefix(filename):
            for prefix, delegate in delegates.items():
                if filename.startswith(prefix):
                    # A string delegate is shorthand for a directory loader
                    if isinstance(delegate, basestring):
                        delegate = directory(delegate)
                    filepath, _, fileobj, uptodate = delegate(
                        filename[len(prefix):].lstrip('/\\')
                    )
                    # Report the original (prefixed) filename back to callers
                    return filepath, filename, fileobj, uptodate
            raise TemplateNotFound(filename, list(delegates.keys()))
        return _dispatch_by_prefix


directory = TemplateLoader.directory
package = TemplateLoader.package
prefixed = TemplateLoader.prefixed
[ [ 8, 0, 0.0418, 0.003, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0478, 0.003, 0, 0.66, 0.0909, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 7, 0, 0.0552, 0.0119, 0, 0.66, ...
[ "\"\"\"Template loading and caching.\"\"\"", "import os", "try:\n import threading\nexcept ImportError:\n import dummy_threading as threading", " import threading", " import dummy_threading as threading", "from genshi.template.base import TemplateError", "from genshi.util import LRUCache", ...
# -*- coding: utf-8 -*- # # Copyright (C) 2007-2009 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. """A filter for functional-style transformations of markup streams. The `Transformer` filter provides a variety of transformations that can be applied to parts of streams that match given XPath expressions. These transformations can be chained to achieve results that would be comparitively tedious to achieve by writing stream filters by hand. The approach of chaining node selection and transformation has been inspired by the `jQuery`_ Javascript library. .. _`jQuery`: http://jquery.com/ For example, the following transformation removes the ``<title>`` element from the ``<head>`` of the input document: >>> from genshi.builder import tag >>> html = HTML('''<html> ... <head><title>Some Title</title></head> ... <body> ... Some <em>body</em> text. ... </body> ... </html>''') >>> print(html | Transformer('body/em').map(unicode.upper, TEXT) ... .unwrap().wrap(tag.u)) <html> <head><title>Some Title</title></head> <body> Some <u>BODY</u> text. </body> </html> The ``Transformer`` support a large number of useful transformations out of the box, but custom transformations can be added easily. 
:since: version 0.5 """ import re import sys from genshi.builder import Element from genshi.core import Stream, Attrs, QName, TEXT, START, END, _ensure, Markup from genshi.path import Path __all__ = ['Transformer', 'StreamBuffer', 'InjectorTransformation', 'ENTER', 'EXIT', 'INSIDE', 'OUTSIDE', 'BREAK'] class TransformMark(str): """A mark on a transformation stream.""" __slots__ = [] _instances = {} def __new__(cls, val): return cls._instances.setdefault(val, str.__new__(cls, val)) ENTER = TransformMark('ENTER') """Stream augmentation mark indicating that a selected element is being entered.""" INSIDE = TransformMark('INSIDE') """Stream augmentation mark indicating that processing is currently inside a selected element.""" OUTSIDE = TransformMark('OUTSIDE') """Stream augmentation mark indicating that a match occurred outside a selected element.""" ATTR = TransformMark('ATTR') """Stream augmentation mark indicating a selected element attribute.""" EXIT = TransformMark('EXIT') """Stream augmentation mark indicating that a selected element is being exited.""" BREAK = TransformMark('BREAK') """Stream augmentation mark indicating a break between two otherwise contiguous blocks of marked events. This is used primarily by the cut() transform to provide later transforms with an opportunity to operate on the cut buffer. """ class PushBackStream(object): """Allows a single event to be pushed back onto the stream and re-consumed. """ def __init__(self, stream): self.stream = iter(stream) self.peek = None def push(self, event): assert self.peek is None self.peek = event def __iter__(self): while True: if self.peek is not None: peek = self.peek self.peek = None yield peek else: try: event = self.stream.next() yield event except StopIteration: if self.peek is None: raise class Transformer(object): """Stream filter that can apply a variety of different transformations to a stream. 
This is achieved by selecting the events to be transformed using XPath, then applying the transformations to the events matched by the path expression. Each marked event is in the form (mark, (kind, data, pos)), where mark can be any of `ENTER`, `INSIDE`, `EXIT`, `OUTSIDE`, or `None`. The first three marks match `START` and `END` events, and any events contained `INSIDE` any selected XML/HTML element. A non-element match outside a `START`/`END` container (e.g. ``text()``) will yield an `OUTSIDE` mark. >>> html = HTML('<html><head><title>Some Title</title></head>' ... '<body>Some <em>body</em> text.</body></html>') Transformations act on selected stream events matching an XPath expression. Here's an example of removing some markup (the title, in this case) selected by an expression: >>> print(html | Transformer('head/title').remove()) <html><head/><body>Some <em>body</em> text.</body></html> Inserted content can be passed in the form of a string, or a markup event stream, which includes streams generated programmatically via the `builder` module: >>> from genshi.builder import tag >>> print(html | Transformer('body').prepend(tag.h1('Document Title'))) <html><head><title>Some Title</title></head><body><h1>Document Title</h1>Some <em>body</em> text.</body></html> Each XPath expression determines the set of tags that will be acted upon by subsequent transformations. In this example we select the ``<title>`` text, copy it into a buffer, then select the ``<body>`` element and paste the copied text into the body as ``<h1>`` enclosed text: >>> buffer = StreamBuffer() >>> print(html | Transformer('head/title/text()').copy(buffer) ... 
.end().select('body').prepend(tag.h1(buffer))) <html><head><title>Some Title</title></head><body><h1>Some Title</h1>Some <em>body</em> text.</body></html> Transformations can also be assigned and reused, although care must be taken when using buffers, to ensure that buffers are cleared between transforms: >>> emphasis = Transformer('body//em').attr('class', 'emphasis') >>> print(html | emphasis) <html><head><title>Some Title</title></head><body>Some <em class="emphasis">body</em> text.</body></html> """ __slots__ = ['transforms'] def __init__(self, path='.'): """Construct a new transformation filter. :param path: an XPath expression (as string) or a `Path` instance """ self.transforms = [SelectTransformation(path)] def __call__(self, stream, keep_marks=False): """Apply the transform filter to the marked stream. :param stream: the marked event stream to filter :param keep_marks: Do not strip transformer selection marks from the stream. Useful for testing. :return: the transformed stream :rtype: `Stream` """ transforms = self._mark(stream) for link in self.transforms: transforms = link(transforms) if not keep_marks: transforms = self._unmark(transforms) return Stream(transforms, serializer=getattr(stream, 'serializer', None)) def apply(self, function): """Apply a transformation to the stream. Transformations can be chained, similar to stream filters. Any callable accepting a marked stream can be used as a transform. As an example, here is a simple `TEXT` event upper-casing transform: >>> def upper(stream): ... for mark, (kind, data, pos) in stream: ... if mark and kind is TEXT: ... yield mark, (kind, data.upper(), pos) ... else: ... 
yield mark, (kind, data, pos) >>> short_stream = HTML('<body>Some <em>test</em> text</body>') >>> print(short_stream | Transformer('.//em/text()').apply(upper)) <body>Some <em>TEST</em> text</body> """ transformer = Transformer() transformer.transforms = self.transforms[:] if isinstance(function, Transformer): transformer.transforms.extend(function.transforms) else: transformer.transforms.append(function) return transformer #{ Selection operations def select(self, path): """Mark events matching the given XPath expression, within the current selection. >>> html = HTML('<body>Some <em>test</em> text</body>') >>> print(html | Transformer().select('.//em').trace()) (None, ('START', (QName('body'), Attrs()), (None, 1, 0))) (None, ('TEXT', u'Some ', (None, 1, 6))) ('ENTER', ('START', (QName('em'), Attrs()), (None, 1, 11))) ('INSIDE', ('TEXT', u'test', (None, 1, 15))) ('EXIT', ('END', QName('em'), (None, 1, 19))) (None, ('TEXT', u' text', (None, 1, 24))) (None, ('END', QName('body'), (None, 1, 29))) <body>Some <em>test</em> text</body> :param path: an XPath expression (as string) or a `Path` instance :return: the stream augmented by transformation marks :rtype: `Transformer` """ return self.apply(SelectTransformation(path)) def invert(self): """Invert selection so that marked events become unmarked, and vice versa. Specificaly, all marks are converted to null marks, and all null marks are converted to OUTSIDE marks. 
>>> html = HTML('<body>Some <em>test</em> text</body>') >>> print(html | Transformer('//em').invert().trace()) ('OUTSIDE', ('START', (QName('body'), Attrs()), (None, 1, 0))) ('OUTSIDE', ('TEXT', u'Some ', (None, 1, 6))) (None, ('START', (QName('em'), Attrs()), (None, 1, 11))) (None, ('TEXT', u'test', (None, 1, 15))) (None, ('END', QName('em'), (None, 1, 19))) ('OUTSIDE', ('TEXT', u' text', (None, 1, 24))) ('OUTSIDE', ('END', QName('body'), (None, 1, 29))) <body>Some <em>test</em> text</body> :rtype: `Transformer` """ return self.apply(InvertTransformation()) def end(self): """End current selection, allowing all events to be selected. Example: >>> html = HTML('<body>Some <em>test</em> text</body>') >>> print(html | Transformer('//em').end().trace()) ('OUTSIDE', ('START', (QName('body'), Attrs()), (None, 1, 0))) ('OUTSIDE', ('TEXT', u'Some ', (None, 1, 6))) ('OUTSIDE', ('START', (QName('em'), Attrs()), (None, 1, 11))) ('OUTSIDE', ('TEXT', u'test', (None, 1, 15))) ('OUTSIDE', ('END', QName('em'), (None, 1, 19))) ('OUTSIDE', ('TEXT', u' text', (None, 1, 24))) ('OUTSIDE', ('END', QName('body'), (None, 1, 29))) <body>Some <em>test</em> text</body> :return: the stream augmented by transformation marks :rtype: `Transformer` """ return self.apply(EndTransformation()) #{ Deletion operations def empty(self): """Empty selected elements of all content. Example: >>> html = HTML('<html><head><title>Some Title</title></head>' ... '<body>Some <em>body</em> text.</body></html>') >>> print(html | Transformer('.//em').empty()) <html><head><title>Some Title</title></head><body>Some <em/> text.</body></html> :rtype: `Transformer` """ return self.apply(EmptyTransformation()) def remove(self): """Remove selection from the stream. Example: >>> html = HTML('<html><head><title>Some Title</title></head>' ... 
'<body>Some <em>body</em> text.</body></html>') >>> print(html | Transformer('.//em').remove()) <html><head><title>Some Title</title></head><body>Some text.</body></html> :rtype: `Transformer` """ return self.apply(RemoveTransformation()) #{ Direct element operations def unwrap(self): """Remove outermost enclosing elements from selection. Example: >>> html = HTML('<html><head><title>Some Title</title></head>' ... '<body>Some <em>body</em> text.</body></html>') >>> print(html | Transformer('.//em').unwrap()) <html><head><title>Some Title</title></head><body>Some body text.</body></html> :rtype: `Transformer` """ return self.apply(UnwrapTransformation()) def wrap(self, element): """Wrap selection in an element. >>> html = HTML('<html><head><title>Some Title</title></head>' ... '<body>Some <em>body</em> text.</body></html>') >>> print(html | Transformer('.//em').wrap('strong')) <html><head><title>Some Title</title></head><body>Some <strong><em>body</em></strong> text.</body></html> :param element: either a tag name (as string) or an `Element` object :rtype: `Transformer` """ return self.apply(WrapTransformation(element)) #{ Content insertion operations def replace(self, content): """Replace selection with content. >>> html = HTML('<html><head><title>Some Title</title></head>' ... '<body>Some <em>body</em> text.</body></html>') >>> print(html | Transformer('.//title/text()').replace('New Title')) <html><head><title>New Title</title></head><body>Some <em>body</em> text.</body></html> :param content: Either a callable, an iterable of events, or a string to insert. :rtype: `Transformer` """ return self.apply(ReplaceTransformation(content)) def before(self, content): """Insert content before selection. In this example we insert the word 'emphasised' before the <em> opening tag: >>> html = HTML('<html><head><title>Some Title</title></head>' ... 
'<body>Some <em>body</em> text.</body></html>') >>> print(html | Transformer('.//em').before('emphasised ')) <html><head><title>Some Title</title></head><body>Some emphasised <em>body</em> text.</body></html> :param content: Either a callable, an iterable of events, or a string to insert. :rtype: `Transformer` """ return self.apply(BeforeTransformation(content)) def after(self, content): """Insert content after selection. Here, we insert some text after the </em> closing tag: >>> html = HTML('<html><head><title>Some Title</title></head>' ... '<body>Some <em>body</em> text.</body></html>') >>> print(html | Transformer('.//em').after(' rock')) <html><head><title>Some Title</title></head><body>Some <em>body</em> rock text.</body></html> :param content: Either a callable, an iterable of events, or a string to insert. :rtype: `Transformer` """ return self.apply(AfterTransformation(content)) def prepend(self, content): """Insert content after the ENTER event of the selection. Inserting some new text at the start of the <body>: >>> html = HTML('<html><head><title>Some Title</title></head>' ... '<body>Some <em>body</em> text.</body></html>') >>> print(html | Transformer('.//body').prepend('Some new body text. ')) <html><head><title>Some Title</title></head><body>Some new body text. Some <em>body</em> text.</body></html> :param content: Either a callable, an iterable of events, or a string to insert. :rtype: `Transformer` """ return self.apply(PrependTransformation(content)) def append(self, content): """Insert content before the END event of the selection. >>> html = HTML('<html><head><title>Some Title</title></head>' ... '<body>Some <em>body</em> text.</body></html>') >>> print(html | Transformer('.//body').append(' Some new body text.')) <html><head><title>Some Title</title></head><body>Some <em>body</em> text. Some new body text.</body></html> :param content: Either a callable, an iterable of events, or a string to insert. 
:rtype: `Transformer` """ return self.apply(AppendTransformation(content)) #{ Attribute manipulation def attr(self, name, value): """Add, replace or delete an attribute on selected elements. If `value` evaulates to `None` the attribute will be deleted from the element: >>> html = HTML('<html><head><title>Some Title</title></head>' ... '<body>Some <em class="before">body</em> <em>text</em>.</body>' ... '</html>') >>> print(html | Transformer('body/em').attr('class', None)) <html><head><title>Some Title</title></head><body>Some <em>body</em> <em>text</em>.</body></html> Otherwise the attribute will be set to `value`: >>> print(html | Transformer('body/em').attr('class', 'emphasis')) <html><head><title>Some Title</title></head><body>Some <em class="emphasis">body</em> <em class="emphasis">text</em>.</body></html> If `value` is a callable it will be called with the attribute name and the `START` event for the matching element. Its return value will then be used to set the attribute: >>> def print_attr(name, event): ... attrs = event[1][1] ... print(attrs) ... return attrs.get(name) >>> print(html | Transformer('body/em').attr('class', print_attr)) Attrs([(QName('class'), u'before')]) Attrs() <html><head><title>Some Title</title></head><body>Some <em class="before">body</em> <em>text</em>.</body></html> :param name: the name of the attribute :param value: the value that should be set for the attribute. :rtype: `Transformer` """ return self.apply(AttrTransformation(name, value)) #{ Buffer operations def copy(self, buffer, accumulate=False): """Copy selection into buffer. The buffer is replaced by each *contiguous* selection before being passed to the next transformation. If accumulate=True, further selections will be appended to the buffer rather than replacing it. >>> from genshi.builder import tag >>> buffer = StreamBuffer() >>> html = HTML('<html><head><title>Some Title</title></head>' ... 
'<body>Some <em>body</em> text.</body></html>') >>> print(html | Transformer('head/title/text()').copy(buffer) ... .end().select('body').prepend(tag.h1(buffer))) <html><head><title>Some Title</title></head><body><h1>Some Title</h1>Some <em>body</em> text.</body></html> This example illustrates that only a single contiguous selection will be buffered: >>> print(html | Transformer('head/title/text()').copy(buffer) ... .end().select('body/em').copy(buffer).end().select('body') ... .prepend(tag.h1(buffer))) <html><head><title>Some Title</title></head><body><h1>Some Title</h1>Some <em>body</em> text.</body></html> >>> print(buffer) <em>body</em> Element attributes can also be copied for later use: >>> html = HTML('<html><head><title>Some Title</title></head>' ... '<body><em>Some</em> <em class="before">body</em>' ... '<em>text</em>.</body></html>') >>> buffer = StreamBuffer() >>> def apply_attr(name, entry): ... return list(buffer)[0][1][1].get('class') >>> print(html | Transformer('body/em[@class]/@class').copy(buffer) ... .end().buffer().select('body/em[not(@class)]') ... .attr('class', apply_attr)) <html><head><title>Some Title</title></head><body><em class="before">Some</em> <em class="before">body</em><em class="before">text</em>.</body></html> :param buffer: the `StreamBuffer` in which the selection should be stored :rtype: `Transformer` :note: Copy (and cut) copy each individual selected object into the buffer before passing to the next transform. For example, the XPath ``*|text()`` will select all elements and text, each instance of which will be copied to the buffer individually before passing to the next transform. This has implications for how ``StreamBuffer`` objects can be used, so some experimentation may be required. """ return self.apply(CopyTransformation(buffer, accumulate)) def cut(self, buffer, accumulate=False): """Copy selection into buffer and remove the selection from the stream. 
>>> from genshi.builder import tag >>> buffer = StreamBuffer() >>> html = HTML('<html><head><title>Some Title</title></head>' ... '<body>Some <em>body</em> text.</body></html>') >>> print(html | Transformer('.//em/text()').cut(buffer) ... .end().select('.//em').after(tag.h1(buffer))) <html><head><title>Some Title</title></head><body>Some <em/><h1>body</h1> text.</body></html> Specifying accumulate=True, appends all selected intervals onto the buffer. Combining this with the .buffer() operation allows us operate on all copied events rather than per-segment. See the documentation on buffer() for more information. :param buffer: the `StreamBuffer` in which the selection should be stored :rtype: `Transformer` :note: this transformation will buffer the entire input stream """ return self.apply(CutTransformation(buffer, accumulate)) def buffer(self): """Buffer the entire stream (can consume a considerable amount of memory). Useful in conjunction with copy(accumulate=True) and cut(accumulate=True) to ensure that all marked events in the entire stream are copied to the buffer before further transformations are applied. For example, to move all <note> elements inside a <notes> tag at the top of the document: >>> doc = HTML('<doc><notes></notes><body>Some <note>one</note> ' ... 'text <note>two</note>.</body></doc>') >>> buffer = StreamBuffer() >>> print(doc | Transformer('body/note').cut(buffer, accumulate=True) ... .end().buffer().select('notes').prepend(buffer)) <doc><notes><note>one</note><note>two</note></notes><body>Some text .</body></doc> """ return self.apply(list) #{ Miscellaneous operations def filter(self, filter): """Apply a normal stream filter to the selection. The filter is called once for each contiguous block of marked events. >>> from genshi.filters.html import HTMLSanitizer >>> html = HTML('<html><body>Some text<script>alert(document.cookie)' ... 
'</script> and some more text</body></html>') >>> print(html | Transformer('body/*').filter(HTMLSanitizer())) <html><body>Some text and some more text</body></html> :param filter: The stream filter to apply. :rtype: `Transformer` """ return self.apply(FilterTransformation(filter)) def map(self, function, kind): """Applies a function to the ``data`` element of events of ``kind`` in the selection. >>> html = HTML('<html><head><title>Some Title</title></head>' ... '<body>Some <em>body</em> text.</body></html>') >>> print(html | Transformer('head/title').map(unicode.upper, TEXT)) <html><head><title>SOME TITLE</title></head><body>Some <em>body</em> text.</body></html> :param function: the function to apply :param kind: the kind of event the function should be applied to :rtype: `Transformer` """ return self.apply(MapTransformation(function, kind)) def substitute(self, pattern, replace, count=1): """Replace text matching a regular expression. Refer to the documentation for ``re.sub()`` for details. >>> html = HTML('<html><body>Some text, some more text and ' ... '<b>some bold text</b>\\n' ... '<i>some italicised text</i></body></html>') >>> print(html | Transformer('body/b').substitute('(?i)some', 'SOME')) <html><body>Some text, some more text and <b>SOME bold text</b> <i>some italicised text</i></body></html> >>> tags = tag.html(tag.body('Some text, some more text and\\n', ... Markup('<b>some bold text</b>'))) >>> print(tags.generate() | Transformer('body').substitute( ... '(?i)some', 'SOME')) <html><body>SOME text, some more text and <b>SOME bold text</b></body></html> :param pattern: A regular expression object or string. :param replace: Replacement pattern. :param count: Number of replacements to make in each text fragment. :rtype: `Transformer` """ return self.apply(SubstituteTransformation(pattern, replace, count)) def rename(self, name): """Rename matching elements. >>> html = HTML('<html><body>Some text, some more text and ' ... 
'<b>some bold text</b></body></html>') >>> print(html | Transformer('body/b').rename('strong')) <html><body>Some text, some more text and <strong>some bold text</strong></body></html> """ return self.apply(RenameTransformation(name)) def trace(self, prefix='', fileobj=None): """Print events as they pass through the transform. >>> html = HTML('<body>Some <em>test</em> text</body>') >>> print(html | Transformer('em').trace()) (None, ('START', (QName('body'), Attrs()), (None, 1, 0))) (None, ('TEXT', u'Some ', (None, 1, 6))) ('ENTER', ('START', (QName('em'), Attrs()), (None, 1, 11))) ('INSIDE', ('TEXT', u'test', (None, 1, 15))) ('EXIT', ('END', QName('em'), (None, 1, 19))) (None, ('TEXT', u' text', (None, 1, 24))) (None, ('END', QName('body'), (None, 1, 29))) <body>Some <em>test</em> text</body> :param prefix: a string to prefix each event with in the output :param fileobj: the writable file-like object to write to; defaults to the standard output stream :rtype: `Transformer` """ return self.apply(TraceTransformation(prefix, fileobj=fileobj)) # Internal methods def _mark(self, stream): for event in stream: yield OUTSIDE, event def _unmark(self, stream): for mark, event in stream: kind = event[0] if not (kind is None or kind is ATTR or kind is BREAK): yield event class SelectTransformation(object): """Select and mark events that match an XPath expression.""" def __init__(self, path): """Create selection. :param path: an XPath expression (as string) or a `Path` object """ if not isinstance(path, Path): path = Path(path) self.path = path def __call__(self, stream): """Apply the transform filter to the marked stream. :param stream: the marked event stream to filter """ namespaces = {} variables = {} test = self.path.test() stream = iter(stream) next = stream.next for mark, event in stream: if mark is None: yield mark, event continue result = test(event, namespaces, variables) # XXX This is effectively genshi.core._ensure() for transform # streams. 
            # (continuation of SelectTransformation.__call__)
            # `result` is whatever the compiled path test returned for the
            # current event: True, an Attrs instance, a tuple event, some
            # other truthy value, or a falsy value.
            if result is True:
                if event[0] is START:
                    # A matched element: mark its START as ENTER, everything
                    # up to the balancing END as INSIDE, and that END as EXIT.
                    yield ENTER, event
                    depth = 1
                    while depth > 0:
                        mark, subevent = next()
                        if subevent[0] is START:
                            depth += 1
                        elif subevent[0] is END:
                            depth -= 1
                        if depth == 0:
                            yield EXIT, subevent
                        else:
                            yield INSIDE, subevent
                        # Keep the path test's internal position tracking in
                        # sync while we consume the element's contents here.
                        test(subevent, namespaces, variables, updateonly=True)
                else:
                    # Matched non-element event (e.g. text): a flat selection.
                    yield OUTSIDE, event
            elif isinstance(result, Attrs):
                # XXX Selected *attributes* are given a "kind" of None to
                # indicate they are not really part of the stream.
                yield ATTR, (ATTR, (QName(event[1][0] + '@*'), result), event[2])
                yield None, event
            elif isinstance(result, tuple):
                yield OUTSIDE, result
            elif result:
                # XXX Assume everything else is "text"?
                yield None, (TEXT, unicode(result), (None, -1, -1))
            else:
                # Falsy result: event is outside the selection, pass unmarked.
                yield None, event


class InvertTransformation(object):
    """Invert selection so that marked events become unmarked, and vice
    versa.

    Specifically, all input marks are converted to null marks, and all input
    null marks are converted to OUTSIDE marks.
    """

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: the marked event stream to filter
        """
        for mark, event in stream:
            if mark:
                yield None, event
            else:
                yield OUTSIDE, event


class EndTransformation(object):
    """End the current selection."""

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: the marked event stream to filter
        """
        # Flatten every event back to OUTSIDE, discarding all marks.
        for mark, event in stream:
            yield OUTSIDE, event


class EmptyTransformation(object):
    """Empty selected elements of all content."""

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: the marked event stream to filter
        """
        for mark, event in stream:
            yield mark, event
            if mark is ENTER:
                # Swallow everything up to (but excluding) the matching EXIT.
                for mark, event in stream:
                    if mark is EXIT:
                        yield mark, event
                        break


class RemoveTransformation(object):
    """Remove selection from the stream."""

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: the marked event stream to filter
        """
        # Only unmarked (mark is None) events survive; every marked event
        # (the selection) is dropped.
        for mark, event in stream:
            if mark is None:
                yield mark, event


class UnwrapTransformation(object):
    """Remove outermost enclosing elements from selection."""

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: the marked event stream to filter
        """
        # Drop the ENTER/EXIT (i.e. the selected elements' own start/end
        # tags); their contents and everything else pass through.
        for mark, event in stream:
            if mark not in (ENTER, EXIT):
                yield mark, event


class WrapTransformation(object):
    """Wrap selection in an element."""

    def __init__(self, element):
        # Accept either a prebuilt builder Element or a tag name.
        if isinstance(element, Element):
            self.element = element
        else:
            self.element = Element(element)

    def __call__(self, stream):
        for mark, event in stream:
            if mark:
                # Emit the wrapper's opening events, then the contiguous run
                # of marked events, then the wrapper's closing event.
                element = list(self.element.generate())
                for prefix in element[:-1]:
                    yield None, prefix
                yield mark, event
                start = mark
                stopped = False
                for mark, event in stream:
                    if start is ENTER and mark is EXIT:
                        yield mark, event
                        stopped = True
                        break
                    if not mark:
                        # Selection ended on an unmarked event; close the
                        # wrapper before re-emitting it below.
                        break
                    yield mark, event
                else:
                    # Stream exhausted inside the selection.
                    stopped = True
                yield None, element[-1]
                if not stopped:
                    yield mark, event
            else:
                yield mark, event


class TraceTransformation(object):
    """Print events as they pass through the transform."""

    def __init__(self, prefix='', fileobj=None):
        """Trace constructor.

        :param prefix: text to prefix each traced line with.
        :param fileobj: the writable file-like object to write to
        """
        self.prefix = prefix
        self.fileobj = fileobj or sys.stdout

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: the marked event stream to filter
        """
        # Pure pass-through: write a repr of each (mark, event) pair and
        # forward it unchanged.
        for event in stream:
            self.fileobj.write('%s%s\n' % (self.prefix, event))
            yield event


class FilterTransformation(object):
    """Apply a normal stream filter to the selection. The filter is called once
    for each selection."""

    def __init__(self, filter):
        """Create the transform.

        :param filter: The stream filter to apply.
        """
        self.filter = filter

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.
        :param stream: The marked event stream to filter
        """
        def flush(queue):
            # Run the user filter over the buffered (unmarked) events and
            # re-emit the results as OUTSIDE; empties the queue in place.
            if queue:
                for event in self.filter(queue):
                    yield OUTSIDE, event
                del queue[:]

        queue = []
        for mark, event in stream:
            if mark is ENTER:
                # Buffer a whole element (ENTER..EXIT), then filter it as one
                # selection.
                queue.append(event)
                for mark, event in stream:
                    queue.append(event)
                    if mark is EXIT:
                        break
                for queue_event in flush(queue):
                    yield queue_event
            elif mark is OUTSIDE:
                # Buffer a contiguous run of OUTSIDE events as one selection.
                stopped = False
                queue.append(event)
                for mark, event in stream:
                    if mark is not OUTSIDE:
                        break
                    queue.append(event)
                else:
                    stopped = True
                for queue_event in flush(queue):
                    yield queue_event
                if not stopped:
                    # Re-emit the event that terminated the run.
                    yield mark, event
            else:
                yield mark, event
        # Flush any trailing buffered selection at end of stream.
        for queue_event in flush(queue):
            yield queue_event


class MapTransformation(object):
    """Apply a function to the `data` element of events of ``kind`` in the
    selection.
    """

    def __init__(self, function, kind):
        """Create the transform.

        :param function: the function to apply; the function must take one
                         argument, the `data` element of each selected event
        :param kind: the stream event ``kind`` to apply the `function` to
        """
        self.function = function
        self.kind = kind

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: The marked event stream to filter
        """
        # kind=None acts as a wildcard: map every marked event's data.
        for mark, (kind, data, pos) in stream:
            if mark and self.kind in (None, kind):
                yield mark, (kind, self.function(data), pos)
            else:
                yield mark, (kind, data, pos)


class SubstituteTransformation(object):
    """Replace text matching a regular expression.

    Refer to the documentation for ``re.sub()`` for details.
    """

    def __init__(self, pattern, replace, count=0):
        """Create the transform.

        :param pattern: A regular expression object, or string.
        :param replace: Replacement pattern.
        :param count: Number of replacements to make in each text fragment.
        """
        # NOTE: count=0 here means "replace all" (re.sub semantics), while
        # Transformer.substitute() defaults to count=1 and passes it in
        # explicitly.
        if isinstance(pattern, basestring):
            self.pattern = re.compile(pattern)
        else:
            self.pattern = pattern
        self.count = count
        self.replace = replace

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: The marked event stream to filter
        """
        for mark, (kind, data, pos) in stream:
            if mark is not None and kind is TEXT:
                new_data = self.pattern.sub(self.replace, data, self.count)
                # Preserve Markup-ness of the original text so the result is
                # not re-escaped downstream.
                if isinstance(data, Markup):
                    data = Markup(new_data)
                else:
                    data = new_data
            yield mark, (kind, data, pos)


class RenameTransformation(object):
    """Rename matching elements."""

    def __init__(self, name):
        """Create the transform.

        :param name: New element name.
        """
        self.name = QName(name)

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: The marked event stream to filter
        """
        # Only the selected elements' own tags (ENTER/EXIT) are renamed;
        # START data is (name, attrs), END data is just the name.
        for mark, (kind, data, pos) in stream:
            if mark is ENTER:
                data = self.name, data[1]
            elif mark is EXIT:
                data = self.name
            yield mark, (kind, data, pos)


class InjectorTransformation(object):
    """Abstract base class for transformations that inject content into a
    stream.

    >>> class Top(InjectorTransformation):
    ...     def __call__(self, stream):
    ...         for event in self._inject():
    ...             yield event
    ...         for event in stream:
    ...             yield event
    >>> html = HTML('<body>Some <em>test</em> text</body>')
    >>> print(html | Transformer('.//em').apply(Top('Prefix ')))
    Prefix <body>Some <em>test</em> text</body>
    """

    def __init__(self, content):
        """Create a new injector.

        :param content: An iterable of Genshi stream events, or a string to be
                        injected.
        """
        self.content = content

    def _inject(self):
        # Content may be a zero-argument callable producing fresh content per
        # injection (so one buffer/generator is not exhausted on first use).
        content = self.content
        if hasattr(content, '__call__'):
            content = content()
        for event in _ensure(content):
            yield None, event


class ReplaceTransformation(InjectorTransformation):
    """Replace selection with content."""

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.
        :param stream: The marked event stream to filter
        """
        stream = PushBackStream(stream)
        for mark, event in stream:
            if mark is not None:
                # Emit the replacement content, then silently consume the
                # selection it replaces.
                start = mark
                for subevent in self._inject():
                    yield subevent
                for mark, event in stream:
                    if start is ENTER:
                        # Element selection: discard through the matching EXIT.
                        if mark is EXIT:
                            break
                    elif mark != start:
                        # Non-element selection ended: re-queue the first
                        # event past it for normal processing.
                        stream.push((mark, event))
                        break
            else:
                yield mark, event


class BeforeTransformation(InjectorTransformation):
    """Insert content before selection."""

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: The marked event stream to filter
        """
        stream = PushBackStream(stream)
        for mark, event in stream:
            if mark is not None:
                start = mark
                # Inject first, then pass the whole selection through.
                for subevent in self._inject():
                    yield subevent
                yield mark, event
                for mark, event in stream:
                    if mark != start and start is not ENTER:
                        # End of a non-element selection: push the terminator
                        # back so it is handled by the outer loop.
                        stream.push((mark, event))
                        break
                    yield mark, event
                    if start is ENTER and mark is EXIT:
                        break
            else:
                yield mark, event


class AfterTransformation(InjectorTransformation):
    """Insert content after selection."""

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: The marked event stream to filter
        """
        stream = PushBackStream(stream)
        for mark, event in stream:
            yield mark, event
            if mark:
                # Pass the rest of the selection through, then inject.
                start = mark
                for mark, event in stream:
                    if start is not ENTER and mark != start:
                        stream.push((mark, event))
                        break
                    yield mark, event
                    if start is ENTER and mark is EXIT:
                        break
                for subevent in self._inject():
                    yield subevent


class PrependTransformation(InjectorTransformation):
    """Prepend content to the inside of selected elements."""

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: The marked event stream to filter
        """
        # Inject immediately after each selected element's opening tag.
        for mark, event in stream:
            yield mark, event
            if mark is ENTER:
                for subevent in self._inject():
                    yield subevent


class AppendTransformation(InjectorTransformation):
    """Append content after the content of selected elements."""

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: The marked event stream to filter
        """
        for mark, event in stream:
            yield mark, event
            if mark is ENTER:
                # Pass the element's content through, inject just before the
                # closing tag, then emit the EXIT event itself.
                for mark, event in stream:
                    if mark is EXIT:
                        break
                    yield mark, event
                for subevent in self._inject():
                    yield subevent
                yield mark, event


class AttrTransformation(object):
    """Set an attribute on selected elements."""

    def __init__(self, name, value):
        """Construct transform.

        :param name: name of the attribute that should be set
        :param value: the value to set
        """
        self.name = name
        self.value = value

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: The marked event stream to filter
        """
        # A callable value is invoked per selected element with
        # (name, event); returning None removes the attribute instead.
        callable_value = hasattr(self.value, '__call__')
        for mark, (kind, data, pos) in stream:
            if mark is ENTER:
                if callable_value:
                    value = self.value(self.name, (kind, data, pos))
                else:
                    value = self.value
                if value is None:
                    # Attrs supports '-' to remove and '|' to set/replace.
                    attrs = data[1] - [QName(self.name)]
                else:
                    attrs = data[1] | [(QName(self.name), value)]
                data = (data[0], attrs)
            yield mark, (kind, data, pos)


class StreamBuffer(Stream):
    """Stream event buffer used for cut and copy transformations."""

    def __init__(self):
        """Create the buffer."""
        Stream.__init__(self, [])

    def append(self, event):
        """Add an event to the buffer.

        :param event: the markup event to add
        """
        self.events.append(event)

    def reset(self):
        """Empty the buffer of events."""
        del self.events[:]


class CopyTransformation(object):
    """Copy selected events into a buffer for later insertion."""

    def __init__(self, buffer, accumulate=False):
        """Create the copy transformation.

        :param buffer: the `StreamBuffer` in which the selection should be
                       stored
        """
        # With accumulate=False the buffer is also cleared up front, so a
        # stale buffer never leaks into the first selection.
        if not accumulate:
            buffer.reset()
        self.buffer = buffer
        self.accumulate = accumulate

    def __call__(self, stream):
        """Apply the transformation to the marked stream.

        :param stream: the marked event stream to filter
        """
        stream = PushBackStream(stream)

        for mark, event in stream:
            if mark:
                if not self.accumulate:
                    # Each new selection replaces the previous buffer content.
                    self.buffer.reset()
                events = [(mark, event)]
                self.buffer.append(event)
                start = mark
                for mark, event in stream:
                    if start is not ENTER and mark != start:
                        stream.push((mark, event))
                        break
                    events.append((mark, event))
                    self.buffer.append(event)
                    if start is ENTER and mark is EXIT:
                        break
                # Unlike cut, copy re-emits the selection unchanged.
                for i in events:
                    yield i
            else:
                yield mark, event


class CutTransformation(object):
    """Cut selected events into a buffer for later insertion and remove the
    selection.
    """

    def __init__(self, buffer, accumulate=False):
        """Create the cut transformation.

        :param buffer: the `StreamBuffer` in which the selection should be
                       stored
        """
        self.buffer = buffer
        self.accumulate = accumulate

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: the marked event stream to filter
        """
        attributes = []
        stream = PushBackStream(stream)
        # `broken` tracks whether an unmarked event separated two selections;
        # adjacent selections are delimited with synthetic BREAK events so a
        # later paste can tell them apart.
        broken = False
        if not self.accumulate:
            self.buffer.reset()
        for mark, event in stream:
            if mark:
                # Send a BREAK event if there was no other event sent between
                if not self.accumulate:
                    if not broken and self.buffer:
                        yield BREAK, (BREAK, None, None)
                    self.buffer.reset()
                self.buffer.append(event)
                start = mark
                if mark is ATTR:
                    # Selected attributes: remember their names so they can be
                    # stripped from the owning START event below.
                    attributes.extend([name for name, _ in event[1][1]])
                for mark, event in stream:
                    if start is mark is ATTR:
                        attributes.extend([name for name, _ in event[1][1]])
                    # Handle non-element contiguous selection
                    if start is not ENTER and mark != start:
                        # Operating on the attributes of a START event
                        if start is ATTR:
                            kind, data, pos = event
                            assert kind is START
                            data = (data[0], data[1] - attributes)
                            attributes = None
                            stream.push((mark, (kind, data, pos)))
                        else:
                            stream.push((mark, event))
                        break
                    self.buffer.append(event)
                    if start is ENTER and mark is EXIT:
                        break
                broken = False
            else:
                broken = True
                yield mark, event
        # Trailing BREAK when the stream ended inside/right after a selection.
        if not broken and self.buffer:
            yield BREAK, (BREAK, None, None)
[ [ 8, 0, 0.0237, 0.0267, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0382, 0.0008, 0, 0.66, 0.0233, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 1, 0, 0.0389, 0.0008, 0, 0.66...
[ "\"\"\"A filter for functional-style transformations of markup streams.\n\nThe `Transformer` filter provides a variety of transformations that can be\napplied to parts of streams that match given XPath expressions. These\ntransformations can be chained to achieve results that would be comparitively\ntedious to achi...
# -*- coding: utf-8 -*- # # Copyright (C) 2007 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. import doctest from pprint import pprint import unittest from genshi import HTML from genshi.builder import Element from genshi.core import START, END, TEXT, QName, Attrs from genshi.filters.transform import Transformer, StreamBuffer, ENTER, EXIT, \ OUTSIDE, INSIDE, ATTR, BREAK import genshi.filters.transform FOO = '<root>ROOT<foo name="foo">FOO</foo></root>' FOOBAR = '<root>ROOT<foo name="foo" size="100">FOO</foo><bar name="bar">BAR</bar></root>' def _simplify(stream, with_attrs=False): """Simplify a marked stream.""" def _generate(): for mark, (kind, data, pos) in stream: if kind is START: if with_attrs: data = (unicode(data[0]), dict((unicode(k), v) for k, v in data[1])) else: data = unicode(data[0]) elif kind is END: data = unicode(data) elif kind is ATTR: kind = ATTR data = dict((unicode(k), v) for k, v in data[1]) yield mark, kind, data return list(_generate()) def _transform(html, transformer, with_attrs=False): """Apply transformation returning simplified marked stream.""" if isinstance(html, basestring): html = HTML(html) stream = transformer(html, keep_marks=True) return _simplify(stream, with_attrs) class SelectTest(unittest.TestCase): """Test .select()""" def _select(self, select): html = HTML(FOOBAR) if isinstance(select, basestring): select = [select] transformer = Transformer(select[0]) for sel in select[1:]: transformer = transformer.select(sel) return _transform(html, transformer) def test_select_single_element(self): self.assertEqual( self._select('foo'), [(None, 
START, u'root'), (None, TEXT, u'ROOT'), (ENTER, START, u'foo'), (INSIDE, TEXT, u'FOO'), (EXIT, END, u'foo'), (None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')], ) def test_select_context(self): self.assertEqual( self._select('.'), [(ENTER, START, u'root'), (INSIDE, TEXT, u'ROOT'), (INSIDE, START, u'foo'), (INSIDE, TEXT, u'FOO'), (INSIDE, END, u'foo'), (INSIDE, START, u'bar'), (INSIDE, TEXT, u'BAR'), (INSIDE, END, u'bar'), (EXIT, END, u'root')] ) def test_select_inside_select(self): self.assertEqual( self._select(['.', 'foo']), [(None, START, u'root'), (None, TEXT, u'ROOT'), (ENTER, START, u'foo'), (INSIDE, TEXT, u'FOO'), (EXIT, END, u'foo'), (None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')], ) def test_select_text(self): self.assertEqual( self._select('*/text()'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (None, START, u'foo'), (OUTSIDE, TEXT, u'FOO'), (None, END, u'foo'), (None, START, u'bar'), (OUTSIDE, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')], ) def test_select_attr(self): self.assertEqual( self._select('foo/@name'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (ATTR, ATTR, {'name': u'foo'}), (None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo'), (None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')] ) def test_select_text_context(self): self.assertEqual( list(Transformer('.')(HTML('foo'), keep_marks=True)), [('OUTSIDE', ('TEXT', u'foo', (None, 1, 0)))], ) class InvertTest(unittest.TestCase): def _invert(self, select): return _transform(FOO, Transformer(select).invert()) def test_invert_element(self): self.assertEqual( self._invert('foo'), [(OUTSIDE, START, u'root'), (OUTSIDE, TEXT, u'ROOT'), (None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo'), (OUTSIDE, END, u'root')] ) def test_invert_inverted_element(self): self.assertEqual( _transform(FOO, Transformer('foo').invert().invert()), [(None, START, u'root'), 
(None, TEXT, u'ROOT'), (OUTSIDE, START, u'foo'), (OUTSIDE, TEXT, u'FOO'), (OUTSIDE, END, u'foo'), (None, END, u'root')] ) def test_invert_text(self): self.assertEqual( self._invert('foo/text()'), [(OUTSIDE, START, u'root'), (OUTSIDE, TEXT, u'ROOT'), (OUTSIDE, START, u'foo'), (None, TEXT, u'FOO'), (OUTSIDE, END, u'foo'), (OUTSIDE, END, u'root')] ) def test_invert_attribute(self): self.assertEqual( self._invert('foo/@name'), [(OUTSIDE, START, u'root'), (OUTSIDE, TEXT, u'ROOT'), (None, ATTR, {'name': u'foo'}), (OUTSIDE, START, u'foo'), (OUTSIDE, TEXT, u'FOO'), (OUTSIDE, END, u'foo'), (OUTSIDE, END, u'root')] ) def test_invert_context(self): self.assertEqual( self._invert('.'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo'), (None, END, u'root')] ) def test_invert_text_context(self): self.assertEqual( _simplify(Transformer('.').invert()(HTML('foo'), keep_marks=True)), [(None, 'TEXT', u'foo')], ) class EndTest(unittest.TestCase): def test_end(self): stream = _transform(FOO, Transformer('foo').end()) self.assertEqual( stream, [(OUTSIDE, START, u'root'), (OUTSIDE, TEXT, u'ROOT'), (OUTSIDE, START, u'foo'), (OUTSIDE, TEXT, u'FOO'), (OUTSIDE, END, u'foo'), (OUTSIDE, END, u'root')] ) class EmptyTest(unittest.TestCase): def _empty(self, select): return _transform(FOO, Transformer(select).empty()) def test_empty_element(self): self.assertEqual( self._empty('foo'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (ENTER, START, u'foo'), (EXIT, END, u'foo'), (None, END, u'root')], ) def test_empty_text(self): self.assertEqual( self._empty('foo/text()'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (None, START, u'foo'), (OUTSIDE, TEXT, u'FOO'), (None, END, u'foo'), (None, END, u'root')] ) def test_empty_attr(self): self.assertEqual( self._empty('foo/@name'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (ATTR, ATTR, {'name': u'foo'}), (None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo'), (None, END, 
u'root')] ) def test_empty_context(self): self.assertEqual( self._empty('.'), [(ENTER, START, u'root'), (EXIT, END, u'root')] ) def test_empty_text_context(self): self.assertEqual( _simplify(Transformer('.')(HTML('foo'), keep_marks=True)), [(OUTSIDE, TEXT, u'foo')], ) class RemoveTest(unittest.TestCase): def _remove(self, select): return _transform(FOO, Transformer(select).remove()) def test_remove_element(self): self.assertEqual( self._remove('foo|bar'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (None, END, u'root')] ) def test_remove_text(self): self.assertEqual( self._remove('//text()'), [(None, START, u'root'), (None, START, u'foo'), (None, END, u'foo'), (None, END, u'root')] ) def test_remove_attr(self): self.assertEqual( self._remove('foo/@name'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo'), (None, END, u'root')] ) def test_remove_context(self): self.assertEqual( self._remove('.'), [], ) def test_remove_text_context(self): self.assertEqual( _transform('foo', Transformer('.').remove()), [], ) class UnwrapText(unittest.TestCase): def _unwrap(self, select): return _transform(FOO, Transformer(select).unwrap()) def test_unwrap_element(self): self.assertEqual( self._unwrap('foo'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (INSIDE, TEXT, u'FOO'), (None, END, u'root')] ) def test_unwrap_text(self): self.assertEqual( self._unwrap('foo/text()'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (None, START, u'foo'), (OUTSIDE, TEXT, u'FOO'), (None, END, u'foo'), (None, END, u'root')] ) def test_unwrap_attr(self): self.assertEqual( self._unwrap('foo/@name'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (ATTR, ATTR, {'name': u'foo'}), (None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo'), (None, END, u'root')] ) def test_unwrap_adjacent(self): self.assertEqual( _transform(FOOBAR, Transformer('foo|bar').unwrap()), [(None, START, u'root'), (None, TEXT, u'ROOT'), (INSIDE, TEXT, 
u'FOO'), (INSIDE, TEXT, u'BAR'), (None, END, u'root')] ) def test_unwrap_root(self): self.assertEqual( self._unwrap('.'), [(INSIDE, TEXT, u'ROOT'), (INSIDE, START, u'foo'), (INSIDE, TEXT, u'FOO'), (INSIDE, END, u'foo')] ) def test_unwrap_text_root(self): self.assertEqual( _transform('foo', Transformer('.').unwrap()), [(OUTSIDE, TEXT, 'foo')], ) class WrapTest(unittest.TestCase): def _wrap(self, select, wrap='wrap'): return _transform(FOO, Transformer(select).wrap(wrap)) def test_wrap_element(self): self.assertEqual( self._wrap('foo'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (None, START, u'wrap'), (ENTER, START, u'foo'), (INSIDE, TEXT, u'FOO'), (EXIT, END, u'foo'), (None, END, u'wrap'), (None, END, u'root')] ) def test_wrap_adjacent_elements(self): self.assertEqual( _transform(FOOBAR, Transformer('foo|bar').wrap('wrap')), [(None, START, u'root'), (None, TEXT, u'ROOT'), (None, START, u'wrap'), (ENTER, START, u'foo'), (INSIDE, TEXT, u'FOO'), (EXIT, END, u'foo'), (None, END, u'wrap'), (None, START, u'wrap'), (ENTER, START, u'bar'), (INSIDE, TEXT, u'BAR'), (EXIT, END, u'bar'), (None, END, u'wrap'), (None, END, u'root')] ) def test_wrap_text(self): self.assertEqual( self._wrap('foo/text()'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (None, START, u'foo'), (None, START, u'wrap'), (OUTSIDE, TEXT, u'FOO'), (None, END, u'wrap'), (None, END, u'foo'), (None, END, u'root')] ) def test_wrap_root(self): self.assertEqual( self._wrap('.'), [(None, START, u'wrap'), (ENTER, START, u'root'), (INSIDE, TEXT, u'ROOT'), (INSIDE, START, u'foo'), (INSIDE, TEXT, u'FOO'), (INSIDE, END, u'foo'), (EXIT, END, u'root'), (None, END, u'wrap')] ) def test_wrap_text_root(self): self.assertEqual( _transform('foo', Transformer('.').wrap('wrap')), [(None, START, u'wrap'), (OUTSIDE, TEXT, u'foo'), (None, END, u'wrap')], ) def test_wrap_with_element(self): element = Element('a', href='http://localhost') self.assertEqual( _transform('foo', Transformer('.').wrap(element), with_attrs=True), 
[(None, START, (u'a', {u'href': u'http://localhost'})), (OUTSIDE, TEXT, u'foo'), (None, END, u'a')] ) class FilterTest(unittest.TestCase): def _filter(self, select, html=FOOBAR): """Returns a list of lists of filtered elements.""" output = [] def filtered(stream): interval = [] output.append(interval) for event in stream: interval.append(event) yield event _transform(html, Transformer(select).filter(filtered)) simplified = [] for sub in output: simplified.append(_simplify([(None, event) for event in sub])) return simplified def test_filter_element(self): self.assertEqual( self._filter('foo'), [[(None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo')]] ) def test_filter_adjacent_elements(self): self.assertEqual( self._filter('foo|bar'), [[(None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo')], [(None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar')]] ) def test_filter_text(self): self.assertEqual( self._filter('*/text()'), [[(None, TEXT, u'FOO')], [(None, TEXT, u'BAR')]] ) def test_filter_root(self): self.assertEqual( self._filter('.'), [[(None, START, u'root'), (None, TEXT, u'ROOT'), (None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo'), (None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')]] ) def test_filter_text_root(self): self.assertEqual( self._filter('.', 'foo'), [[(None, TEXT, u'foo')]]) def test_filter_after_outside(self): stream = _transform( '<root>x</root>', Transformer('//root/text()').filter(lambda x: x)) self.assertEqual( list(stream), [(None, START, u'root'), (OUTSIDE, TEXT, u'x'), (None, END, u'root')]) class MapTest(unittest.TestCase): def _map(self, select, kind=None): data = [] def record(d): data.append(d) return d _transform(FOOBAR, Transformer(select).map(record, kind)) return data def test_map_element(self): self.assertEqual( self._map('foo'), [(QName('foo'), Attrs([(QName('name'), u'foo'), (QName('size'), u'100')])), u'FOO', QName('foo')] ) def 
test_map_with_text_kind(self): self.assertEqual( self._map('.', TEXT), [u'ROOT', u'FOO', u'BAR'] ) def test_map_with_root_and_end_kind(self): self.assertEqual( self._map('.', END), [QName('foo'), QName('bar'), QName('root')] ) def test_map_with_attribute(self): self.assertEqual( self._map('foo/@name'), [(QName('foo@*'), Attrs([('name', u'foo')]))] ) class SubstituteTest(unittest.TestCase): def _substitute(self, select, pattern, replace): return _transform(FOOBAR, Transformer(select).substitute(pattern, replace)) def test_substitute_foo(self): self.assertEqual( self._substitute('foo', 'FOO|BAR', 'FOOOOO'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (ENTER, START, u'foo'), (INSIDE, TEXT, u'FOOOOO'), (EXIT, END, u'foo'), (None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')] ) def test_substitute_foobar_with_group(self): self.assertEqual( self._substitute('foo|bar', '(FOO|BAR)', r'(\1)'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (ENTER, START, u'foo'), (INSIDE, TEXT, u'(FOO)'), (EXIT, END, u'foo'), (ENTER, START, u'bar'), (INSIDE, TEXT, u'(BAR)'), (EXIT, END, u'bar'), (None, END, u'root')] ) class RenameTest(unittest.TestCase): def _rename(self, select): return _transform(FOOBAR, Transformer(select).rename('foobar')) def test_rename_root(self): self.assertEqual( self._rename('.'), [(ENTER, START, u'foobar'), (INSIDE, TEXT, u'ROOT'), (INSIDE, START, u'foo'), (INSIDE, TEXT, u'FOO'), (INSIDE, END, u'foo'), (INSIDE, START, u'bar'), (INSIDE, TEXT, u'BAR'), (INSIDE, END, u'bar'), (EXIT, END, u'foobar')] ) def test_rename_element(self): self.assertEqual( self._rename('foo|bar'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (ENTER, START, u'foobar'), (INSIDE, TEXT, u'FOO'), (EXIT, END, u'foobar'), (ENTER, START, u'foobar'), (INSIDE, TEXT, u'BAR'), (EXIT, END, u'foobar'), (None, END, u'root')] ) def test_rename_text(self): self.assertEqual( self._rename('foo/text()'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (None, START, 
u'foo'), (OUTSIDE, TEXT, u'FOO'), (None, END, u'foo'), (None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')] ) class ContentTestMixin(object): def _apply(self, select, content=None, html=FOOBAR): class Injector(object): count = 0 def __iter__(self): self.count += 1 return iter(HTML('CONTENT %i' % self.count)) if isinstance(html, basestring): html = HTML(html) if content is None: content = Injector() elif isinstance(content, basestring): content = HTML(content) return _transform(html, getattr(Transformer(select), self.operation) (content)) class ReplaceTest(unittest.TestCase, ContentTestMixin): operation = 'replace' def test_replace_element(self): self.assertEqual( self._apply('foo'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (None, TEXT, u'CONTENT 1'), (None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')] ) def test_replace_text(self): self.assertEqual( self._apply('text()'), [(None, START, u'root'), (None, TEXT, u'CONTENT 1'), (None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo'), (None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')] ) def test_replace_context(self): self.assertEqual( self._apply('.'), [(None, TEXT, u'CONTENT 1')], ) def test_replace_text_context(self): self.assertEqual( self._apply('.', html='foo'), [(None, TEXT, u'CONTENT 1')], ) def test_replace_adjacent_elements(self): self.assertEqual( self._apply('*'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (None, TEXT, u'CONTENT 1'), (None, TEXT, u'CONTENT 2'), (None, END, u'root')], ) def test_replace_all(self): self.assertEqual( self._apply('*|text()'), [(None, START, u'root'), (None, TEXT, u'CONTENT 1'), (None, TEXT, u'CONTENT 2'), (None, TEXT, u'CONTENT 3'), (None, END, u'root')], ) def test_replace_with_callback(self): count = [0] def content(): count[0] += 1 yield '%2i.' 
% count[0] self.assertEqual( self._apply('*', content), [(None, START, u'root'), (None, TEXT, u'ROOT'), (None, TEXT, u' 1.'), (None, TEXT, u' 2.'), (None, END, u'root')] ) class BeforeTest(unittest.TestCase, ContentTestMixin): operation = 'before' def test_before_element(self): self.assertEqual( self._apply('foo'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (None, TEXT, u'CONTENT 1'), (ENTER, START, u'foo'), (INSIDE, TEXT, u'FOO'), (EXIT, END, u'foo'), (None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')] ) def test_before_text(self): self.assertEqual( self._apply('text()'), [(None, START, u'root'), (None, TEXT, u'CONTENT 1'), (OUTSIDE, TEXT, u'ROOT'), (None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo'), (None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')] ) def test_before_context(self): self.assertEqual( self._apply('.'), [(None, TEXT, u'CONTENT 1'), (ENTER, START, u'root'), (INSIDE, TEXT, u'ROOT'), (INSIDE, START, u'foo'), (INSIDE, TEXT, u'FOO'), (INSIDE, END, u'foo'), (INSIDE, START, u'bar'), (INSIDE, TEXT, u'BAR'), (INSIDE, END, u'bar'), (EXIT, END, u'root')] ) def test_before_text_context(self): self.assertEqual( self._apply('.', html='foo'), [(None, TEXT, u'CONTENT 1'), (OUTSIDE, TEXT, u'foo')] ) def test_before_adjacent_elements(self): self.assertEqual( self._apply('*'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (None, TEXT, u'CONTENT 1'), (ENTER, START, u'foo'), (INSIDE, TEXT, u'FOO'), (EXIT, END, u'foo'), (None, TEXT, u'CONTENT 2'), (ENTER, START, u'bar'), (INSIDE, TEXT, u'BAR'), (EXIT, END, u'bar'), (None, END, u'root')] ) def test_before_all(self): self.assertEqual( self._apply('*|text()'), [(None, START, u'root'), (None, TEXT, u'CONTENT 1'), (OUTSIDE, TEXT, u'ROOT'), (None, TEXT, u'CONTENT 2'), (ENTER, START, u'foo'), (INSIDE, TEXT, u'FOO'), (EXIT, END, u'foo'), (None, TEXT, u'CONTENT 3'), (ENTER, START, u'bar'), (INSIDE, TEXT, u'BAR'), (EXIT, END, u'bar'), 
(None, END, u'root')] ) def test_before_with_callback(self): count = [0] def content(): count[0] += 1 yield '%2i.' % count[0] self.assertEqual( self._apply('foo/text()', content), [(None, 'START', u'root'), (None, 'TEXT', u'ROOT'), (None, 'START', u'foo'), (None, 'TEXT', u' 1.'), ('OUTSIDE', 'TEXT', u'FOO'), (None, 'END', u'foo'), (None, 'START', u'bar'), (None, 'TEXT', u'BAR'), (None, 'END', u'bar'), (None, 'END', u'root')] ) class AfterTest(unittest.TestCase, ContentTestMixin): operation = 'after' def test_after_element(self): self.assertEqual( self._apply('foo'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (ENTER, START, u'foo'), (INSIDE, TEXT, u'FOO'), (EXIT, END, u'foo'), (None, TEXT, u'CONTENT 1'), (None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')] ) def test_after_text(self): self.assertEqual( self._apply('text()'), [(None, START, u'root'), (OUTSIDE, TEXT, u'ROOT'), (None, TEXT, u'CONTENT 1'), (None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo'), (None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')] ) def test_after_context(self): self.assertEqual( self._apply('.'), [(ENTER, START, u'root'), (INSIDE, TEXT, u'ROOT'), (INSIDE, START, u'foo'), (INSIDE, TEXT, u'FOO'), (INSIDE, END, u'foo'), (INSIDE, START, u'bar'), (INSIDE, TEXT, u'BAR'), (INSIDE, END, u'bar'), (EXIT, END, u'root'), (None, TEXT, u'CONTENT 1')] ) def test_after_text_context(self): self.assertEqual( self._apply('.', html='foo'), [(OUTSIDE, TEXT, u'foo'), (None, TEXT, u'CONTENT 1')] ) def test_after_adjacent_elements(self): self.assertEqual( self._apply('*'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (ENTER, START, u'foo'), (INSIDE, TEXT, u'FOO'), (EXIT, END, u'foo'), (None, TEXT, u'CONTENT 1'), (ENTER, START, u'bar'), (INSIDE, TEXT, u'BAR'), (EXIT, END, u'bar'), (None, TEXT, u'CONTENT 2'), (None, END, u'root')] ) def test_after_all(self): self.assertEqual( self._apply('*|text()'), [(None, START, u'root'), 
(OUTSIDE, TEXT, u'ROOT'), (None, TEXT, u'CONTENT 1'), (ENTER, START, u'foo'), (INSIDE, TEXT, u'FOO'), (EXIT, END, u'foo'), (None, TEXT, u'CONTENT 2'), (ENTER, START, u'bar'), (INSIDE, TEXT, u'BAR'), (EXIT, END, u'bar'), (None, TEXT, u'CONTENT 3'), (None, END, u'root')] ) def test_after_with_callback(self): count = [0] def content(): count[0] += 1 yield '%2i.' % count[0] self.assertEqual( self._apply('foo/text()', content), [(None, 'START', u'root'), (None, 'TEXT', u'ROOT'), (None, 'START', u'foo'), ('OUTSIDE', 'TEXT', u'FOO'), (None, 'TEXT', u' 1.'), (None, 'END', u'foo'), (None, 'START', u'bar'), (None, 'TEXT', u'BAR'), (None, 'END', u'bar'), (None, 'END', u'root')] ) class PrependTest(unittest.TestCase, ContentTestMixin): operation = 'prepend' def test_prepend_element(self): self.assertEqual( self._apply('foo'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (ENTER, START, u'foo'), (None, TEXT, u'CONTENT 1'), (INSIDE, TEXT, u'FOO'), (EXIT, END, u'foo'), (None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')] ) def test_prepend_text(self): self.assertEqual( self._apply('text()'), [(None, START, u'root'), (OUTSIDE, TEXT, u'ROOT'), (None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo'), (None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')] ) def test_prepend_context(self): self.assertEqual( self._apply('.'), [(ENTER, START, u'root'), (None, TEXT, u'CONTENT 1'), (INSIDE, TEXT, u'ROOT'), (INSIDE, START, u'foo'), (INSIDE, TEXT, u'FOO'), (INSIDE, END, u'foo'), (INSIDE, START, u'bar'), (INSIDE, TEXT, u'BAR'), (INSIDE, END, u'bar'), (EXIT, END, u'root')], ) def test_prepend_text_context(self): self.assertEqual( self._apply('.', html='foo'), [(OUTSIDE, TEXT, u'foo')] ) def test_prepend_adjacent_elements(self): self.assertEqual( self._apply('*'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (ENTER, START, u'foo'), (None, TEXT, u'CONTENT 1'), (INSIDE, TEXT, u'FOO'), (EXIT, END, u'foo'), (ENTER, 
START, u'bar'), (None, TEXT, u'CONTENT 2'), (INSIDE, TEXT, u'BAR'), (EXIT, END, u'bar'), (None, END, u'root')] ) def test_prepend_all(self): self.assertEqual( self._apply('*|text()'), [(None, START, u'root'), (OUTSIDE, TEXT, u'ROOT'), (ENTER, START, u'foo'), (None, TEXT, u'CONTENT 1'), (INSIDE, TEXT, u'FOO'), (EXIT, END, u'foo'), (ENTER, START, u'bar'), (None, TEXT, u'CONTENT 2'), (INSIDE, TEXT, u'BAR'), (EXIT, END, u'bar'), (None, END, u'root')] ) def test_prepend_with_callback(self): count = [0] def content(): count[0] += 1 yield '%2i.' % count[0] self.assertEqual( self._apply('foo', content), [(None, 'START', u'root'), (None, 'TEXT', u'ROOT'), (ENTER, 'START', u'foo'), (None, 'TEXT', u' 1.'), (INSIDE, 'TEXT', u'FOO'), (EXIT, 'END', u'foo'), (None, 'START', u'bar'), (None, 'TEXT', u'BAR'), (None, 'END', u'bar'), (None, 'END', u'root')] ) class AppendTest(unittest.TestCase, ContentTestMixin): operation = 'append' def test_append_element(self): self.assertEqual( self._apply('foo'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (ENTER, START, u'foo'), (INSIDE, TEXT, u'FOO'), (None, TEXT, u'CONTENT 1'), (EXIT, END, u'foo'), (None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')] ) def test_append_text(self): self.assertEqual( self._apply('text()'), [(None, START, u'root'), (OUTSIDE, TEXT, u'ROOT'), (None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo'), (None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')] ) def test_append_context(self): self.assertEqual( self._apply('.'), [(ENTER, START, u'root'), (INSIDE, TEXT, u'ROOT'), (INSIDE, START, u'foo'), (INSIDE, TEXT, u'FOO'), (INSIDE, END, u'foo'), (INSIDE, START, u'bar'), (INSIDE, TEXT, u'BAR'), (INSIDE, END, u'bar'), (None, TEXT, u'CONTENT 1'), (EXIT, END, u'root')], ) def test_append_text_context(self): self.assertEqual( self._apply('.', html='foo'), [(OUTSIDE, TEXT, u'foo')] ) def test_append_adjacent_elements(self): self.assertEqual( 
self._apply('*'), [(None, START, u'root'), (None, TEXT, u'ROOT'), (ENTER, START, u'foo'), (INSIDE, TEXT, u'FOO'), (None, TEXT, u'CONTENT 1'), (EXIT, END, u'foo'), (ENTER, START, u'bar'), (INSIDE, TEXT, u'BAR'), (None, TEXT, u'CONTENT 2'), (EXIT, END, u'bar'), (None, END, u'root')] ) def test_append_all(self): self.assertEqual( self._apply('*|text()'), [(None, START, u'root'), (OUTSIDE, TEXT, u'ROOT'), (ENTER, START, u'foo'), (INSIDE, TEXT, u'FOO'), (None, TEXT, u'CONTENT 1'), (EXIT, END, u'foo'), (ENTER, START, u'bar'), (INSIDE, TEXT, u'BAR'), (None, TEXT, u'CONTENT 2'), (EXIT, END, u'bar'), (None, END, u'root')] ) def test_append_with_callback(self): count = [0] def content(): count[0] += 1 yield '%2i.' % count[0] self.assertEqual( self._apply('foo', content), [(None, 'START', u'root'), (None, 'TEXT', u'ROOT'), (ENTER, 'START', u'foo'), (INSIDE, 'TEXT', u'FOO'), (None, 'TEXT', u' 1.'), (EXIT, 'END', u'foo'), (None, 'START', u'bar'), (None, 'TEXT', u'BAR'), (None, 'END', u'bar'), (None, 'END', u'root')] ) class AttrTest(unittest.TestCase): def _attr(self, select, name, value): return _transform(FOOBAR, Transformer(select).attr(name, value), with_attrs=True) def test_set_existing_attr(self): self.assertEqual( self._attr('foo', 'name', 'FOO'), [(None, START, (u'root', {})), (None, TEXT, u'ROOT'), (ENTER, START, (u'foo', {u'name': 'FOO', u'size': '100'})), (INSIDE, TEXT, u'FOO'), (EXIT, END, u'foo'), (None, START, (u'bar', {u'name': u'bar'})), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')] ) def test_set_new_attr(self): self.assertEqual( self._attr('foo', 'title', 'FOO'), [(None, START, (u'root', {})), (None, TEXT, u'ROOT'), (ENTER, START, (u'foo', {u'name': u'foo', u'title': 'FOO', u'size': '100'})), (INSIDE, TEXT, u'FOO'), (EXIT, END, u'foo'), (None, START, (u'bar', {u'name': u'bar'})), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')] ) def test_attr_from_function(self): def set(name, event): self.assertEqual(name, 'name') return 
event[1][1].get('name').upper() self.assertEqual( self._attr('foo|bar', 'name', set), [(None, START, (u'root', {})), (None, TEXT, u'ROOT'), (ENTER, START, (u'foo', {u'name': 'FOO', u'size': '100'})), (INSIDE, TEXT, u'FOO'), (EXIT, END, u'foo'), (ENTER, START, (u'bar', {u'name': 'BAR'})), (INSIDE, TEXT, u'BAR'), (EXIT, END, u'bar'), (None, END, u'root')] ) def test_remove_attr(self): self.assertEqual( self._attr('foo', 'name', None), [(None, START, (u'root', {})), (None, TEXT, u'ROOT'), (ENTER, START, (u'foo', {u'size': '100'})), (INSIDE, TEXT, u'FOO'), (EXIT, END, u'foo'), (None, START, (u'bar', {u'name': u'bar'})), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')] ) def test_remove_attr_with_function(self): def set(name, event): return None self.assertEqual( self._attr('foo', 'name', set), [(None, START, (u'root', {})), (None, TEXT, u'ROOT'), (ENTER, START, (u'foo', {u'size': '100'})), (INSIDE, TEXT, u'FOO'), (EXIT, END, u'foo'), (None, START, (u'bar', {u'name': u'bar'})), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')] ) class BufferTestMixin(object): def _apply(self, select, with_attrs=False): buffer = StreamBuffer() events = buffer.events class Trace(object): last = None trace = [] def __call__(self, stream): for event in stream: if events and hash(tuple(events)) != self.last: self.last = hash(tuple(events)) self.trace.append(list(events)) yield event trace = Trace() output = _transform(FOOBAR, getattr(Transformer(select), self.operation) (buffer).apply(trace), with_attrs=with_attrs) simplified = [] for interval in trace.trace: simplified.append(_simplify([(None, e) for e in interval], with_attrs=with_attrs)) return output, simplified class CopyTest(unittest.TestCase, BufferTestMixin): operation = 'copy' def test_copy_element(self): self.assertEqual( self._apply('foo')[1], [[(None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo')]] ) def test_copy_adjacent_elements(self): self.assertEqual( self._apply('foo|bar')[1], 
[[(None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo')], [(None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar')]] ) def test_copy_all(self): self.assertEqual( self._apply('*|text()')[1], [[(None, TEXT, u'ROOT')], [(None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo')], [(None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar')]] ) def test_copy_text(self): self.assertEqual( self._apply('*/text()')[1], [[(None, TEXT, u'FOO')], [(None, TEXT, u'BAR')]] ) def test_copy_context(self): self.assertEqual( self._apply('.')[1], [[(None, START, u'root'), (None, TEXT, u'ROOT'), (None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo'), (None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')]] ) def test_copy_attribute(self): self.assertEqual( self._apply('foo/@name', with_attrs=True)[1], [[(None, ATTR, {'name': u'foo'})]] ) def test_copy_attributes(self): self.assertEqual( self._apply('foo/@*', with_attrs=True)[1], [[(None, ATTR, {u'name': u'foo', u'size': u'100'})]] ) class CutTest(unittest.TestCase, BufferTestMixin): operation = 'cut' def test_cut_element(self): self.assertEqual( self._apply('foo'), ([(None, START, u'root'), (None, TEXT, u'ROOT'), (None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')], [[(None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo')]]) ) def test_cut_adjacent_elements(self): self.assertEqual( self._apply('foo|bar'), ([(None, START, u'root'), (None, TEXT, u'ROOT'), (BREAK, BREAK, None), (None, END, u'root')], [[(None, START, u'foo'), (None, TEXT, u'FOO'), (None, END, u'foo')], [(None, START, u'bar'), (None, TEXT, u'BAR'), (None, END, u'bar')]]) ) def test_cut_all(self): self.assertEqual( self._apply('*|text()'), ([(None, 'START', u'root'), ('BREAK', 'BREAK', None), ('BREAK', 'BREAK', None), (None, 'END', u'root')], [[(None, 'TEXT', u'ROOT')], [(None, 'START', u'foo'), (None, 'TEXT', u'FOO'), (None, 'END', u'foo')], [(None, 
'START', u'bar'), (None, 'TEXT', u'BAR'), (None, 'END', u'bar')]]) ) def test_cut_text(self): self.assertEqual( self._apply('*/text()'), ([(None, 'START', u'root'), (None, 'TEXT', u'ROOT'), (None, 'START', u'foo'), (None, 'END', u'foo'), (None, 'START', u'bar'), (None, 'END', u'bar'), (None, 'END', u'root')], [[(None, 'TEXT', u'FOO')], [(None, 'TEXT', u'BAR')]]) ) def test_cut_context(self): self.assertEqual( self._apply('.')[1], [[(None, 'START', u'root'), (None, 'TEXT', u'ROOT'), (None, 'START', u'foo'), (None, 'TEXT', u'FOO'), (None, 'END', u'foo'), (None, 'START', u'bar'), (None, 'TEXT', u'BAR'), (None, 'END', u'bar'), (None, 'END', u'root')]] ) def test_cut_attribute(self): self.assertEqual( self._apply('foo/@name', with_attrs=True), ([(None, START, (u'root', {})), (None, TEXT, u'ROOT'), (None, START, (u'foo', {u'size': u'100'})), (None, TEXT, u'FOO'), (None, END, u'foo'), (None, START, (u'bar', {u'name': u'bar'})), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')], [[(None, ATTR, {u'name': u'foo'})]]) ) def test_cut_attributes(self): self.assertEqual( self._apply('foo/@*', with_attrs=True), ([(None, START, (u'root', {})), (None, TEXT, u'ROOT'), (None, START, (u'foo', {})), (None, TEXT, u'FOO'), (None, END, u'foo'), (None, START, (u'bar', {u'name': u'bar'})), (None, TEXT, u'BAR'), (None, END, u'bar'), (None, END, u'root')], [[(None, ATTR, {u'name': u'foo', u'size': u'100'})]]) ) # XXX Test this when the XPath implementation is fixed (#233). 
# def test_cut_attribute_or_attribute(self):
#     self.assertEqual(
#         self._apply('foo/@name | foo/@size', with_attrs=True),
#         ([(None, START, (u'root', {})),
#           (None, TEXT, u'ROOT'),
#           (None, START, (u'foo', {})),
#           (None, TEXT, u'FOO'),
#           (None, END, u'foo'),
#           (None, START, (u'bar', {u'name': u'bar'})),
#           (None, TEXT, u'BAR'),
#           (None, END, u'bar'),
#           (None, END, u'root')],
#          [[(None, ATTR, {u'name': u'foo', u'size': u'100'})]])
#     )


def suite():
    """Build the test suite for the transform filter.

    Returns a ``unittest.TestSuite`` holding one suite per TestCase class
    defined in this module, plus the doctests embedded in
    ``genshi.filters.transform``.
    """
    from genshi.input import HTML
    from genshi.core import Markup
    from genshi.builder import tag
    test_cases = (SelectTest, InvertTest, EndTest, EmptyTest, RemoveTest,
                  UnwrapText, WrapTest, FilterTest, MapTest,
                  SubstituteTest, RenameTest, ReplaceTest, BeforeTest,
                  AfterTest, PrependTest, AppendTest, AttrTest, CopyTest,
                  CutTest)
    result = unittest.TestSuite()
    for case in test_cases:
        result.addTest(unittest.makeSuite(case, 'test'))
    # NORMALIZE_WHITESPACE keeps the doctest output comparisons tolerant of
    # formatting; the extra globals mirror what the doctests expect in scope.
    result.addTest(doctest.DocTestSuite(
        genshi.filters.transform, optionflags=doctest.NORMALIZE_WHITESPACE,
        extraglobs={'HTML': HTML, 'tag': tag, 'Markup': Markup}))
    return result


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
[ [ 1, 0, 0.0094, 0.0007, 0, 0.66, 0, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.01, 0.0007, 0, 0.66, 0.0294, 276, 0, 1, 0, 0, 276, 0, 0 ], [ 1, 0, 0.0107, 0.0007, 0, 0....
[ "import doctest", "from pprint import pprint", "import unittest", "from genshi import HTML", "from genshi.builder import Element", "from genshi.core import START, END, TEXT, QName, Attrs", "from genshi.filters.transform import Transformer, StreamBuffer, ENTER, EXIT, \\\n ...
# -*- coding: utf-8 -*- # # Copyright (C) 2006-2009 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. import doctest import unittest from genshi.input import HTML, ParseError from genshi.filters.html import HTMLFormFiller, HTMLSanitizer from genshi.template import MarkupTemplate class HTMLFormFillerTestCase(unittest.TestCase): def test_fill_input_text_no_value(self): html = HTML("""<form><p> <input type="text" name="foo" /> </p></form>""") | HTMLFormFiller() self.assertEquals("""<form><p> <input type="text" name="foo"/> </p></form>""", html.render()) def test_fill_input_text_single_value(self): html = HTML("""<form><p> <input type="text" name="foo" /> </p></form>""") | HTMLFormFiller(data={'foo': 'bar'}) self.assertEquals("""<form><p> <input type="text" name="foo" value="bar"/> </p></form>""", html.render()) def test_fill_input_text_multi_value(self): html = HTML("""<form><p> <input type="text" name="foo" /> </p></form>""") | HTMLFormFiller(data={'foo': ['bar']}) self.assertEquals("""<form><p> <input type="text" name="foo" value="bar"/> </p></form>""", html.render()) def test_fill_input_hidden_no_value(self): html = HTML("""<form><p> <input type="hidden" name="foo" /> </p></form>""") | HTMLFormFiller() self.assertEquals("""<form><p> <input type="hidden" name="foo"/> </p></form>""", html.render()) def test_fill_input_hidden_single_value(self): html = HTML("""<form><p> <input type="hidden" name="foo" /> </p></form>""") | HTMLFormFiller(data={'foo': 'bar'}) self.assertEquals("""<form><p> <input type="hidden" name="foo" value="bar"/> </p></form>""", html.render()) def 
test_fill_input_hidden_multi_value(self): html = HTML("""<form><p> <input type="hidden" name="foo" /> </p></form>""") | HTMLFormFiller(data={'foo': ['bar']}) self.assertEquals("""<form><p> <input type="hidden" name="foo" value="bar"/> </p></form>""", html.render()) def test_fill_textarea_no_value(self): html = HTML("""<form><p> <textarea name="foo"></textarea> </p></form>""") | HTMLFormFiller() self.assertEquals("""<form><p> <textarea name="foo"/> </p></form>""", html.render()) def test_fill_textarea_single_value(self): html = HTML("""<form><p> <textarea name="foo"></textarea> </p></form>""") | HTMLFormFiller(data={'foo': 'bar'}) self.assertEquals("""<form><p> <textarea name="foo">bar</textarea> </p></form>""", html.render()) def test_fill_textarea_multi_value(self): html = HTML("""<form><p> <textarea name="foo"></textarea> </p></form>""") | HTMLFormFiller(data={'foo': ['bar']}) self.assertEquals("""<form><p> <textarea name="foo">bar</textarea> </p></form>""", html.render()) def test_fill_input_checkbox_no_value(self): html = HTML("""<form><p> <input type="checkbox" name="foo" /> </p></form>""") | HTMLFormFiller() self.assertEquals("""<form><p> <input type="checkbox" name="foo"/> </p></form>""", html.render()) def test_fill_input_checkbox_single_value_auto(self): html = HTML("""<form><p> <input type="checkbox" name="foo" /> </p></form>""") self.assertEquals("""<form><p> <input type="checkbox" name="foo"/> </p></form>""", (html | HTMLFormFiller(data={'foo': ''})).render()) self.assertEquals("""<form><p> <input type="checkbox" name="foo" checked="checked"/> </p></form>""", (html | HTMLFormFiller(data={'foo': 'on'})).render()) def test_fill_input_checkbox_single_value_defined(self): html = HTML("""<form><p> <input type="checkbox" name="foo" value="1" /> </p></form>""") self.assertEquals("""<form><p> <input type="checkbox" name="foo" value="1" checked="checked"/> </p></form>""", (html | HTMLFormFiller(data={'foo': '1'})).render()) self.assertEquals("""<form><p> <input 
type="checkbox" name="foo" value="1"/> </p></form>""", (html | HTMLFormFiller(data={'foo': '2'})).render()) def test_fill_input_checkbox_multi_value_auto(self): html = HTML("""<form><p> <input type="checkbox" name="foo" /> </p></form>""") self.assertEquals("""<form><p> <input type="checkbox" name="foo"/> </p></form>""", (html | HTMLFormFiller(data={'foo': []})).render()) self.assertEquals("""<form><p> <input type="checkbox" name="foo" checked="checked"/> </p></form>""", (html | HTMLFormFiller(data={'foo': ['on']})).render()) def test_fill_input_checkbox_multi_value_defined(self): html = HTML("""<form><p> <input type="checkbox" name="foo" value="1" /> </p></form>""") self.assertEquals("""<form><p> <input type="checkbox" name="foo" value="1" checked="checked"/> </p></form>""", (html | HTMLFormFiller(data={'foo': ['1']})).render()) self.assertEquals("""<form><p> <input type="checkbox" name="foo" value="1"/> </p></form>""", (html | HTMLFormFiller(data={'foo': ['2']})).render()) def test_fill_input_radio_no_value(self): html = HTML("""<form><p> <input type="radio" name="foo" /> </p></form>""") | HTMLFormFiller() self.assertEquals("""<form><p> <input type="radio" name="foo"/> </p></form>""", html.render()) def test_fill_input_radio_single_value(self): html = HTML("""<form><p> <input type="radio" name="foo" value="1" /> </p></form>""") self.assertEquals("""<form><p> <input type="radio" name="foo" value="1" checked="checked"/> </p></form>""", (html | HTMLFormFiller(data={'foo': '1'})).render()) self.assertEquals("""<form><p> <input type="radio" name="foo" value="1"/> </p></form>""", (html | HTMLFormFiller(data={'foo': '2'})).render()) def test_fill_input_radio_multi_value(self): html = HTML("""<form><p> <input type="radio" name="foo" value="1" /> </p></form>""") self.assertEquals("""<form><p> <input type="radio" name="foo" value="1" checked="checked"/> </p></form>""", (html | HTMLFormFiller(data={'foo': ['1']})).render()) self.assertEquals("""<form><p> <input type="radio" 
name="foo" value="1"/> </p></form>""", (html | HTMLFormFiller(data={'foo': ['2']})).render()) def test_fill_select_no_value_auto(self): html = HTML("""<form><p> <select name="foo"> <option>1</option> <option>2</option> <option>3</option> </select> </p></form>""") | HTMLFormFiller() self.assertEquals("""<form><p> <select name="foo"> <option>1</option> <option>2</option> <option>3</option> </select> </p></form>""", html.render()) def test_fill_select_no_value_defined(self): html = HTML("""<form><p> <select name="foo"> <option value="1">1</option> <option value="2">2</option> <option value="3">3</option> </select> </p></form>""") | HTMLFormFiller() self.assertEquals("""<form><p> <select name="foo"> <option value="1">1</option> <option value="2">2</option> <option value="3">3</option> </select> </p></form>""", html.render()) def test_fill_select_single_value_auto(self): html = HTML("""<form><p> <select name="foo"> <option>1</option> <option>2</option> <option>3</option> </select> </p></form>""") | HTMLFormFiller(data={'foo': '1'}) self.assertEquals("""<form><p> <select name="foo"> <option selected="selected">1</option> <option>2</option> <option>3</option> </select> </p></form>""", html.render()) def test_fill_select_single_value_defined(self): html = HTML("""<form><p> <select name="foo"> <option value="1">1</option> <option value="2">2</option> <option value="3">3</option> </select> </p></form>""") | HTMLFormFiller(data={'foo': '1'}) self.assertEquals("""<form><p> <select name="foo"> <option value="1" selected="selected">1</option> <option value="2">2</option> <option value="3">3</option> </select> </p></form>""", html.render()) def test_fill_select_multi_value_auto(self): html = HTML("""<form><p> <select name="foo" multiple> <option>1</option> <option>2</option> <option>3</option> </select> </p></form>""") | HTMLFormFiller(data={'foo': ['1', '3']}) self.assertEquals("""<form><p> <select name="foo" multiple="multiple"> <option selected="selected">1</option> 
<option>2</option> <option selected="selected">3</option> </select> </p></form>""", html.render()) def test_fill_select_multi_value_defined(self): html = HTML("""<form><p> <select name="foo" multiple> <option value="1">1</option> <option value="2">2</option> <option value="3">3</option> </select> </p></form>""") | HTMLFormFiller(data={'foo': ['1', '3']}) self.assertEquals("""<form><p> <select name="foo" multiple="multiple"> <option value="1" selected="selected">1</option> <option value="2">2</option> <option value="3" selected="selected">3</option> </select> </p></form>""", html.render()) def test_fill_option_segmented_text(self): html = MarkupTemplate("""<form> <select name="foo"> <option value="1">foo $x</option> </select> </form>""").generate(x=1) | HTMLFormFiller(data={'foo': '1'}) self.assertEquals("""<form> <select name="foo"> <option value="1" selected="selected">foo 1</option> </select> </form>""", html.render()) def test_fill_option_segmented_text_no_value(self): html = MarkupTemplate("""<form> <select name="foo"> <option>foo $x bar</option> </select> </form>""").generate(x=1) | HTMLFormFiller(data={'foo': 'foo 1 bar'}) self.assertEquals("""<form> <select name="foo"> <option selected="selected">foo 1 bar</option> </select> </form>""", html.render()) def test_fill_option_unicode_value(self): html = HTML("""<form> <select name="foo"> <option value="&ouml;">foo</option> </select> </form>""") | HTMLFormFiller(data={'foo': u'ö'}) self.assertEquals(u"""<form> <select name="foo"> <option value="ö" selected="selected">foo</option> </select> </form>""", html.render(encoding=None)) def test_fill_input_password_disabled(self): html = HTML("""<form><p> <input type="password" name="pass" /> </p></form>""") | HTMLFormFiller(data={'pass': 'bar'}) self.assertEquals("""<form><p> <input type="password" name="pass"/> </p></form>""", html.render()) def test_fill_input_password_enabled(self): html = HTML("""<form><p> <input type="password" name="pass" /> </p></form>""") | 
HTMLFormFiller(data={'pass': '1234'}, passwords=True) self.assertEquals("""<form><p> <input type="password" name="pass" value="1234"/> </p></form>""", html.render()) class HTMLSanitizerTestCase(unittest.TestCase): def test_sanitize_unchanged(self): html = HTML('<a href="#">fo<br />o</a>') self.assertEquals('<a href="#">fo<br/>o</a>', (html | HTMLSanitizer()).render()) html = HTML('<a href="#with:colon">foo</a>') self.assertEquals('<a href="#with:colon">foo</a>', (html | HTMLSanitizer()).render()) def test_sanitize_escape_text(self): html = HTML('<a href="#">fo&amp;</a>') self.assertEquals('<a href="#">fo&amp;</a>', (html | HTMLSanitizer()).render()) html = HTML('<a href="#">&lt;foo&gt;</a>') self.assertEquals('<a href="#">&lt;foo&gt;</a>', (html | HTMLSanitizer()).render()) def test_sanitize_entityref_text(self): html = HTML('<a href="#">fo&ouml;</a>') self.assertEquals(u'<a href="#">foö</a>', (html | HTMLSanitizer()).render(encoding=None)) def test_sanitize_escape_attr(self): html = HTML('<div title="&lt;foo&gt;"></div>') self.assertEquals('<div title="&lt;foo&gt;"/>', (html | HTMLSanitizer()).render()) def test_sanitize_close_empty_tag(self): html = HTML('<a href="#">fo<br>o</a>') self.assertEquals('<a href="#">fo<br/>o</a>', (html | HTMLSanitizer()).render()) def test_sanitize_invalid_entity(self): html = HTML('&junk;') self.assertEquals('&amp;junk;', (html | HTMLSanitizer()).render()) def test_sanitize_remove_script_elem(self): html = HTML('<script>alert("Foo")</script>') self.assertEquals('', (html | HTMLSanitizer()).render()) html = HTML('<SCRIPT SRC="http://example.com/"></SCRIPT>') self.assertEquals('', (html | HTMLSanitizer()).render()) self.assertRaises(ParseError, HTML, '<SCR\0IPT>alert("foo")</SCR\0IPT>') self.assertRaises(ParseError, HTML, '<SCRIPT&XYZ SRC="http://example.com/"></SCRIPT>') def test_sanitize_remove_onclick_attr(self): html = HTML('<div onclick=\'alert("foo")\' />') self.assertEquals('<div/>', (html | HTMLSanitizer()).render()) def 
test_sanitize_remove_input_password(self): html = HTML('<form><input type="password" /></form>') self.assertEquals('<form/>', (html | HTMLSanitizer()).render()) def test_sanitize_remove_comments(self): html = HTML('''<div><!-- conditional comment crap --></div>''') self.assertEquals('<div/>', (html | HTMLSanitizer()).render()) def test_sanitize_remove_style_scripts(self): sanitizer = HTMLSanitizer(safe_attrs=HTMLSanitizer.SAFE_ATTRS | set(['style'])) # Inline style with url() using javascript: scheme html = HTML('<DIV STYLE=\'background: url(javascript:alert("foo"))\'>') self.assertEquals('<div/>', (html | sanitizer).render()) # Inline style with url() using javascript: scheme, using control char html = HTML('<DIV STYLE=\'background: url(&#1;javascript:alert("foo"))\'>') self.assertEquals('<div/>', (html | sanitizer).render()) # Inline style with url() using javascript: scheme, in quotes html = HTML('<DIV STYLE=\'background: url("javascript:alert(foo)")\'>') self.assertEquals('<div/>', (html | sanitizer).render()) # IE expressions in CSS not allowed html = HTML('<DIV STYLE=\'width: expression(alert("foo"));\'>') self.assertEquals('<div/>', (html | sanitizer).render()) html = HTML('<DIV STYLE=\'width: e/**/xpression(alert("foo"));\'>') self.assertEquals('<div/>', (html | sanitizer).render()) html = HTML('<DIV STYLE=\'background: url(javascript:alert("foo"));' 'color: #fff\'>') self.assertEquals('<div style="color: #fff"/>', (html | sanitizer).render()) # Inline style with url() using javascript: scheme, using unicode # escapes html = HTML('<DIV STYLE=\'background: \\75rl(javascript:alert("foo"))\'>') self.assertEquals('<div/>', (html | sanitizer).render()) html = HTML('<DIV STYLE=\'background: \\000075rl(javascript:alert("foo"))\'>') self.assertEquals('<div/>', (html | sanitizer).render()) html = HTML('<DIV STYLE=\'background: \\75 rl(javascript:alert("foo"))\'>') self.assertEquals('<div/>', (html | sanitizer).render()) html = HTML('<DIV STYLE=\'background: \\000075 
rl(javascript:alert("foo"))\'>') self.assertEquals('<div/>', (html | sanitizer).render()) html = HTML('<DIV STYLE=\'background: \\000075\r\nrl(javascript:alert("foo"))\'>') self.assertEquals('<div/>', (html | sanitizer).render()) def test_sanitize_remove_style_phishing(self): sanitizer = HTMLSanitizer(safe_attrs=HTMLSanitizer.SAFE_ATTRS | set(['style'])) # The position property is not allowed html = HTML('<div style="position:absolute;top:0"></div>') self.assertEquals('<div style="top:0"/>', (html | sanitizer).render()) # Normal margins get passed through html = HTML('<div style="margin:10px 20px"></div>') self.assertEquals('<div style="margin:10px 20px"/>', (html | sanitizer).render()) # But not negative margins html = HTML('<div style="margin:-1000px 0 0"></div>') self.assertEquals('<div/>', (html | sanitizer).render()) html = HTML('<div style="margin-left:-2000px 0 0"></div>') self.assertEquals('<div/>', (html | sanitizer).render()) html = HTML('<div style="margin-left:1em 1em 1em -4000px"></div>') self.assertEquals('<div/>', (html | sanitizer).render()) def test_sanitize_remove_src_javascript(self): html = HTML('<img src=\'javascript:alert("foo")\'>') self.assertEquals('<img/>', (html | HTMLSanitizer()).render()) # Case-insensitive protocol matching html = HTML('<IMG SRC=\'JaVaScRiPt:alert("foo")\'>') self.assertEquals('<img/>', (html | HTMLSanitizer()).render()) # Grave accents (not parsed) self.assertRaises(ParseError, HTML, '<IMG SRC=`javascript:alert("RSnake says, \'foo\'")`>') # Protocol encoded using UTF-8 numeric entities html = HTML('<IMG SRC=\'&#106;&#97;&#118;&#97;&#115;&#99;&#114;&#105;' '&#112;&#116;&#58;alert("foo")\'>') self.assertEquals('<img/>', (html | HTMLSanitizer()).render()) # Protocol encoded using UTF-8 numeric entities without a semicolon # (which is allowed because the max number of digits is used) html = HTML('<IMG SRC=\'&#0000106&#0000097&#0000118&#0000097' '&#0000115&#0000099&#0000114&#0000105&#0000112&#0000116' 
'&#0000058alert("foo")\'>') self.assertEquals('<img/>', (html | HTMLSanitizer()).render()) # Protocol encoded using UTF-8 numeric hex entities without a semicolon # (which is allowed because the max number of digits is used) html = HTML('<IMG SRC=\'&#x6A&#x61&#x76&#x61&#x73&#x63&#x72&#x69' '&#x70&#x74&#x3A;alert("foo")\'>') self.assertEquals('<img/>', (html | HTMLSanitizer()).render()) # Embedded tab character in protocol html = HTML('<IMG SRC=\'jav\tascript:alert("foo");\'>') self.assertEquals('<img/>', (html | HTMLSanitizer()).render()) # Embedded tab character in protocol, but encoded this time html = HTML('<IMG SRC=\'jav&#x09;ascript:alert("foo");\'>') self.assertEquals('<img/>', (html | HTMLSanitizer()).render()) def suite(): suite = unittest.TestSuite() suite.addTest(doctest.DocTestSuite(HTMLFormFiller.__module__)) suite.addTest(unittest.makeSuite(HTMLFormFillerTestCase, 'test')) suite.addTest(unittest.makeSuite(HTMLSanitizerTestCase, 'test')) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
[ [ 1, 0, 0.0295, 0.0021, 0, 0.66, 0, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0316, 0.0021, 0, 0.66, 0.125, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 1, 0, 0.0358, 0.0021, 0, 0.6...
[ "import doctest", "import unittest", "from genshi.input import HTML, ParseError", "from genshi.filters.html import HTMLFormFiller, HTMLSanitizer", "from genshi.template import MarkupTemplate", "class HTMLFormFillerTestCase(unittest.TestCase):\n\n def test_fill_input_text_no_value(self):\n html =...
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.

import doctest
import unittest


def suite():
    """Aggregate the test suites of the ``genshi.filters`` package.

    Combines the ``html``, ``i18n`` and ``transform`` sub-suites.  The
    transform suite depends on the ``NORMALIZE_WHITESPACE`` doctest option
    flag, so it is only included when the running interpreter's ``doctest``
    module provides that attribute.
    """
    from genshi.filters.tests import html, i18n, transform
    combined = unittest.TestSuite()
    combined.addTest(html.suite())
    combined.addTest(i18n.suite())
    if hasattr(doctest, 'NORMALIZE_WHITESPACE'):
        combined.addTest(transform.suite())
    return combined


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
[ [ 1, 0, 0.5185, 0.037, 0, 0.66, 0, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.5556, 0.037, 0, 0.66, 0.3333, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 2, 0, 0.7593, 0.2963, 0, 0.66...
[ "import doctest", "import unittest", "def suite():\n from genshi.filters.tests import html, i18n, transform\n suite = unittest.TestSuite()\n suite.addTest(html.suite())\n suite.addTest(i18n.suite())\n if hasattr(doctest, 'NORMALIZE_WHITESPACE'):\n suite.addTest(transform.suite())\n retu...
# -*- coding: utf-8 -*- # # Copyright (C) 2007-2009 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. from datetime import datetime import doctest from gettext import NullTranslations from StringIO import StringIO import unittest from genshi.core import Attrs from genshi.template import MarkupTemplate, Context from genshi.filters.i18n import Translator, extract from genshi.input import HTML class DummyTranslations(NullTranslations): _domains = {} def __init__(self, catalog=()): NullTranslations.__init__(self) self._catalog = catalog or {} self.plural = lambda n: n != 1 def add_domain(self, domain, catalog): translation = DummyTranslations(catalog) translation.add_fallback(self) self._domains[domain] = translation def _domain_call(self, func, domain, *args, **kwargs): return getattr(self._domains.get(domain, self), func)(*args, **kwargs) def ugettext(self, message): missing = object() tmsg = self._catalog.get(message, missing) if tmsg is missing: if self._fallback: return self._fallback.ugettext(message) return unicode(message) return tmsg def dugettext(self, domain, message): return self._domain_call('ugettext', domain, message) def ungettext(self, msgid1, msgid2, n): try: return self._catalog[(msgid1, self.plural(n))] except KeyError: if self._fallback: return self._fallback.ngettext(msgid1, msgid2, n) if n == 1: return msgid1 else: return msgid2 def dungettext(self, domain, singular, plural, numeral): return self._domain_call('ungettext', domain, singular, plural, numeral) class TranslatorTestCase(unittest.TestCase): def test_translate_included_attribute_text(self): """ Verify 
that translated attributes end up in a proper `Attrs` instance. """ html = HTML("""<html> <span title="Foo"></span> </html>""") translator = Translator(lambda s: u"Voh") stream = list(html.filter(translator)) kind, data, pos = stream[2] assert isinstance(data[1], Attrs) def test_extract_without_text(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> <p title="Bar">Foo</p> ${ngettext("Singular", "Plural", num)} </html>""") translator = Translator(extract_text=False) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((3, 'ngettext', ('Singular', 'Plural', None), []), messages[0]) def test_extract_plural_form(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> ${ngettext("Singular", "Plural", num)} </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((2, 'ngettext', ('Singular', 'Plural', None), []), messages[0]) def test_extract_funky_plural_form(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> ${ngettext(len(items), *widget.display_names)} </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((2, 'ngettext', (None, None), []), messages[0]) def test_extract_gettext_with_unicode_string(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> ${gettext("Grüße")} </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((2, 'gettext', u'Gr\xfc\xdfe', []), messages[0]) def test_extract_included_attribute_text(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> <span title="Foo"></span> </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((2, None, 'Foo', []), 
messages[0]) def test_extract_attribute_expr(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> <input type="submit" value="${_('Save')}" /> </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((2, '_', 'Save', []), messages[0]) def test_extract_non_included_attribute_interpolated(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> <a href="#anchor_${num}">Foo</a> </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((2, None, 'Foo', []), messages[0]) def test_extract_text_from_sub(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> <py:if test="foo">Foo</py:if> </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((2, None, 'Foo', []), messages[0]) def test_ignore_tag_with_fixed_xml_lang(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> <p xml:lang="en">(c) 2007 Edgewall Software</p> </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(0, len(messages)) def test_extract_tag_with_variable_xml_lang(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> <p xml:lang="${lang}">(c) 2007 Edgewall Software</p> </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((2, None, '(c) 2007 Edgewall Software', []), messages[0]) def test_ignore_attribute_with_expression(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> <input type="submit" value="Reply" title="Reply to comment $num" /> </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(0, len(messages)) def test_extract_i18n_msg(self): 
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg=""> Please see <a href="help.html">Help</a> for details. </p> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual('Please see [1:Help] for details.', messages[0][2]) def test_translate_i18n_msg(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg=""> Please see <a href="help.html">Help</a> for details. </p> </html>""") gettext = lambda s: u"Für Details siehe bitte [1:Hilfe]." translator = Translator(gettext) translator.setup(tmpl) self.assertEqual("""<html> <p>Für Details siehe bitte <a href="help.html">Hilfe</a>.</p> </html>""", tmpl.generate().render()) def test_extract_i18n_msg_nonewline(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="">Please see <a href="help.html">Help</a></p> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual('Please see [1:Help]', messages[0][2]) def test_translate_i18n_msg_nonewline(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="">Please see <a href="help.html">Help</a></p> </html>""") gettext = lambda s: u"Für Details siehe bitte [1:Hilfe]" translator = Translator(gettext) translator.setup(tmpl) self.assertEqual("""<html> <p>Für Details siehe bitte <a href="help.html">Hilfe</a></p> </html>""", tmpl.generate().render()) def test_extract_i18n_msg_elt_nonewline(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> 
<i18n:msg>Please see <a href="help.html">Help</a></i18n:msg> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual('Please see [1:Help]', messages[0][2]) def test_translate_i18n_msg_elt_nonewline(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <i18n:msg>Please see <a href="help.html">Help</a></i18n:msg> </html>""") gettext = lambda s: u"Für Details siehe bitte [1:Hilfe]" translator = Translator(gettext) translator.setup(tmpl) self.assertEqual("""<html> Für Details siehe bitte <a href="help.html">Hilfe</a> </html>""", tmpl.generate().render()) def test_extract_i18n_msg_nested(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg=""> Please see <a href="help.html"><em>Help</em> page</a> for details. </p> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual('Please see [1:[2:Help] page] for details.', messages[0][2]) def test_translate_i18n_msg_nested(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg=""> Please see <a href="help.html"><em>Help</em> page</a> for details. </p> </html>""") gettext = lambda s: u"Für Details siehe bitte [1:[2:Hilfeseite]]." 
translator = Translator(gettext) translator.setup(tmpl) self.assertEqual("""<html> <p>Für Details siehe bitte <a href="help.html"><em>Hilfeseite</em></a>.</p> </html>""", tmpl.generate().render()) def test_extract_i18n_msg_label_with_nested_input(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <div i18n:msg=""> <label><input type="text" size="3" name="daysback" value="30" /> days back</label> </div> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual('[1:[2:] days back]', messages[0][2]) def test_translate_i18n_msg_label_with_nested_input(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <div i18n:msg=""> <label><input type="text" size="3" name="daysback" value="30" /> foo bar</label> </div> </html>""") gettext = lambda s: "[1:[2:] foo bar]" translator = Translator(gettext) translator.setup(tmpl) self.assertEqual("""<html> <div><label><input type="text" size="3" name="daysback" value="30"/> foo bar</label></div> </html>""", tmpl.generate().render()) def test_extract_i18n_msg_empty(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg=""> Show me <input type="text" name="num" /> entries per page. </p> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual('Show me [1:] entries per page.', messages[0][2]) def test_translate_i18n_msg_empty(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg=""> Show me <input type="text" name="num" /> entries per page. 
</p> </html>""") gettext = lambda s: u"[1:] Einträge pro Seite anzeigen." translator = Translator(gettext) translator.setup(tmpl) self.assertEqual("""<html> <p><input type="text" name="num"/> Einträge pro Seite anzeigen.</p> </html>""", tmpl.generate().render()) def test_extract_i18n_msg_multiple(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg=""> Please see <a href="help.html">Help</a> for <em>details</em>. </p> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual('Please see [1:Help] for [2:details].', messages[0][2]) def test_translate_i18n_msg_multiple(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg=""> Please see <a href="help.html">Help</a> for <em>details</em>. </p> </html>""") gettext = lambda s: u"Für [2:Details] siehe bitte [1:Hilfe]." translator = Translator(gettext) translator.setup(tmpl) self.assertEqual("""<html> <p>Für <em>Details</em> siehe bitte <a href="help.html">Hilfe</a>.</p> </html>""", tmpl.generate().render()) def test_extract_i18n_msg_multiple_empty(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg=""> Show me <input type="text" name="num" /> entries per page, starting at page <input type="text" name="num" />. 
</p> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual('Show me [1:] entries per page, starting at page [2:].', messages[0][2]) def test_translate_i18n_msg_multiple_empty(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg=""> Show me <input type="text" name="num" /> entries per page, starting at page <input type="text" name="num" />. </p> </html>""") gettext = lambda s: u"[1:] Einträge pro Seite, beginnend auf Seite [2:]." translator = Translator(gettext) translator.setup(tmpl) self.assertEqual("""<html> <p><input type="text" name="num"/> Eintr\xc3\xa4ge pro Seite, beginnend auf Seite <input type="text" name="num"/>.</p> </html>""", tmpl.generate().render()) def test_extract_i18n_msg_with_param(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="name"> Hello, ${user.name}! </p> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual('Hello, %(name)s!', messages[0][2]) def test_translate_i18n_msg_with_param(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="name"> Hello, ${user.name}! </p> </html>""") gettext = lambda s: u"Hallo, %(name)s!" translator = Translator(gettext) translator.setup(tmpl) self.assertEqual("""<html> <p>Hallo, Jim!</p> </html>""", tmpl.generate(user=dict(name='Jim')).render()) def test_translate_i18n_msg_with_param_reordered(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="name"> Hello, ${user.name}! 
</p> </html>""") gettext = lambda s: u"%(name)s, sei gegrüßt!" translator = Translator(gettext) translator.setup(tmpl) self.assertEqual("""<html> <p>Jim, sei gegrüßt!</p> </html>""", tmpl.generate(user=dict(name='Jim')).render()) def test_translate_i18n_msg_with_attribute_param(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg=""> Hello, <a href="#${anchor}">dude</a>! </p> </html>""") gettext = lambda s: u"Sei gegrüßt, [1:Alter]!" translator = Translator(gettext) translator.setup(tmpl) self.assertEqual("""<html> <p>Sei gegrüßt, <a href="#42">Alter</a>!</p> </html>""", tmpl.generate(anchor='42').render()) def test_extract_i18n_msg_with_two_params(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="name, time"> Posted by ${post.author} at ${entry.time.strftime('%H:%m')} </p> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual('Posted by %(name)s at %(time)s', messages[0][2]) def test_translate_i18n_msg_with_two_params(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="name, time"> Written by ${entry.author} at ${entry.time.strftime('%H:%M')} </p> </html>""") gettext = lambda s: u"%(name)s schrieb dies um %(time)s" translator = Translator(gettext) translator.setup(tmpl) entry = { 'author': 'Jim', 'time': datetime(2008, 4, 1, 14, 30) } self.assertEqual("""<html> <p>Jim schrieb dies um 14:30</p> </html>""", tmpl.generate(entry=entry).render()) def test_extract_i18n_msg_with_directive(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg=""> Show me <input type="text" name="num" py:attrs="{'value': x}" 
/> entries per page. </p> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual('Show me [1:] entries per page.', messages[0][2]) def test_translate_i18n_msg_with_directive(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg=""> Show me <input type="text" name="num" py:attrs="{'value': 'x'}" /> entries per page. </p> </html>""") gettext = lambda s: u"[1:] Einträge pro Seite anzeigen." translator = Translator(gettext) translator.setup(tmpl) self.assertEqual("""<html> <p><input type="text" name="num" value="x"/> Einträge pro Seite anzeigen.</p> </html>""", tmpl.generate().render()) def test_extract_i18n_msg_with_comment(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:comment="As in foo bar" i18n:msg="">Foo</p> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((3, None, 'Foo', ['As in foo bar']), messages[0]) tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="" i18n:comment="As in foo bar">Foo</p> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((3, None, 'Foo', ['As in foo bar']), messages[0]) def test_translate_i18n_msg_with_comment(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="" i18n:comment="As in foo bar">Foo</p> </html>""") gettext = lambda s: u"Voh" translator = Translator(gettext) translator.setup(tmpl) 
self.assertEqual("""<html> <p>Voh</p> </html>""", tmpl.generate().render()) def test_extract_i18n_msg_with_attr(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="" title="Foo bar">Foo</p> </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(2, len(messages)) self.assertEqual((3, None, 'Foo bar', []), messages[0]) self.assertEqual((3, None, 'Foo', []), messages[1]) def test_translate_i18n_msg_with_attr(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="" title="Foo bar">Foo</p> </html>""") gettext = lambda s: u"Voh" translator = Translator(DummyTranslations({ 'Foo': 'Voh', 'Foo bar': u'Voh bär' })) tmpl.filters.insert(0, translator) tmpl.add_directives(Translator.NAMESPACE, translator) self.assertEqual("""<html> <p title="Voh bär">Voh</p> </html>""", tmpl.generate().render()) def test_translate_with_translations_object(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="" i18n:comment="As in foo bar">Foo</p> </html>""") translator = Translator(DummyTranslations({'Foo': 'Voh'})) translator.setup(tmpl) self.assertEqual("""<html> <p>Voh</p> </html>""", tmpl.generate().render()) def test_translate_i18n_msg_and_py_strip_directives(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="" py:strip="">Foo</p> <p py:strip="" i18n:msg="">Foo</p> </html>""") translator = Translator(DummyTranslations({'Foo': 'Voh'})) translator.setup(tmpl) self.assertEqual("""<html> Voh Voh </html>""", tmpl.generate().render()) def test_i18n_msg_ticket_300_extract(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <i18n:msg params="date, 
author"> Changed ${ '10/12/2008' } ago by ${ 'me, the author' } </i18n:msg> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual( (3, None, 'Changed %(date)s ago by %(author)s', []), messages[0] ) def test_i18n_msg_ticket_300_translate(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <i18n:msg params="date, author"> Changed ${ date } ago by ${ author } </i18n:msg> </html>""") translations = DummyTranslations({ 'Changed %(date)s ago by %(author)s': u'Modificado à %(date)s por %(author)s' }) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> Modificado à um dia por Pedro </html>""", tmpl.generate(date='um dia', author="Pedro").render()) def test_i18n_msg_ticket_251_extract(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg=""><tt><b>Translation[&nbsp;0&nbsp;]</b>: <em>One coin</em></tt></p> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual( (3, None, u'[1:[2:Translation\\[\xa00\xa0\\]]: [3:One coin]]', []), messages[0] ) def test_i18n_msg_ticket_251_translate(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg=""><tt><b>Translation[&nbsp;0&nbsp;]</b>: <em>One coin</em></tt></p> </html>""") translations = DummyTranslations({ u'[1:[2:Translation\\[\xa00\xa0\\]]: [3:One coin]]': u'[1:[2:Trandução\\[\xa00\xa0\\]]: [3:Uma moeda]]' }) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <p><tt><b>Trandução[ 0 ]</b>: <em>Uma moeda</em></tt></p> </html>""", tmpl.generate().render()) def 
test_extract_i18n_msg_with_other_directives_nested(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="" py:with="q = quote_plus(message[:80])">Before you do that, though, please first try <strong><a href="${trac.homepage}search?ticket=yes&amp;noquickjump=1&amp;q=$q">searching</a> for similar issues</strong>, as it is quite likely that this problem has been reported before. For questions about installation and configuration of Trac, please try the <a href="${trac.homepage}wiki/MailingList">mailing list</a> instead of filing a ticket. </p> </html>""") translator = Translator() translator.setup(tmpl) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual( 'Before you do that, though, please first try\n ' '[1:[2:searching]\n for similar issues], as it is ' 'quite likely that this problem\n has been reported ' 'before. For questions about installation\n and ' 'configuration of Trac, please try the\n ' '[3:mailing list]\n instead of filing a ticket.', messages[0][2] ) def test_translate_i18n_msg_with_other_directives_nested(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="">Before you do that, though, please first try <strong><a href="${trac.homepage}search?ticket=yes&amp;noquickjump=1&amp;q=q">searching</a> for similar issues</strong>, as it is quite likely that this problem has been reported before. For questions about installation and configuration of Trac, please try the <a href="${trac.homepage}wiki/MailingList">mailing list</a> instead of filing a ticket. </p> </html>""") translations = DummyTranslations({ 'Before you do that, though, please first try\n ' '[1:[2:searching]\n for similar issues], as it is ' 'quite likely that this problem\n has been reported ' 'before. 
For questions about installation\n and ' 'configuration of Trac, please try the\n ' '[3:mailing list]\n instead of filing a ticket.': u'Antes de o fazer, porém,\n ' u'[1:por favor tente [2:procurar]\n por problemas semelhantes], uma vez que ' u'é muito provável que este problema\n já tenha sido reportado ' u'anteriormente. Para questões relativas à instalação\n e ' u'configuração do Trac, por favor tente a\n ' u'[3:mailing list]\n em vez de criar um assunto.' }) translator = Translator(translations) translator.setup(tmpl) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) ctx = Context() ctx.push({'trac': {'homepage': 'http://trac.edgewall.org/'}}) self.assertEqual("""<html> <p>Antes de o fazer, porém, <strong>por favor tente <a href="http://trac.edgewall.org/search?ticket=yes&amp;noquickjump=1&amp;q=q">procurar</a> por problemas semelhantes</strong>, uma vez que é muito provável que este problema já tenha sido reportado anteriormente. Para questões relativas à instalação e configuração do Trac, por favor tente a <a href="http://trac.edgewall.org/wiki/MailingList">mailing list</a> em vez de criar um assunto.</p> </html>""", tmpl.generate(ctx).render()) def test_i18n_msg_with_other_nested_directives_with_reordered_content(self): # See: http://genshi.edgewall.org/ticket/300#comment:10 tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p py:if="not editable" class="hint" i18n:msg=""> <strong>Note:</strong> This repository is defined in <code><a href="${ 'href.wiki(TracIni)' }">trac.ini</a></code> and cannot be edited on this page. 
</p> </html>""") translations = DummyTranslations({ '[1:Note:] This repository is defined in\n ' '[2:[3:trac.ini]]\n and cannot be edited on this page.': u'[1:Nota:] Este repositório está definido em \n ' u'[2:[3:trac.ini]]\n e não pode ser editado nesta página.', }) translator = Translator(translations) translator.setup(tmpl) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual( '[1:Note:] This repository is defined in\n ' '[2:[3:trac.ini]]\n and cannot be edited on this page.', messages[0][2] ) self.assertEqual("""<html> <p class="hint"><strong>Nota:</strong> Este repositório está definido em <code><a href="href.wiki(TracIni)">trac.ini</a></code> e não pode ser editado nesta página.</p> </html>""", tmpl.generate(editable=False).render()) def test_translate_i18n_domain_with_msg_directives(self): #"""translate with i18n:domain and nested i18n:msg directives """ tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <div i18n:domain="foo"> <p i18n:msg="">FooBar</p> <p i18n:msg="">Bar</p> </div> </html>""") translations = DummyTranslations({'Bar': 'Voh'}) translations.add_domain('foo', {'FooBar': 'BarFoo', 'Bar': 'PT_Foo'}) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <div> <p>BarFoo</p> <p>PT_Foo</p> </div> </html>""", tmpl.generate().render()) def test_translate_i18n_domain_with_inline_directives(self): #"""translate with inlined i18n:domain and i18n:msg directives""" tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="" i18n:domain="foo">FooBar</p> </html>""") translations = DummyTranslations({'Bar': 'Voh'}) translations.add_domain('foo', {'FooBar': 'BarFoo'}) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <p>BarFoo</p> </html>""", tmpl.generate().render()) def 
test_translate_i18n_domain_without_msg_directives(self): #"""translate domain call without i18n:msg directives still uses current domain""" tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="">Bar</p> <div i18n:domain="foo"> <p i18n:msg="">FooBar</p> <p i18n:msg="">Bar</p> <p>Bar</p> </div> <p>Bar</p> </html>""") translations = DummyTranslations({'Bar': 'Voh'}) translations.add_domain('foo', {'FooBar': 'BarFoo', 'Bar': 'PT_Foo'}) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <p>Voh</p> <div> <p>BarFoo</p> <p>PT_Foo</p> <p>PT_Foo</p> </div> <p>Voh</p> </html>""", tmpl.generate().render()) def test_translate_i18n_domain_as_directive_not_attribute(self): #"""translate with domain as directive""" tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <i18n:domain name="foo"> <p i18n:msg="">FooBar</p> <p i18n:msg="">Bar</p> <p>Bar</p> </i18n:domain> <p>Bar</p> </html>""") translations = DummyTranslations({'Bar': 'Voh'}) translations.add_domain('foo', {'FooBar': 'BarFoo', 'Bar': 'PT_Foo'}) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <p>BarFoo</p> <p>PT_Foo</p> <p>PT_Foo</p> <p>Voh</p> </html>""", tmpl.generate().render()) def test_translate_i18n_domain_nested_directives(self): #"""translate with nested i18n:domain directives""" tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="">Bar</p> <div i18n:domain="foo"> <p i18n:msg="">FooBar</p> <p i18n:domain="bar" i18n:msg="">Bar</p> <p>Bar</p> </div> <p>Bar</p> </html>""") translations = DummyTranslations({'Bar': 'Voh'}) translations.add_domain('foo', {'FooBar': 'BarFoo', 'Bar': 'foo_Bar'}) translations.add_domain('bar', {'Bar': 'bar_Bar'}) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> 
<p>Voh</p> <div> <p>BarFoo</p> <p>bar_Bar</p> <p>foo_Bar</p> </div> <p>Voh</p> </html>""", tmpl.generate().render()) def test_translate_i18n_domain_with_empty_nested_domain_directive(self): #"""translate with empty nested i18n:domain directive does not use dngettext""" tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="">Bar</p> <div i18n:domain="foo"> <p i18n:msg="">FooBar</p> <p i18n:domain="" i18n:msg="">Bar</p> <p>Bar</p> </div> <p>Bar</p> </html>""") translations = DummyTranslations({'Bar': 'Voh'}) translations.add_domain('foo', {'FooBar': 'BarFoo', 'Bar': 'foo_Bar'}) translations.add_domain('bar', {'Bar': 'bar_Bar'}) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <p>Voh</p> <div> <p>BarFoo</p> <p>Voh</p> <p>foo_Bar</p> </div> <p>Voh</p> </html>""", tmpl.generate().render()) def test_translate_i18n_choose_as_attribute(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <div i18n:choose="one"> <p i18n:singular="">FooBar</p> <p i18n:plural="">FooBars</p> </div> <div i18n:choose="two"> <p i18n:singular="">FooBar</p> <p i18n:plural="">FooBars</p> </div> </html>""") translations = DummyTranslations() translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <div> <p>FooBar</p> </div> <div> <p>FooBars</p> </div> </html>""", tmpl.generate(one=1, two=2).render()) def test_translate_i18n_choose_as_directive(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <i18n:choose numeral="two"> <p i18n:singular="">FooBar</p> <p i18n:plural="">FooBars</p> </i18n:choose> <i18n:choose numeral="one"> <p i18n:singular="">FooBar</p> <p i18n:plural="">FooBars</p> </i18n:choose> </html>""") translations = DummyTranslations() translator = Translator(translations) translator.setup(tmpl) 
self.assertEqual("""<html> <p>FooBars</p> <p>FooBar</p> </html>""", tmpl.generate(one=1, two=2).render()) def test_translate_i18n_choose_as_attribute_with_params(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <div i18n:choose="two; fname, lname"> <p i18n:singular="">Foo $fname $lname</p> <p i18n:plural="">Foos $fname $lname</p> </div> </html>""") translations = DummyTranslations({ ('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s', ('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s', 'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s': 'Vohs %(fname)s %(lname)s', }) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <div> <p>Vohs John Doe</p> </div> </html>""", tmpl.generate(two=2, fname='John', lname='Doe').render()) def test_translate_i18n_choose_as_attribute_with_params_and_domain_as_param(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n" i18n:domain="foo"> <div i18n:choose="two; fname, lname"> <p i18n:singular="">Foo $fname $lname</p> <p i18n:plural="">Foos $fname $lname</p> </div> </html>""") translations = DummyTranslations() translations.add_domain('foo', { ('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s', ('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s', 'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s': 'Vohs %(fname)s %(lname)s', }) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <div> <p>Vohs John Doe</p> </div> </html>""", tmpl.generate(two=2, fname='John', lname='Doe').render()) def test_translate_i18n_choose_as_directive_with_params(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <i18n:choose numeral="two" params="fname, lname"> <p i18n:singular="">Foo ${fname} 
${lname}</p> <p i18n:plural="">Foos ${fname} ${lname}</p> </i18n:choose> <i18n:choose numeral="one" params="fname, lname"> <p i18n:singular="">Foo ${fname} ${lname}</p> <p i18n:plural="">Foos ${fname} ${lname}</p> </i18n:choose> </html>""") translations = DummyTranslations({ ('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s', ('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s', 'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s': 'Vohs %(fname)s %(lname)s', }) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <p>Vohs John Doe</p> <p>Voh John Doe</p> </html>""", tmpl.generate(one=1, two=2, fname='John', lname='Doe').render()) def test_translate_i18n_choose_as_directive_with_params_and_domain_as_directive(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <i18n:domain name="foo"> <i18n:choose numeral="two" params="fname, lname"> <p i18n:singular="">Foo ${fname} ${lname}</p> <p i18n:plural="">Foos ${fname} ${lname}</p> </i18n:choose> </i18n:domain> <i18n:choose numeral="one" params="fname, lname"> <p i18n:singular="">Foo ${fname} ${lname}</p> <p i18n:plural="">Foos ${fname} ${lname}</p> </i18n:choose> </html>""") translations = DummyTranslations() translations.add_domain('foo', { ('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s', ('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s', 'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s': 'Vohs %(fname)s %(lname)s', }) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <p>Vohs John Doe</p> <p>Foo John Doe</p> </html>""", tmpl.generate(one=1, two=2, fname='John', lname='Doe').render()) def test_extract_i18n_choose_as_attribute(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <div i18n:choose="one"> <p 
i18n:singular="">FooBar</p> <p i18n:plural="">FooBars</p> </div> <div i18n:choose="two"> <p i18n:singular="">FooBar</p> <p i18n:plural="">FooBars</p> </div> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(2, len(messages)) self.assertEqual((3, 'ngettext', ('FooBar', 'FooBars'), []), messages[0]) self.assertEqual((7, 'ngettext', ('FooBar', 'FooBars'), []), messages[1]) def test_extract_i18n_choose_as_directive(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <i18n:choose numeral="two"> <p i18n:singular="">FooBar</p> <p i18n:plural="">FooBars</p> </i18n:choose> <i18n:choose numeral="one"> <p i18n:singular="">FooBar</p> <p i18n:plural="">FooBars</p> </i18n:choose> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(2, len(messages)) self.assertEqual((3, 'ngettext', ('FooBar', 'FooBars'), []), messages[0]) self.assertEqual((7, 'ngettext', ('FooBar', 'FooBars'), []), messages[1]) def test_extract_i18n_choose_as_attribute_with_params(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <div i18n:choose="two; fname, lname"> <p i18n:singular="">Foo $fname $lname</p> <p i18n:plural="">Foos $fname $lname</p> </div> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((3, 'ngettext', ('Foo %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s'), []), messages[0]) def test_extract_i18n_choose_as_attribute_with_params_and_domain_as_param(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n" i18n:domain="foo"> <div 
i18n:choose="two; fname, lname"> <p i18n:singular="">Foo $fname $lname</p> <p i18n:plural="">Foos $fname $lname</p> </div> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((4, 'ngettext', ('Foo %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s'), []), messages[0]) def test_extract_i18n_choose_as_directive_with_params(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <i18n:choose numeral="two" params="fname, lname"> <p i18n:singular="">Foo ${fname} ${lname}</p> <p i18n:plural="">Foos ${fname} ${lname}</p> </i18n:choose> <i18n:choose numeral="one" params="fname, lname"> <p i18n:singular="">Foo ${fname} ${lname}</p> <p i18n:plural="">Foos ${fname} ${lname}</p> </i18n:choose> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(2, len(messages)) self.assertEqual((3, 'ngettext', ('Foo %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s'), []), messages[0]) self.assertEqual((7, 'ngettext', ('Foo %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s'), []), messages[1]) def test_extract_i18n_choose_as_directive_with_params_and_domain_as_directive(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <i18n:domain name="foo"> <i18n:choose numeral="two" params="fname, lname"> <p i18n:singular="">Foo ${fname} ${lname}</p> <p i18n:plural="">Foos ${fname} ${lname}</p> </i18n:choose> </i18n:domain> <i18n:choose numeral="one" params="fname, lname"> <p i18n:singular="">Foo ${fname} ${lname}</p> <p i18n:plural="">Foos ${fname} ${lname}</p> </i18n:choose> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) 
self.assertEqual(2, len(messages)) self.assertEqual((4, 'ngettext', ('Foo %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s'), []), messages[0]) self.assertEqual((9, 'ngettext', ('Foo %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s'), []), messages[1]) def test_extract_i18n_choose_as_attribute_with_params_and_comment(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <div i18n:choose="two; fname, lname" i18n:comment="As in Foo Bar"> <p i18n:singular="">Foo $fname $lname</p> <p i18n:plural="">Foos $fname $lname</p> </div> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((3, 'ngettext', ('Foo %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s'), ['As in Foo Bar']), messages[0]) def test_extract_i18n_choose_as_directive_with_params_and_comment(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <i18n:choose numeral="two" params="fname, lname" i18n:comment="As in Foo Bar"> <p i18n:singular="">Foo ${fname} ${lname}</p> <p i18n:plural="">Foos ${fname} ${lname}</p> </i18n:choose> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((3, 'ngettext', ('Foo %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s'), ['As in Foo Bar']), messages[0]) def test_translate_i18n_domain_with_nested_inlcudes(self): import os, shutil, tempfile from genshi.template.loader import TemplateLoader dirname = tempfile.mkdtemp(suffix='genshi_test') try: for idx in range(7): file1 = open(os.path.join(dirname, 'tmpl%d.html' % idx), 'w') try: file1.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n" 
py:strip=""> <div>Included tmpl$idx</div> <p i18n:msg="idx">Bar $idx</p> <p i18n:domain="bar">Bar</p> <p i18n:msg="idx" i18n:domain="">Bar $idx</p> <p i18n:domain="" i18n:msg="idx">Bar $idx</p> <py:if test="idx &lt; 6"> <xi:include href="tmpl${idx}.html" py:with="idx = idx+1"/> </py:if> </html>""") finally: file1.close() file2 = open(os.path.join(dirname, 'tmpl10.html'), 'w') try: file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n" i18n:domain="foo"> <xi:include href="tmpl${idx}.html" py:with="idx = idx+1"/> </html>""") finally: file2.close() def callback(template): translations = DummyTranslations({'Bar %(idx)s': 'Voh %(idx)s'}) translations.add_domain('foo', {'Bar %(idx)s': 'foo_Bar %(idx)s'}) translations.add_domain('bar', {'Bar': 'bar_Bar'}) translator = Translator(translations) translator.setup(template) loader = TemplateLoader([dirname], callback=callback) tmpl = loader.load('tmpl10.html') self.assertEqual("""<html> <div>Included tmpl0</div> <p>foo_Bar 0</p> <p>bar_Bar</p> <p>Voh 0</p> <p>Voh 0</p> <div>Included tmpl1</div> <p>foo_Bar 1</p> <p>bar_Bar</p> <p>Voh 1</p> <p>Voh 1</p> <div>Included tmpl2</div> <p>foo_Bar 2</p> <p>bar_Bar</p> <p>Voh 2</p> <p>Voh 2</p> <div>Included tmpl3</div> <p>foo_Bar 3</p> <p>bar_Bar</p> <p>Voh 3</p> <p>Voh 3</p> <div>Included tmpl4</div> <p>foo_Bar 4</p> <p>bar_Bar</p> <p>Voh 4</p> <p>Voh 4</p> <div>Included tmpl5</div> <p>foo_Bar 5</p> <p>bar_Bar</p> <p>Voh 5</p> <p>Voh 5</p> <div>Included tmpl6</div> <p>foo_Bar 6</p> <p>bar_Bar</p> <p>Voh 6</p> <p>Voh 6</p> </html>""", tmpl.generate(idx=-1).render()) finally: shutil.rmtree(dirname) def test_translate_i18n_domain_with_nested_inlcudes_with_translatable_attrs(self): import os, shutil, tempfile from genshi.template.loader import TemplateLoader dirname = tempfile.mkdtemp(suffix='genshi_test') try: for idx in range(4): file1 = open(os.path.join(dirname, 'tmpl%d.html' % idx), 'w') try: 
file1.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n" py:strip=""> <div>Included tmpl$idx</div> <p title="${dg('foo', 'Bar %(idx)s') % dict(idx=idx)}" i18n:msg="idx">Bar $idx</p> <p title="Bar" i18n:domain="bar">Bar</p> <p title="Bar" i18n:msg="idx" i18n:domain="">Bar $idx</p> <p i18n:msg="idx" i18n:domain="" title="Bar">Bar $idx</p> <p i18n:domain="" i18n:msg="idx" title="Bar">Bar $idx</p> <py:if test="idx &lt; 3"> <xi:include href="tmpl${idx}.html" py:with="idx = idx+1"/> </py:if> </html>""") finally: file1.close() file2 = open(os.path.join(dirname, 'tmpl10.html'), 'w') try: file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n" i18n:domain="foo"> <xi:include href="tmpl${idx}.html" py:with="idx = idx+1"/> </html>""") finally: file2.close() translations = DummyTranslations({'Bar %(idx)s': 'Voh %(idx)s', 'Bar': 'Voh'}) translations.add_domain('foo', {'Bar %(idx)s': 'foo_Bar %(idx)s'}) translations.add_domain('bar', {'Bar': 'bar_Bar'}) translator = Translator(translations) def callback(template): translator.setup(template) loader = TemplateLoader([dirname], callback=callback) tmpl = loader.load('tmpl10.html') self.assertEqual("""<html> <div>Included tmpl0</div> <p title="foo_Bar 0">foo_Bar 0</p> <p title="bar_Bar">bar_Bar</p> <p title="Voh">Voh 0</p> <p title="Voh">Voh 0</p> <p title="Voh">Voh 0</p> <div>Included tmpl1</div> <p title="foo_Bar 1">foo_Bar 1</p> <p title="bar_Bar">bar_Bar</p> <p title="Voh">Voh 1</p> <p title="Voh">Voh 1</p> <p title="Voh">Voh 1</p> <div>Included tmpl2</div> <p title="foo_Bar 2">foo_Bar 2</p> <p title="bar_Bar">bar_Bar</p> <p title="Voh">Voh 2</p> <p title="Voh">Voh 2</p> <p title="Voh">Voh 2</p> <div>Included tmpl3</div> <p title="foo_Bar 3">foo_Bar 3</p> <p title="bar_Bar">bar_Bar</p> <p title="Voh">Voh 3</p> <p title="Voh">Voh 3</p> <p 
title="Voh">Voh 3</p> </html>""", tmpl.generate(idx=-1, dg=translations.dugettext).render()) finally: shutil.rmtree(dirname) def test_translate_i18n_msg_and_comment_with_py_strip_directives(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="" i18n:comment="As in foo bar" py:strip="">Foo</p> <p py:strip="" i18n:msg="" i18n:comment="As in foo bar">Foo</p> </html>""") translator = Translator(DummyTranslations({'Foo': 'Voh'})) translator.setup(tmpl) self.assertEqual("""<html> Voh Voh </html>""", tmpl.generate().render()) def test_translate_i18n_choose_and_py_strip(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <div i18n:choose="two; fname, lname"> <p i18n:singular="">Foo $fname $lname</p> <p i18n:plural="">Foos $fname $lname</p> </div> </html>""") translations = DummyTranslations({ ('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s', ('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s', 'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s': 'Vohs %(fname)s %(lname)s', }) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <div> <p>Vohs John Doe</p> </div> </html>""", tmpl.generate(two=2, fname='John', lname='Doe').render()) def test_translate_i18n_choose_and_domain_and_py_strip(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n" i18n:domain="foo"> <div i18n:choose="two; fname, lname"> <p i18n:singular="">Foo $fname $lname</p> <p i18n:plural="">Foos $fname $lname</p> </div> </html>""") translations = DummyTranslations() translations.add_domain('foo', { ('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s', ('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s', 'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s': 'Vohs %(fname)s 
%(lname)s', }) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <div> <p>Vohs John Doe</p> </div> </html>""", tmpl.generate(two=2, fname='John', lname='Doe').render()) def test_extract_i18n_msg_with_py_strip(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="" py:strip=""> Please see <a href="help.html">Help</a> for details. </p> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((3, None, 'Please see [1:Help] for details.', []), messages[0]) def test_extract_i18n_msg_with_py_strip_and_comment(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="" py:strip="" i18n:comment="Foo"> Please see <a href="help.html">Help</a> for details. </p> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((3, None, 'Please see [1:Help] for details.', ['Foo']), messages[0]) def test_extract_i18n_choose_as_attribute_and_py_strip(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <div i18n:choose="one" py:strip=""> <p i18n:singular="" py:strip="">FooBar</p> <p i18n:plural="" py:strip="">FooBars</p> </div> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((3, 'ngettext', ('FooBar', 'FooBars'), []), messages[0]) def test_translate_i18n_domain_with_inline_directive_on_START_NS(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n" 
i18n:domain="foo"> <p i18n:msg="">FooBar</p> </html>""") translations = DummyTranslations({'Bar': 'Voh'}) translations.add_domain('foo', {'FooBar': 'BarFoo'}) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <p>BarFoo</p> </html>""", tmpl.generate().render()) def test_translate_i18n_domain_with_inline_directive_on_START_NS_with_py_strip(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n" i18n:domain="foo" py:strip=""> <p i18n:msg="">FooBar</p> </html>""") translations = DummyTranslations({'Bar': 'Voh'}) translations.add_domain('foo', {'FooBar': 'BarFoo'}) translator = Translator(translations) translator.setup(tmpl) self.assertEqual(""" <p>BarFoo</p> """, tmpl.generate().render()) class ExtractTestCase(unittest.TestCase): def test_markup_template_extraction(self): buf = StringIO("""<html xmlns:py="http://genshi.edgewall.org/"> <head> <title>Example</title> </head> <body> <h1>Example</h1> <p>${_("Hello, %(name)s") % dict(name=username)}</p> <p>${ngettext("You have %d item", "You have %d items", num)}</p> </body> </html>""") results = list(extract(buf, ['_', 'ngettext'], [], {})) self.assertEqual([ (3, None, 'Example', []), (6, None, 'Example', []), (7, '_', 'Hello, %(name)s', []), (8, 'ngettext', ('You have %d item', 'You have %d items', None), []), ], results) def test_extraction_without_text(self): buf = StringIO("""<html xmlns:py="http://genshi.edgewall.org/"> <p title="Bar">Foo</p> ${ngettext("Singular", "Plural", num)} </html>""") results = list(extract(buf, ['_', 'ngettext'], [], { 'extract_text': 'no' })) self.assertEqual([ (3, 'ngettext', ('Singular', 'Plural', None), []), ], results) def test_text_template_extraction(self): buf = StringIO("""${_("Dear %(name)s") % {'name': name}}, ${ngettext("Your item:", "Your items", len(items))} #for item in items * $item #end All the best, Foobar""") results = list(extract(buf, ['_', 'ngettext'], [], { 
'template_class': 'genshi.template:TextTemplate' })) self.assertEqual([ (1, '_', 'Dear %(name)s', []), (3, 'ngettext', ('Your item:', 'Your items', None), []), (7, None, 'All the best,\n Foobar', []) ], results) def test_extraction_with_keyword_arg(self): buf = StringIO("""<html xmlns:py="http://genshi.edgewall.org/"> ${gettext('Foobar', foo='bar')} </html>""") results = list(extract(buf, ['gettext'], [], {})) self.assertEqual([ (2, 'gettext', ('Foobar'), []), ], results) def test_extraction_with_nonstring_arg(self): buf = StringIO("""<html xmlns:py="http://genshi.edgewall.org/"> ${dgettext(curdomain, 'Foobar')} </html>""") results = list(extract(buf, ['dgettext'], [], {})) self.assertEqual([ (2, 'dgettext', (None, 'Foobar'), []), ], results) def test_extraction_inside_ignored_tags(self): buf = StringIO("""<html xmlns:py="http://genshi.edgewall.org/"> <script type="text/javascript"> $('#llist').tabs({ remote: true, spinner: "${_('Please wait...')}" }); </script> </html>""") results = list(extract(buf, ['_'], [], {})) self.assertEqual([ (5, '_', 'Please wait...', []), ], results) def test_extraction_inside_ignored_tags_with_directives(self): buf = StringIO("""<html xmlns:py="http://genshi.edgewall.org/"> <script type="text/javascript"> <py:if test="foobar"> alert("This shouldn't be extracted"); </py:if> </script> </html>""") self.assertEqual([], list(extract(buf, ['_'], [], {}))) def test_extract_py_def_directive_with_py_strip(self): # Failed extraction from Trac tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" py:strip=""> <py:def function="diff_options_fields(diff)"> <label for="style">View differences</label> <select id="style" name="style"> <option selected="${diff.style == 'inline' or None}" value="inline">inline</option> <option selected="${diff.style == 'sidebyside' or None}" value="sidebyside">side by side</option> </select> <div class="field"> Show <input type="text" name="contextlines" id="contextlines" size="2" maxlength="3" 
value="${diff.options.contextlines &lt; 0 and 'all' or diff.options.contextlines}" /> <label for="contextlines">lines around each change</label> </div> <fieldset id="ignore" py:with="options = diff.options"> <legend>Ignore:</legend> <div class="field"> <input type="checkbox" id="ignoreblanklines" name="ignoreblanklines" checked="${options.ignoreblanklines or None}" /> <label for="ignoreblanklines">Blank lines</label> </div> <div class="field"> <input type="checkbox" id="ignorecase" name="ignorecase" checked="${options.ignorecase or None}" /> <label for="ignorecase">Case changes</label> </div> <div class="field"> <input type="checkbox" id="ignorewhitespace" name="ignorewhitespace" checked="${options.ignorewhitespace or None}" /> <label for="ignorewhitespace">White space changes</label> </div> </fieldset> <div class="buttons"> <input type="submit" name="update" value="${_('Update')}" /> </div> </py:def></html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(10, len(messages)) self.assertEqual([ (3, None, 'View differences', []), (6, None, 'inline', []), (8, None, 'side by side', []), (10, None, 'Show', []), (13, None, 'lines around each change', []), (16, None, 'Ignore:', []), (20, None, 'Blank lines', []), (25, None, 'Case changes',[]), (30, None, 'White space changes', []), (34, '_', 'Update', [])], messages) def suite(): suite = unittest.TestSuite() suite.addTest(doctest.DocTestSuite(Translator.__module__)) suite.addTest(unittest.makeSuite(TranslatorTestCase, 'test')) suite.addTest(unittest.makeSuite(ExtractTestCase, 'test')) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
[ [ 1, 0, 0.0086, 0.0006, 0, 0.66, 0, 426, 0, 1, 0, 0, 426, 0, 0 ], [ 1, 0, 0.0092, 0.0006, 0, 0.66, 0.0769, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0098, 0.0006, 0, ...
[ "from datetime import datetime", "import doctest", "from gettext import NullTranslations", "from StringIO import StringIO", "import unittest", "from genshi.core import Attrs", "from genshi.template import MarkupTemplate, Context", "from genshi.filters.i18n import Translator, extract", "from genshi.i...
# -*- coding: utf-8 -*- # # Copyright (C) 2006-2009 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. """Implementation of a number of stream filters.""" try: any except NameError: from genshi.util import any import re from genshi.core import Attrs, QName, stripentities from genshi.core import END, START, TEXT, COMMENT __all__ = ['HTMLFormFiller', 'HTMLSanitizer'] __docformat__ = 'restructuredtext en' class HTMLFormFiller(object): """A stream filter that can populate HTML forms from a dictionary of values. >>> from genshi.input import HTML >>> html = HTML('''<form> ... <p><input type="text" name="foo" /></p> ... </form>''') >>> filler = HTMLFormFiller(data={'foo': 'bar'}) >>> print(html | filler) <form> <p><input type="text" name="foo" value="bar"/></p> </form> """ # TODO: only select the first radio button, and the first select option # (if not in a multiple-select) # TODO: only apply to elements in the XHTML namespace (or no namespace)? def __init__(self, name=None, id=None, data=None, passwords=False): """Create the filter. :param name: The name of the form that should be populated. If this parameter is given, only forms where the ``name`` attribute value matches the parameter are processed. :param id: The ID of the form that should be populated. If this parameter is given, only forms where the ``id`` attribute value matches the parameter are processed. :param data: The dictionary of form values, where the keys are the names of the form fields, and the values are the values to fill in. :param passwords: Whether password input fields should be populated. 
This is off by default for security reasons (for example, a password may end up in the browser cache) :note: Changed in 0.5.2: added the `passwords` option """ self.name = name self.id = id if data is None: data = {} self.data = data self.passwords = passwords def __call__(self, stream): """Apply the filter to the given stream. :param stream: the markup event stream to filter """ in_form = in_select = in_option = in_textarea = False select_value = option_value = textarea_value = None option_start = None option_text = [] no_option_value = False for kind, data, pos in stream: if kind is START: tag, attrs = data tagname = tag.localname if tagname == 'form' and ( self.name and attrs.get('name') == self.name or self.id and attrs.get('id') == self.id or not (self.id or self.name)): in_form = True elif in_form: if tagname == 'input': type = attrs.get('type', '').lower() if type in ('checkbox', 'radio'): name = attrs.get('name') if name and name in self.data: value = self.data[name] declval = attrs.get('value') checked = False if isinstance(value, (list, tuple)): if declval: checked = declval in [unicode(v) for v in value] else: checked = any(value) else: if declval: checked = declval == unicode(value) elif type == 'checkbox': checked = bool(value) if checked: attrs |= [(QName('checked'), 'checked')] elif 'checked' in attrs: attrs -= 'checked' elif type in ('', 'hidden', 'text') \ or type == 'password' and self.passwords: name = attrs.get('name') if name and name in self.data: value = self.data[name] if isinstance(value, (list, tuple)): value = value[0] if value is not None: attrs |= [ (QName('value'), unicode(value)) ] elif tagname == 'select': name = attrs.get('name') if name in self.data: select_value = self.data[name] in_select = True elif tagname == 'textarea': name = attrs.get('name') if name in self.data: textarea_value = self.data.get(name) if isinstance(textarea_value, (list, tuple)): textarea_value = textarea_value[0] in_textarea = True elif in_select and tagname 
== 'option': option_start = kind, data, pos option_value = attrs.get('value') if option_value is None: no_option_value = True option_value = '' in_option = True continue yield kind, (tag, attrs), pos elif in_form and kind is TEXT: if in_select and in_option: if no_option_value: option_value += data option_text.append((kind, data, pos)) continue elif in_textarea: continue yield kind, data, pos elif in_form and kind is END: tagname = data.localname if tagname == 'form': in_form = False elif tagname == 'select': in_select = False select_value = None elif in_select and tagname == 'option': if isinstance(select_value, (tuple, list)): selected = option_value in [unicode(v) for v in select_value] else: selected = option_value == unicode(select_value) okind, (tag, attrs), opos = option_start if selected: attrs |= [(QName('selected'), 'selected')] elif 'selected' in attrs: attrs -= 'selected' yield okind, (tag, attrs), opos if option_text: for event in option_text: yield event in_option = False no_option_value = False option_start = option_value = None option_text = [] elif tagname == 'textarea': if textarea_value: yield TEXT, unicode(textarea_value), pos in_textarea = False yield kind, data, pos else: yield kind, data, pos class HTMLSanitizer(object): """A filter that removes potentially dangerous HTML tags and attributes from the stream. >>> from genshi import HTML >>> html = HTML('<div><script>alert(document.cookie)</script></div>') >>> print(html | HTMLSanitizer()) <div/> The default set of safe tags and attributes can be modified when the filter is instantiated. 
For example, to allow inline ``style`` attributes, the following instantation would work: >>> html = HTML('<div style="background: #000"></div>') >>> sanitizer = HTMLSanitizer(safe_attrs=HTMLSanitizer.SAFE_ATTRS | set(['style'])) >>> print(html | sanitizer) <div style="background: #000"/> Note that even in this case, the filter *does* attempt to remove dangerous constructs from style attributes: >>> html = HTML('<div style="background: url(javascript:void); color: #000"></div>') >>> print(html | sanitizer) <div style="color: #000"/> This handles HTML entities, unicode escapes in CSS and Javascript text, as well as a lot of other things. However, the style tag is still excluded by default because it is very hard for such sanitizing to be completely safe, especially considering how much error recovery current web browsers perform. It also does some basic filtering of CSS properties that may be used for typical phishing attacks. For more sophisticated filtering, this class provides a couple of hooks that can be overridden in sub-classes. :warn: Note that this special processing of CSS is currently only applied to style attributes, **not** style elements. 
""" SAFE_TAGS = frozenset(['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big', 'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset', 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup', 'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike', 'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var']) SAFE_ATTRS = frozenset(['abbr', 'accept', 'accept-charset', 'accesskey', 'action', 'align', 'alt', 'axis', 'bgcolor', 'border', 'cellpadding', 'cellspacing', 'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols', 'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled', 'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace', 'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', 'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type', 'usemap', 'valign', 'value', 'vspace', 'width']) SAFE_SCHEMES = frozenset(['file', 'ftp', 'http', 'https', 'mailto', None]) URI_ATTRS = frozenset(['action', 'background', 'dynsrc', 'href', 'lowsrc', 'src']) def __init__(self, safe_tags=SAFE_TAGS, safe_attrs=SAFE_ATTRS, safe_schemes=SAFE_SCHEMES, uri_attrs=URI_ATTRS): """Create the sanitizer. The exact set of allowed elements and attributes can be configured. 
:param safe_tags: a set of tag names that are considered safe :param safe_attrs: a set of attribute names that are considered safe :param safe_schemes: a set of URI schemes that are considered safe :param uri_attrs: a set of names of attributes that contain URIs """ self.safe_tags = safe_tags "The set of tag names that are considered safe." self.safe_attrs = safe_attrs "The set of attribute names that are considered safe." self.uri_attrs = uri_attrs "The set of names of attributes that may contain URIs." self.safe_schemes = safe_schemes "The set of URI schemes that are considered safe." def __call__(self, stream): """Apply the filter to the given stream. :param stream: the markup event stream to filter """ waiting_for = None for kind, data, pos in stream: if kind is START: if waiting_for: continue tag, attrs = data if not self.is_safe_elem(tag, attrs): waiting_for = tag continue new_attrs = [] for attr, value in attrs: value = stripentities(value) if attr not in self.safe_attrs: continue elif attr in self.uri_attrs: # Don't allow URI schemes such as "javascript:" if not self.is_safe_uri(value): continue elif attr == 'style': # Remove dangerous CSS declarations from inline styles decls = self.sanitize_css(value) if not decls: continue value = '; '.join(decls) new_attrs.append((attr, value)) yield kind, (tag, Attrs(new_attrs)), pos elif kind is END: tag = data if waiting_for: if waiting_for == tag: waiting_for = None else: yield kind, data, pos elif kind is not COMMENT: if not waiting_for: yield kind, data, pos def is_safe_css(self, propname, value): """Determine whether the given css property declaration is to be considered safe for inclusion in the output. 
:param propname: the CSS property name :param value: the value of the property :return: whether the property value should be considered safe :rtype: bool :since: version 0.6 """ if propname == 'position': return False if propname.startswith('margin') and '-' in value: # Negative margins can be used for phishing return False return True def is_safe_elem(self, tag, attrs): """Determine whether the given element should be considered safe for inclusion in the output. :param tag: the tag name of the element :type tag: QName :param attrs: the element attributes :type attrs: Attrs :return: whether the element should be considered safe :rtype: bool :since: version 0.6 """ if tag not in self.safe_tags: return False if tag.localname == 'input': input_type = attrs.get('type', '').lower() if input_type == 'password': return False return True def is_safe_uri(self, uri): """Determine whether the given URI is to be considered safe for inclusion in the output. The default implementation checks whether the scheme of the URI is in the set of allowed URIs (`safe_schemes`). >>> sanitizer = HTMLSanitizer() >>> sanitizer.is_safe_uri('http://example.org/') True >>> sanitizer.is_safe_uri('javascript:alert(document.cookie)') False :param uri: the URI to check :return: `True` if the URI can be considered safe, `False` otherwise :rtype: `bool` :since: version 0.4.3 """ if '#' in uri: uri = uri.split('#', 1)[0] # Strip out the fragment identifier if ':' not in uri: return True # This is a relative URI chars = [char for char in uri.split(':', 1)[0] if char.isalnum()] return ''.join(chars).lower() in self.safe_schemes def sanitize_css(self, text): """Remove potentially dangerous property declarations from CSS code. In particular, properties using the CSS ``url()`` function with a scheme that is not considered safe are removed: >>> sanitizer = HTMLSanitizer() >>> sanitizer.sanitize_css(u''' ... background: url(javascript:alert("foo")); ... color: #000; ... 
''') [u'color: #000'] Also, the proprietary Internet Explorer function ``expression()`` is always stripped: >>> sanitizer.sanitize_css(u''' ... background: #fff; ... color: #000; ... width: e/**/xpression(alert("foo")); ... ''') [u'background: #fff', u'color: #000'] :param text: the CSS text; this is expected to be `unicode` and to not contain any character or numeric references :return: a list of declarations that are considered safe :rtype: `list` :since: version 0.4.3 """ decls = [] text = self._strip_css_comments(self._replace_unicode_escapes(text)) for decl in text.split(';'): decl = decl.strip() if not decl: continue try: propname, value = decl.split(':', 1) except ValueError: continue if not self.is_safe_css(propname.strip().lower(), value.strip()): continue is_evil = False if 'expression' in value: is_evil = True for match in re.finditer(r'url\s*\(([^)]+)', value): if not self.is_safe_uri(match.group(1)): is_evil = True break if not is_evil: decls.append(decl.strip()) return decls _NORMALIZE_NEWLINES = re.compile(r'\r\n').sub _UNICODE_ESCAPE = re.compile(r'\\([0-9a-fA-F]{1,6})\s?').sub def _replace_unicode_escapes(self, text): def _repl(match): return unichr(int(match.group(1), 16)) return self._UNICODE_ESCAPE(_repl, self._NORMALIZE_NEWLINES('\n', text)) _CSS_COMMENTS = re.compile(r'/\*.*?\*/').sub def _strip_css_comments(self, text): return self._CSS_COMMENTS('', text)
[ [ 8, 0, 0.0309, 0.0022, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 7, 0, 0.0386, 0.0088, 0, 0.66, 0.125, 0, 0, 1, 0, 0, 0, 0, 0 ], [ 8, 1, 0.0375, 0.0022, 1, 0.21, ...
[ "\"\"\"Implementation of a number of stream filters.\"\"\"", "try:\n any\nexcept NameError:\n from genshi.util import any", " any", " from genshi.util import any", "import re", "from genshi.core import Attrs, QName, stripentities", "from genshi.core import END, START, TEXT, COMMENT", "__al...
# -*- coding: utf-8 -*- # # Copyright (C) 2007-2009 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. """Implementation of a number of stream filters.""" from genshi.filters.html import HTMLFormFiller, HTMLSanitizer from genshi.filters.i18n import Translator from genshi.filters.transform import Transformer __docformat__ = 'restructuredtext en'
[ [ 8, 0, 0.7, 0.05, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.8, 0.05, 0, 0.66, 0.25, 844, 0, 2, 0, 0, 844, 0, 0 ], [ 1, 0, 0.85, 0.05, 0, 0.66, 0.5, 5...
[ "\"\"\"Implementation of a number of stream filters.\"\"\"", "from genshi.filters.html import HTMLFormFiller, HTMLSanitizer", "from genshi.filters.i18n import Translator", "from genshi.filters.transform import Transformer", "__docformat__ = 'restructuredtext en'" ]
# -*- coding: utf-8 -*- # # Copyright (C) 2007-2009 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. """Directives and utilities for internationalization and localization of templates. :since: version 0.4 :note: Directives support added since version 0.6 """ try: any except NameError: from genshi.util import any from gettext import NullTranslations import os import re from types import FunctionType from genshi.core import Attrs, Namespace, QName, START, END, TEXT, START_NS, \ END_NS, XML_NAMESPACE, _ensure, StreamEventKind from genshi.template.eval import _ast from genshi.template.base import DirectiveFactory, EXPR, SUB, _apply_directives from genshi.template.directives import Directive, StripDirective from genshi.template.markup import MarkupTemplate, EXEC __all__ = ['Translator', 'extract'] __docformat__ = 'restructuredtext en' I18N_NAMESPACE = Namespace('http://genshi.edgewall.org/i18n') MSGBUF = StreamEventKind('MSGBUF') SUB_START = StreamEventKind('SUB_START') SUB_END = StreamEventKind('SUB_END') class I18NDirective(Directive): """Simple interface for i18n directives to support messages extraction.""" def __call__(self, stream, directives, ctxt, **vars): return _apply_directives(stream, directives, ctxt, vars) class ExtractableI18NDirective(I18NDirective): """Simple interface for directives to support messages extraction.""" def extract(self, stream, comment_stack): raise NotImplementedError class CommentDirective(I18NDirective): """Implementation of the ``i18n:comment`` template directive which adds translation comments. 
>>> tmpl = MarkupTemplate('''<html xmlns:i18n="http://genshi.edgewall.org/i18n"> ... <p i18n:comment="As in Foo Bar">Foo</p> ... </html>''') >>> translator = Translator() >>> translator.setup(tmpl) >>> list(translator.extract(tmpl.stream)) [(2, None, u'Foo', [u'As in Foo Bar'])] """ __slots__ = ['comment'] def __init__(self, value, template, hints=None, namespaces=None, lineno=-1, offset=-1): Directive.__init__(self, None, template, namespaces, lineno, offset) self.comment = value class MsgDirective(ExtractableI18NDirective): r"""Implementation of the ``i18n:msg`` directive which marks inner content as translatable. Consider the following examples: >>> tmpl = MarkupTemplate('''<html xmlns:i18n="http://genshi.edgewall.org/i18n"> ... <div i18n:msg=""> ... <p>Foo</p> ... <p>Bar</p> ... </div> ... <p i18n:msg="">Foo <em>bar</em>!</p> ... </html>''') >>> translator = Translator() >>> translator.setup(tmpl) >>> list(translator.extract(tmpl.stream)) [(2, None, u'[1:Foo]\n [2:Bar]', []), (6, None, u'Foo [1:bar]!', [])] >>> print(tmpl.generate().render()) <html> <div><p>Foo</p> <p>Bar</p></div> <p>Foo <em>bar</em>!</p> </html> >>> tmpl = MarkupTemplate('''<html xmlns:i18n="http://genshi.edgewall.org/i18n"> ... <div i18n:msg="fname, lname"> ... <p>First Name: ${fname}</p> ... <p>Last Name: ${lname}</p> ... </div> ... <p i18n:msg="">Foo <em>bar</em>!</p> ... </html>''') >>> translator.setup(tmpl) >>> list(translator.extract(tmpl.stream)) #doctest: +NORMALIZE_WHITESPACE [(2, None, u'[1:First Name: %(fname)s]\n [2:Last Name: %(lname)s]', []), (6, None, u'Foo [1:bar]!', [])] >>> tmpl = MarkupTemplate('''<html xmlns:i18n="http://genshi.edgewall.org/i18n"> ... <div i18n:msg="fname, lname"> ... <p>First Name: ${fname}</p> ... <p>Last Name: ${lname}</p> ... </div> ... <p i18n:msg="">Foo <em>bar</em>!</p> ... 
</html>''') >>> translator.setup(tmpl) >>> print(tmpl.generate(fname='John', lname='Doe').render()) <html> <div><p>First Name: John</p> <p>Last Name: Doe</p></div> <p>Foo <em>bar</em>!</p> </html> Starting and ending white-space is stripped of to make it simpler for translators. Stripping it is not that important since it's on the html source, the rendered output will remain the same. """ __slots__ = ['params'] def __init__(self, value, template, hints=None, namespaces=None, lineno=-1, offset=-1): Directive.__init__(self, None, template, namespaces, lineno, offset) self.params = [param.strip() for param in value.split(',') if param] @classmethod def attach(cls, template, stream, value, namespaces, pos): if type(value) is dict: value = value.get('params', '').strip() return super(MsgDirective, cls).attach(template, stream, value.strip(), namespaces, pos) def __call__(self, stream, directives, ctxt, **vars): gettext = ctxt.get('_i18n.gettext') dgettext = ctxt.get('_i18n.dgettext') if ctxt.get('_i18n.domain'): assert hasattr(dgettext, '__call__'), \ 'No domain gettext function passed' gettext = lambda msg: dgettext(ctxt.get('_i18n.domain'), msg) def _generate(): msgbuf = MessageBuffer(self) previous = stream.next() if previous[0] is START: yield previous else: msgbuf.append(*previous) previous = stream.next() for kind, data, pos in stream: msgbuf.append(*previous) previous = kind, data, pos if previous[0] is not END: msgbuf.append(*previous) previous = None for event in msgbuf.translate(gettext(msgbuf.format())): yield event if previous: yield previous return _apply_directives(_generate(), directives, ctxt, vars) def extract(self, stream, comment_stack): msgbuf = MessageBuffer(self) stream = iter(stream) previous = stream.next() if previous[0] is START: previous = stream.next() for event in stream: msgbuf.append(*previous) previous = event msgbuf.append(*previous) yield None, msgbuf.format(), comment_stack[-1:] class ChooseBranchDirective(I18NDirective): __slots__ = 
['params'] def __call__(self, stream, directives, ctxt, **vars): self.params = ctxt.get('_i18n.choose.params', [])[:] msgbuf = MessageBuffer(self) stream = iter(_apply_directives(stream, directives, ctxt, vars)) yield stream.next() # the outer start tag previous = stream.next() for kind, data, pos in stream: msgbuf.append(*previous) previous = kind, data, pos yield MSGBUF, (), -1 # the place holder for msgbuf output yield previous # the outer end tag ctxt['_i18n.choose.%s' % type(self).__name__] = msgbuf def extract(self, stream, comment_stack, msgbuf): stream = iter(stream) previous = stream.next() if previous[0] is START: previous = stream.next() for event in stream: msgbuf.append(*previous) previous = event if previous[0] is not END: msgbuf.append(*previous) return msgbuf class SingularDirective(ChooseBranchDirective): """Implementation of the ``i18n:singular`` directive to be used with the ``i18n:choose`` directive.""" class PluralDirective(ChooseBranchDirective): """Implementation of the ``i18n:plural`` directive to be used with the ``i18n:choose`` directive.""" class ChooseDirective(ExtractableI18NDirective): """Implementation of the ``i18n:choose`` directive which provides plural internationalisation of strings. This directive requires at least one parameter, the one which evaluates to an integer which will allow to choose the plural/singular form. If you also have expressions inside the singular and plural version of the string you also need to pass a name for those parameters. Consider the following examples: >>> tmpl = MarkupTemplate('''\ <html xmlns:i18n="http://genshi.edgewall.org/i18n"> ... <div i18n:choose="num; num"> ... <p i18n:singular="">There is $num coin</p> ... <p i18n:plural="">There are $num coins</p> ... </div> ... 
</html>''') >>> translator = Translator() >>> translator.setup(tmpl) >>> list(translator.extract(tmpl.stream)) #doctest: +NORMALIZE_WHITESPACE [(2, 'ngettext', (u'There is %(num)s coin', u'There are %(num)s coins'), [])] >>> tmpl = MarkupTemplate('''\ <html xmlns:i18n="http://genshi.edgewall.org/i18n"> ... <div i18n:choose="num; num"> ... <p i18n:singular="">There is $num coin</p> ... <p i18n:plural="">There are $num coins</p> ... </div> ... </html>''') >>> translator.setup(tmpl) >>> print(tmpl.generate(num=1).render()) <html> <div> <p>There is 1 coin</p> </div> </html> >>> print(tmpl.generate(num=2).render()) <html> <div> <p>There are 2 coins</p> </div> </html> When used as a directive and not as an attribute: >>> tmpl = MarkupTemplate('''\ <html xmlns:i18n="http://genshi.edgewall.org/i18n"> ... <i18n:choose numeral="num" params="num"> ... <p i18n:singular="">There is $num coin</p> ... <p i18n:plural="">There are $num coins</p> ... </i18n:choose> ... </html>''') >>> translator.setup(tmpl) >>> list(translator.extract(tmpl.stream)) #doctest: +NORMALIZE_WHITESPACE [(2, 'ngettext', (u'There is %(num)s coin', u'There are %(num)s coins'), [])] """ __slots__ = ['numeral', 'params'] def __init__(self, value, template, hints=None, namespaces=None, lineno=-1, offset=-1): Directive.__init__(self, None, template, namespaces, lineno, offset) params = [v.strip() for v in value.split(';')] self.numeral = self._parse_expr(params.pop(0), template, lineno, offset) self.params = params and [name.strip() for name in params[0].split(',') if name] or [] @classmethod def attach(cls, template, stream, value, namespaces, pos): if type(value) is dict: numeral = value.get('numeral', '').strip() assert numeral is not '', "at least pass the numeral param" params = [v.strip() for v in value.get('params', '').split(',')] value = '%s; ' % numeral + ', '.join(params) return super(ChooseDirective, cls).attach(template, stream, value, namespaces, pos) def __call__(self, stream, directives, ctxt, 
**vars): ctxt.push({'_i18n.choose.params': self.params, '_i18n.choose.SingularDirective': None, '_i18n.choose.PluralDirective': None}) new_stream = [] singular_stream = None singular_msgbuf = None plural_stream = None plural_msgbuf = None ngettext = ctxt.get('_i18n.ungettext') assert hasattr(ngettext, '__call__'), 'No ngettext function available' dngettext = ctxt.get('_i18n.dngettext') if not dngettext: dngettext = lambda d, s, p, n: ngettext(s, p, n) for kind, event, pos in stream: if kind is SUB: subdirectives, substream = event if isinstance(subdirectives[0], SingularDirective) and not singular_stream: # Apply directives to update context singular_stream = list(_apply_directives(substream, subdirectives, ctxt, vars)) new_stream.append((MSGBUF, (), ('', -1))) # msgbuf place holder singular_msgbuf = ctxt.get('_i18n.choose.SingularDirective') elif isinstance(subdirectives[0], PluralDirective) and not plural_stream: # Apply directives to update context plural_stream = list(_apply_directives(substream, subdirectives, ctxt, vars)) plural_msgbuf = ctxt.get('_i18n.choose.PluralDirective') else: new_stream.append((kind, event, pos)) else: new_stream.append((kind, event, pos)) if ctxt.get('_i18n.domain'): ngettext = lambda s, p, n: dngettext(ctxt.get('_i18n.domain'), s, p, n) for kind, data, pos in new_stream: if kind is MSGBUF: for skind, sdata, spos in singular_stream: if skind is MSGBUF: translation = ngettext(singular_msgbuf.format(), plural_msgbuf.format(), self.numeral.evaluate(ctxt)) for event in singular_msgbuf.translate(translation): yield event else: yield skind, sdata, spos else: yield kind, data, pos ctxt.pop() def extract(self, stream, comment_stack): stream = iter(stream) previous = stream.next() if previous is START: stream.next() singular_msgbuf = MessageBuffer(self) plural_msgbuf = MessageBuffer(self) for kind, event, pos in stream: if kind is SUB: subdirectives, substream = event for subdirective in subdirectives: if isinstance(subdirective, 
SingularDirective): singular_msgbuf = subdirective.extract(substream, comment_stack, singular_msgbuf) elif isinstance(subdirective, PluralDirective): plural_msgbuf = subdirective.extract(substream, comment_stack, plural_msgbuf) elif not isinstance(subdirective, StripDirective): singular_msgbuf.append(kind, event, pos) plural_msgbuf.append(kind, event, pos) else: singular_msgbuf.append(kind, event, pos) plural_msgbuf.append(kind, event, pos) yield 'ngettext', \ (singular_msgbuf.format(), plural_msgbuf.format()), \ comment_stack[-1:] class DomainDirective(I18NDirective): """Implementation of the ``i18n:domain`` directive which allows choosing another i18n domain(catalog) to translate from. >>> from genshi.filters.tests.i18n import DummyTranslations >>> tmpl = MarkupTemplate('''\ <html xmlns:i18n="http://genshi.edgewall.org/i18n"> ... <p i18n:msg="">Bar</p> ... <div i18n:domain="foo"> ... <p i18n:msg="">FooBar</p> ... <p>Bar</p> ... <p i18n:domain="bar" i18n:msg="">Bar</p> ... <p i18n:domain="">Bar</p> ... </div> ... <p>Bar</p> ... 
</html>''') >>> translations = DummyTranslations({'Bar': 'Voh'}) >>> translations.add_domain('foo', {'FooBar': 'BarFoo', 'Bar': 'foo_Bar'}) >>> translations.add_domain('bar', {'Bar': 'bar_Bar'}) >>> translator = Translator(translations) >>> translator.setup(tmpl) >>> print(tmpl.generate().render()) <html> <p>Voh</p> <div> <p>BarFoo</p> <p>foo_Bar</p> <p>bar_Bar</p> <p>Voh</p> </div> <p>Voh</p> </html> """ __slots__ = ['domain'] def __init__(self, value, template, hints=None, namespaces=None, lineno=-1, offset=-1): Directive.__init__(self, None, template, namespaces, lineno, offset) self.domain = value and value.strip() or '__DEFAULT__' @classmethod def attach(cls, template, stream, value, namespaces, pos): if type(value) is dict: value = value.get('name') return super(DomainDirective, cls).attach(template, stream, value, namespaces, pos) def __call__(self, stream, directives, ctxt, **vars): ctxt.push({'_i18n.domain': self.domain}) for event in _apply_directives(stream, directives, ctxt, vars): yield event ctxt.pop() class Translator(DirectiveFactory): """Can extract and translate localizable strings from markup streams and templates. For example, assume the following template: >>> tmpl = MarkupTemplate('''<html xmlns:py="http://genshi.edgewall.org/"> ... <head> ... <title>Example</title> ... </head> ... <body> ... <h1>Example</h1> ... <p>${_("Hello, %(name)s") % dict(name=username)}</p> ... </body> ... </html>''', filename='example.html') For demonstration, we define a dummy ``gettext``-style function with a hard-coded translation table, and pass that to the `Translator` initializer: >>> def pseudo_gettext(string): ... return { ... 'Example': 'Beispiel', ... 'Hello, %(name)s': 'Hallo, %(name)s' ... 
}[string] >>> translator = Translator(pseudo_gettext) Next, the translator needs to be prepended to any already defined filters on the template: >>> tmpl.filters.insert(0, translator) When generating the template output, our hard-coded translations should be applied as expected: >>> print(tmpl.generate(username='Hans', _=pseudo_gettext)) <html> <head> <title>Beispiel</title> </head> <body> <h1>Beispiel</h1> <p>Hallo, Hans</p> </body> </html> Note that elements defining ``xml:lang`` attributes that do not contain variable expressions are ignored by this filter. That can be used to exclude specific parts of a template from being extracted and translated. """ directives = [ ('domain', DomainDirective), ('comment', CommentDirective), ('msg', MsgDirective), ('choose', ChooseDirective), ('singular', SingularDirective), ('plural', PluralDirective) ] IGNORE_TAGS = frozenset([ QName('script'), QName('http://www.w3.org/1999/xhtml}script'), QName('style'), QName('http://www.w3.org/1999/xhtml}style') ]) INCLUDE_ATTRS = frozenset([ 'abbr', 'alt', 'label', 'prompt', 'standby', 'summary', 'title' ]) NAMESPACE = I18N_NAMESPACE def __init__(self, translate=NullTranslations(), ignore_tags=IGNORE_TAGS, include_attrs=INCLUDE_ATTRS, extract_text=True): """Initialize the translator. :param translate: the translation function, for example ``gettext`` or ``ugettext``. 
:param ignore_tags: a set of tag names that should not be localized :param include_attrs: a set of attribute names should be localized :param extract_text: whether the content of text nodes should be extracted, or only text in explicit ``gettext`` function calls :note: Changed in 0.6: the `translate` parameter can now be either a ``gettext``-style function, or an object compatible with the ``NullTransalations`` or ``GNUTranslations`` interface """ self.translate = translate self.ignore_tags = ignore_tags self.include_attrs = include_attrs self.extract_text = extract_text def __call__(self, stream, ctxt=None, search_text=True): """Translate any localizable strings in the given stream. This function shouldn't be called directly. Instead, an instance of the `Translator` class should be registered as a filter with the `Template` or the `TemplateLoader`, or applied as a regular stream filter. If used as a template filter, it should be inserted in front of all the default filters. :param stream: the markup event stream :param ctxt: the template context (not used) :param search_text: whether text nodes should be translated (used internally) :return: the localized stream """ ignore_tags = self.ignore_tags include_attrs = self.include_attrs skip = 0 xml_lang = XML_NAMESPACE['lang'] if type(self.translate) is FunctionType: gettext = self.translate if ctxt: ctxt['_i18n.gettext'] = gettext else: gettext = self.translate.ugettext try: dgettext = self.translate.dugettext except AttributeError: dgettext = lambda x, y: gettext(y) ngettext = self.translate.ungettext try: dngettext = self.translate.dungettext except AttributeError: dngettext = lambda d, s, p, n: ngettext(s, p, n) if ctxt: ctxt['_i18n.gettext'] = gettext ctxt['_i18n.ugettext'] = gettext ctxt['_i18n.dgettext'] = dgettext ctxt['_i18n.ngettext'] = ngettext ctxt['_i18n.ungettext'] = ngettext ctxt['_i18n.dngettext'] = dngettext extract_text = self.extract_text if not extract_text: search_text = False if ctxt and 
ctxt.get('_i18n.domain'): old_gettext = gettext gettext = lambda msg: dgettext(ctxt.get('_i18n.domain'), msg) for kind, data, pos in stream: # skip chunks that should not be localized if skip: if kind is START: skip += 1 elif kind is END: skip -= 1 yield kind, data, pos continue # handle different events that can be localized if kind is START: tag, attrs = data if tag in self.ignore_tags or \ isinstance(attrs.get(xml_lang), basestring): skip += 1 yield kind, data, pos continue new_attrs = [] changed = False for name, value in attrs: newval = value if extract_text and isinstance(value, basestring): if name in include_attrs: newval = gettext(value) else: newval = list( self(_ensure(value), ctxt, search_text=False) ) if newval != value: value = newval changed = True new_attrs.append((name, value)) if changed: attrs = Attrs(new_attrs) yield kind, (tag, attrs), pos elif search_text and kind is TEXT: text = data.strip() if text: data = data.replace(text, unicode(gettext(text))) yield kind, data, pos elif kind is SUB: directives, substream = data current_domain = None for idx, directive in enumerate(directives): # Organize directives to make everything work if isinstance(directive, DomainDirective): # Grab current domain and update context current_domain = directive.domain ctxt.push({'_i18n.domain': current_domain}) # Put domain directive as the first one in order to # update context before any other directives evaluation directives.insert(0, directives.pop(idx)) # If this is an i18n directive, no need to translate text # nodes here is_i18n_directive = any([ isinstance(d, ExtractableI18NDirective) for d in directives ]) substream = list(self(substream, ctxt, search_text=not is_i18n_directive)) yield kind, (directives, substream), pos if current_domain: ctxt.pop() else: yield kind, data, pos GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext', 'dgettext', 'dngettext', 'ugettext', 'ungettext') def extract(self, stream, gettext_functions=GETTEXT_FUNCTIONS, search_text=True, 
msgbuf=None, comment_stack=None): """Extract localizable strings from the given template stream. For every string found, this function yields a ``(lineno, function, message, comments)`` tuple, where: * ``lineno`` is the number of the line on which the string was found, * ``function`` is the name of the ``gettext`` function used (if the string was extracted from embedded Python code), and * ``message`` is the string itself (a ``unicode`` object, or a tuple of ``unicode`` objects for functions with multiple string arguments). * ``comments`` is a list of comments related to the message, extracted from ``i18n:comment`` attributes found in the markup >>> tmpl = MarkupTemplate('''<html xmlns:py="http://genshi.edgewall.org/"> ... <head> ... <title>Example</title> ... </head> ... <body> ... <h1>Example</h1> ... <p>${_("Hello, %(name)s") % dict(name=username)}</p> ... <p>${ngettext("You have %d item", "You have %d items", num)}</p> ... </body> ... </html>''', filename='example.html') >>> for line, func, msg, comments in Translator().extract(tmpl.stream): ... print('%d, %r, %r' % (line, func, msg)) 3, None, u'Example' 6, None, u'Example' 7, '_', u'Hello, %(name)s' 8, 'ngettext', (u'You have %d item', u'You have %d items', None) :param stream: the event stream to extract strings from; can be a regular stream or a template stream :param gettext_functions: a sequence of function names that should be treated as gettext-style localization functions :param search_text: whether the content of text nodes should be extracted (used internally) :note: Changed in 0.4.1: For a function with multiple string arguments (such as ``ngettext``), a single item with a tuple of strings is yielded, instead an item for each string argument. :note: Changed in 0.6: The returned tuples now include a fourth element, which is a list of comments for the translator. 
""" if not self.extract_text: search_text = False if comment_stack is None: comment_stack = [] skip = 0 # Un-comment bellow to extract messages without adding directives xml_lang = XML_NAMESPACE['lang'] for kind, data, pos in stream: if skip: if kind is START: skip += 1 if kind is END: skip -= 1 if kind is START and not skip: tag, attrs = data if tag in self.ignore_tags or \ isinstance(attrs.get(xml_lang), basestring): skip += 1 continue for name, value in attrs: if search_text and isinstance(value, basestring): if name in self.include_attrs: text = value.strip() if text: # XXX: Do we need to grab i18n:comment from comment_stack ??? yield pos[1], None, text, [] else: for lineno, funcname, text, comments in self.extract( _ensure(value), gettext_functions, search_text=False): yield lineno, funcname, text, comments if msgbuf: msgbuf.append(kind, data, pos) elif not skip and search_text and kind is TEXT: if not msgbuf: text = data.strip() if text and [ch for ch in text if ch.isalpha()]: yield pos[1], None, text, comment_stack[-1:] else: msgbuf.append(kind, data, pos) elif not skip and msgbuf and kind is END: msgbuf.append(kind, data, pos) if not msgbuf.depth: yield msgbuf.lineno, None, msgbuf.format(), [ c for c in msgbuf.comment if c ] msgbuf = None elif kind is EXPR or kind is EXEC: if msgbuf: msgbuf.append(kind, data, pos) for funcname, strings in extract_from_code(data, gettext_functions): # XXX: Do we need to grab i18n:comment from comment_stack ??? 
yield pos[1], funcname, strings, [] elif kind is SUB: directives, substream = data in_comment = False for idx, directive in enumerate(directives): # Do a first loop to see if there's a comment directive # If there is update context and pop it from directives if isinstance(directive, CommentDirective): in_comment = True comment_stack.append(directive.comment) if len(directives) == 1: # in case we're in the presence of something like: # <p i18n:comment="foo">Foo</p> messages = self.extract( substream, gettext_functions, search_text=search_text and not skip, msgbuf=msgbuf, comment_stack=comment_stack) for lineno, funcname, text, comments in messages: yield lineno, funcname, text, comments directives.pop(idx) elif not isinstance(directive, I18NDirective): # Remove all other non i18n directives from the process directives.pop(idx) if not directives and not in_comment: # Extract content if there's no directives because # strip was pop'ed and not because comment was pop'ed. # Extraction in this case has been taken care of. messages = self.extract( substream, gettext_functions, search_text=search_text and not skip, msgbuf=msgbuf) for lineno, funcname, text, comments in messages: yield lineno, funcname, text, comments for directive in directives: if isinstance(directive, ExtractableI18NDirective): messages = directive.extract(substream, comment_stack) for funcname, text, comments in messages: yield pos[1], funcname, text, comments else: messages = self.extract( substream, gettext_functions, search_text=search_text and not skip, msgbuf=msgbuf) for lineno, funcname, text, comments in messages: yield lineno, funcname, text, comments if in_comment: comment_stack.pop() def get_directive_index(self, dir_cls): total = len(self._dir_order) if dir_cls in self._dir_order: return self._dir_order.index(dir_cls) - total return total def setup(self, template): """Convenience function to register the `Translator` filter and the related directives with the given template. 
:param template: a `Template` instance """ template.filters.insert(0, self) if hasattr(template, 'add_directives'): template.add_directives(Translator.NAMESPACE, self) class MessageBuffer(object): """Helper class for managing internationalized mixed content. :since: version 0.5 """ def __init__(self, directive=None): """Initialize the message buffer. :param params: comma-separated list of parameter names :type params: `basestring` :param lineno: the line number on which the first stream event belonging to the message was found """ # params list needs to be copied so that directives can be evaluated # more than once self.orig_params = self.params = directive.params[:] self.directive = directive self.string = [] self.events = {} self.values = {} self.depth = 1 self.order = 1 self.stack = [0] self.subdirectives = {} def append(self, kind, data, pos): """Append a stream event to the buffer. :param kind: the stream event kind :param data: the event data :param pos: the position of the event in the source """ if kind is SUB: # The order needs to be +1 because a new START kind event will # happen and we we need to wrap those events into our custom kind(s) order = self.stack[-1] + 1 subdirectives, substream = data # Store the directives that should be applied after translation self.subdirectives.setdefault(order, []).extend(subdirectives) self.events.setdefault(order, []).append((SUB_START, None, pos)) for skind, sdata, spos in substream: self.append(skind, sdata, spos) self.events.setdefault(order, []).append((SUB_END, None, pos)) elif kind is TEXT: if '[' in data or ']' in data: # Quote [ and ] if it ain't us adding it, ie, if the user is # using those chars in his templates, escape them data = data.replace('[', '\[').replace(']', '\]') self.string.append(data) self.events.setdefault(self.stack[-1], []).append((kind, data, pos)) elif kind is EXPR: if self.params: param = self.params.pop(0) else: params = ', '.join(['"%s"' % p for p in self.orig_params if p]) if params: 
params = "(%s)" % params raise IndexError("%d parameters%s given to 'i18n:%s' but " "%d or more expressions used in '%s', line %s" % (len(self.orig_params), params, self.directive.tagname, len(self.orig_params)+1, os.path.basename(pos[0] or 'In Memmory Template'), pos[1])) self.string.append('%%(%s)s' % param) self.events.setdefault(self.stack[-1], []).append((kind, data, pos)) self.values[param] = (kind, data, pos) else: if kind is START: self.string.append('[%d:' % self.order) self.stack.append(self.order) self.events.setdefault(self.stack[-1], []).append((kind, data, pos)) self.depth += 1 self.order += 1 elif kind is END: self.depth -= 1 if self.depth: self.events[self.stack[-1]].append((kind, data, pos)) self.string.append(']') self.stack.pop() def format(self): """Return a message identifier representing the content in the buffer. """ return ''.join(self.string).strip() def translate(self, string, regex=re.compile(r'%\((\w+)\)s')): """Interpolate the given message translation with the events in the buffer and return the translated stream. 
:param string: the translated message string """ substream = None def yield_parts(string): for idx, part in enumerate(regex.split(string)): if idx % 2: yield self.values[part] elif part: yield (TEXT, part.replace('\[', '[').replace('\]', ']'), (None, -1, -1) ) parts = parse_msg(string) parts_counter = {} for order, string in parts: parts_counter.setdefault(order, []).append(None) while parts: order, string = parts.pop(0) if len(parts_counter[order]) == 1: events = self.events[order] else: events = [self.events[order].pop(0)] parts_counter[order].pop() for event in events: if event[0] is SUB_START: substream = [] elif event[0] is SUB_END: # Yield a substream which might have directives to be # applied to it (after translation events) yield SUB, (self.subdirectives[order], substream), event[2] substream = None elif event[0] is TEXT: if string: for part in yield_parts(string): if substream is not None: substream.append(part) else: yield part # String handled, reset it string = None elif event[0] is START: if substream is not None: substream.append(event) else: yield event if string: for part in yield_parts(string): if substream is not None: substream.append(part) else: yield part # String handled, reset it string = None elif event[0] is END: if string: for part in yield_parts(string): if substream is not None: substream.append(part) else: yield part # String handled, reset it string = None if substream is not None: substream.append(event) else: yield event elif event[0] is EXPR: # These are handled on the strings itself continue else: if string: for part in yield_parts(string): if substream is not None: substream.append(part) else: yield part # String handled, reset it string = None if substream is not None: substream.append(event) else: yield event def parse_msg(string, regex=re.compile(r'(?:\[(\d+)\:)|(?<!\\)\]')): """Parse a translated message using Genshi mixed content message formatting. 
>>> parse_msg("See [1:Help].") [(0, 'See '), (1, 'Help'), (0, '.')] >>> parse_msg("See [1:our [2:Help] page] for details.") [(0, 'See '), (1, 'our '), (2, 'Help'), (1, ' page'), (0, ' for details.')] >>> parse_msg("[2:Details] finden Sie in [1:Hilfe].") [(2, 'Details'), (0, ' finden Sie in '), (1, 'Hilfe'), (0, '.')] >>> parse_msg("[1:] Bilder pro Seite anzeigen.") [(1, ''), (0, ' Bilder pro Seite anzeigen.')] :param string: the translated message string :return: a list of ``(order, string)`` tuples :rtype: `list` """ parts = [] stack = [0] while True: mo = regex.search(string) if not mo: break if mo.start() or stack[-1]: parts.append((stack[-1], string[:mo.start()])) string = string[mo.end():] orderno = mo.group(1) if orderno is not None: stack.append(int(orderno)) else: stack.pop() if not stack: break if string: parts.append((stack[-1], string)) return parts def extract_from_code(code, gettext_functions): """Extract strings from Python bytecode. >>> from genshi.template.eval import Expression >>> expr = Expression('_("Hello")') >>> list(extract_from_code(expr, Translator.GETTEXT_FUNCTIONS)) [('_', u'Hello')] >>> expr = Expression('ngettext("You have %(num)s item", ' ... 
'"You have %(num)s items", num)') >>> list(extract_from_code(expr, Translator.GETTEXT_FUNCTIONS)) [('ngettext', (u'You have %(num)s item', u'You have %(num)s items', None))] :param code: the `Code` object :type code: `genshi.template.eval.Code` :param gettext_functions: a sequence of function names :since: version 0.5 """ def _walk(node): if isinstance(node, _ast.Call) and isinstance(node.func, _ast.Name) \ and node.func.id in gettext_functions: strings = [] def _add(arg): if isinstance(arg, _ast.Str) and isinstance(arg.s, basestring): strings.append(unicode(arg.s, 'utf-8')) elif arg: strings.append(None) [_add(arg) for arg in node.args] _add(node.starargs) _add(node.kwargs) if len(strings) == 1: strings = strings[0] else: strings = tuple(strings) yield node.func.id, strings elif node._fields: children = [] for field in node._fields: child = getattr(node, field, None) if isinstance(child, list): for elem in child: children.append(elem) elif isinstance(child, _ast.AST): children.append(child) for child in children: for funcname, strings in _walk(child): yield funcname, strings return _walk(code.ast) def extract(fileobj, keywords, comment_tags, options): """Babel extraction method for Genshi templates. :param fileobj: the file-like object the messages should be extracted from :param keywords: a list of keywords (i.e. 
function names) that should be recognized as translation functions :param comment_tags: a list of translator tags to search for and include in the results :param options: a dictionary of additional options (optional) :return: an iterator over ``(lineno, funcname, message, comments)`` tuples :rtype: ``iterator`` """ template_class = options.get('template_class', MarkupTemplate) if isinstance(template_class, basestring): module, clsname = template_class.split(':', 1) template_class = getattr(__import__(module, {}, {}, [clsname]), clsname) encoding = options.get('encoding', None) extract_text = options.get('extract_text', True) if isinstance(extract_text, basestring): extract_text = extract_text.lower() in ('1', 'on', 'yes', 'true') ignore_tags = options.get('ignore_tags', Translator.IGNORE_TAGS) if isinstance(ignore_tags, basestring): ignore_tags = ignore_tags.split() ignore_tags = [QName(tag) for tag in ignore_tags] include_attrs = options.get('include_attrs', Translator.INCLUDE_ATTRS) if isinstance(include_attrs, basestring): include_attrs = include_attrs.split() include_attrs = [QName(attr) for attr in include_attrs] tmpl = template_class(fileobj, filename=getattr(fileobj, 'name', None), encoding=encoding) translator = Translator(None, ignore_tags, include_attrs, extract_text) if hasattr(tmpl, 'add_directives'): tmpl.add_directives(Translator.NAMESPACE, translator) for message in translator.extract(tmpl.stream, gettext_functions=keywords): yield message
[ [ 8, 0, 0.0141, 0.0051, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 7, 0, 0.0192, 0.0034, 0, 0.66, 0.0333, 0, 0, 1, 0, 0, 0, 0, 0 ], [ 8, 1, 0.0188, 0.0009, 1, 0.48, ...
[ "\"\"\"Directives and utilities for internationalization and localization of\ntemplates.\n\n:since: version 0.4\n:note: Directives support added since version 0.6\n\"\"\"", "try:\n any\nexcept NameError:\n from genshi.util import any", " any", " from genshi.util import any", "from gettext import...
# -*- coding: utf-8 -*- # # Copyright (C) 2006-2009 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. """This package provides various means for generating and processing web markup (XML or HTML). The design is centered around the concept of streams of markup events (similar in concept to SAX parsing events) which can be processed in a uniform manner independently of where or how they are produced. """ __docformat__ = 'restructuredtext en' __version__ = '0.6' from genshi.core import * from genshi.input import ParseError, XML, HTML
[ [ 8, 0, 0.6538, 0.2692, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.8462, 0.0385, 0, 0.66, 0.25, 959, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.8846, 0.0385, 0, 0.66, ...
[ "\"\"\"This package provides various means for generating and processing web markup\n(XML or HTML).\n\nThe design is centered around the concept of streams of markup events (similar\nin concept to SAX parsing events) which can be processed in a uniform manner\nindependently of where or how they are produced.\n\"\"\...
# -*- coding: utf-8 -*- # # Copyright (C) 2006-2009 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. """Support for programmatically generating markup streams from Python code using a very simple syntax. The main entry point to this module is the `tag` object (which is actually an instance of the ``ElementFactory`` class). You should rarely (if ever) need to directly import and use any of the other classes in this module. Elements can be created using the `tag` object using attribute access. For example: >>> doc = tag.p('Some text and ', tag.a('a link', href='http://example.org/'), '.') >>> doc <Element "p"> This produces an `Element` instance which can be further modified to add child nodes and attributes. This is done by "calling" the element: positional arguments are added as child nodes (alternatively, the `Element.append` method can be used for that purpose), whereas keywords arguments are added as attributes: >>> doc(tag.br) <Element "p"> >>> print(doc) <p>Some text and <a href="http://example.org/">a link</a>.<br/></p> If an attribute name collides with a Python keyword, simply append an underscore to the name: >>> doc(class_='intro') <Element "p"> >>> print(doc) <p class="intro">Some text and <a href="http://example.org/">a link</a>.<br/></p> As shown above, an `Element` can easily be directly rendered to XML text by printing it or using the Python ``str()`` function. 
This is basically a shortcut for converting the `Element` to a stream and serializing that stream: >>> stream = doc.generate() >>> stream #doctest: +ELLIPSIS <genshi.core.Stream object at ...> >>> print(stream) <p class="intro">Some text and <a href="http://example.org/">a link</a>.<br/></p> The `tag` object also allows creating "fragments", which are basically lists of nodes (elements or text) that don't have a parent element. This can be useful for creating snippets of markup that are attached to a parent element later (for example in a template). Fragments are created by calling the `tag` object, which returns an object of type `Fragment`: >>> fragment = tag('Hello, ', tag.em('world'), '!') >>> fragment <Fragment> >>> print(fragment) Hello, <em>world</em>! """ from genshi.core import Attrs, Markup, Namespace, QName, Stream, \ START, END, TEXT __all__ = ['Fragment', 'Element', 'ElementFactory', 'tag'] __docformat__ = 'restructuredtext en' class Fragment(object): """Represents a markup fragment, which is basically just a list of element or text nodes. """ __slots__ = ['children'] def __init__(self): """Create a new fragment.""" self.children = [] def __add__(self, other): return Fragment()(self, other) def __call__(self, *args): """Append any positional arguments as child nodes. :see: `append` """ for arg in args: self.append(arg) return self def __iter__(self): return self._generate() def __repr__(self): return '<%s>' % type(self).__name__ def __str__(self): return str(self.generate()) def __unicode__(self): return unicode(self.generate()) def __html__(self): return Markup(self.generate()) def append(self, node): """Append an element or string as child node. 
:param node: the node to append; can be an `Element`, `Fragment`, or a `Stream`, or a Python string or number """ if isinstance(node, (Stream, Element, basestring, int, float, long)): # For objects of a known/primitive type, we avoid the check for # whether it is iterable for better performance self.children.append(node) elif isinstance(node, Fragment): self.children.extend(node.children) elif node is not None: try: for child in node: self.append(child) except TypeError: self.children.append(node) def _generate(self): for child in self.children: if isinstance(child, Fragment): for event in child._generate(): yield event elif isinstance(child, Stream): for event in child: yield event else: if not isinstance(child, basestring): child = unicode(child) yield TEXT, child, (None, -1, -1) def generate(self): """Return a markup event stream for the fragment. :rtype: `Stream` """ return Stream(self._generate()) def _kwargs_to_attrs(kwargs): attrs = [] names = set() for name, value in kwargs.items(): name = name.rstrip('_').replace('_', '-') if value is not None and name not in names: attrs.append((QName(name), unicode(value))) names.add(name) return Attrs(attrs) class Element(Fragment): """Simple XML output generator based on the builder pattern. Construct XML elements by passing the tag name to the constructor: >>> print(Element('strong')) <strong/> Attributes can be specified using keyword arguments. The values of the arguments will be converted to strings and any special XML characters escaped: >>> print(Element('textarea', rows=10, cols=60)) <textarea rows="10" cols="60"/> >>> print(Element('span', title='1 < 2')) <span title="1 &lt; 2"/> >>> print(Element('span', title='"baz"')) <span title="&#34;baz&#34;"/> The " character is escaped using a numerical entity. The order in which attributes are rendered is undefined. 
If an attribute value evaluates to `None`, that attribute is not included in the output: >>> print(Element('a', name=None)) <a/> Attribute names that conflict with Python keywords can be specified by appending an underscore: >>> print(Element('div', class_='warning')) <div class="warning"/> Nested elements can be added to an element using item access notation. The call notation can also be used for this and for adding attributes using keyword arguments, as one would do in the constructor. >>> print(Element('ul')(Element('li'), Element('li'))) <ul><li/><li/></ul> >>> print(Element('a')('Label')) <a>Label</a> >>> print(Element('a')('Label', href="target")) <a href="target">Label</a> Text nodes can be nested in an element by adding strings instead of elements. Any special characters in the strings are escaped automatically: >>> print(Element('em')('Hello world')) <em>Hello world</em> >>> print(Element('em')(42)) <em>42</em> >>> print(Element('em')('1 < 2')) <em>1 &lt; 2</em> This technique also allows mixed content: >>> print(Element('p')('Hello ', Element('b')('world'))) <p>Hello <b>world</b></p> Quotes are not escaped inside text nodes: >>> print(Element('p')('"Hello"')) <p>"Hello"</p> Elements can also be combined with other elements or strings using the addition operator, which results in a `Fragment` object that contains the operands: >>> print(Element('br') + 'some text' + Element('br')) <br/>some text<br/> Elements with a namespace can be generated using the `Namespace` and/or `QName` classes: >>> from genshi.core import Namespace >>> xhtml = Namespace('http://www.w3.org/1999/xhtml') >>> print(Element(xhtml.html, lang='en')) <html xmlns="http://www.w3.org/1999/xhtml" lang="en"/> """ __slots__ = ['tag', 'attrib'] def __init__(self, tag_, **attrib): Fragment.__init__(self) self.tag = QName(tag_) self.attrib = _kwargs_to_attrs(attrib) def __call__(self, *args, **kwargs): """Append any positional arguments as child nodes, and keyword arguments as attributes. 
:return: the element itself so that calls can be chained :rtype: `Element` :see: `Fragment.append` """ self.attrib |= _kwargs_to_attrs(kwargs) Fragment.__call__(self, *args) return self def __repr__(self): return '<%s "%s">' % (type(self).__name__, self.tag) def _generate(self): yield START, (self.tag, self.attrib), (None, -1, -1) for kind, data, pos in Fragment._generate(self): yield kind, data, pos yield END, self.tag, (None, -1, -1) def generate(self): """Return a markup event stream for the fragment. :rtype: `Stream` """ return Stream(self._generate()) class ElementFactory(object): """Factory for `Element` objects. A new element is created simply by accessing a correspondingly named attribute of the factory object: >>> factory = ElementFactory() >>> print(factory.foo) <foo/> >>> print(factory.foo(id=2)) <foo id="2"/> Markup fragments (lists of nodes without a parent element) can be created by calling the factory: >>> print(factory('Hello, ', factory.em('world'), '!')) Hello, <em>world</em>! A factory can also be bound to a specific namespace: >>> factory = ElementFactory('http://www.w3.org/1999/xhtml') >>> print(factory.html(lang="en")) <html xmlns="http://www.w3.org/1999/xhtml" lang="en"/> The namespace for a specific element can be altered on an existing factory by specifying the new namespace using item access: >>> factory = ElementFactory() >>> print(factory.html(factory['http://www.w3.org/2000/svg'].g(id=3))) <html><g xmlns="http://www.w3.org/2000/svg" id="3"/></html> Usually, the `ElementFactory` class is not be used directly. Rather, the `tag` instance should be used to create elements. """ def __init__(self, namespace=None): """Create the factory, optionally bound to the given namespace. 
:param namespace: the namespace URI for any created elements, or `None` for no namespace """ if namespace and not isinstance(namespace, Namespace): namespace = Namespace(namespace) self.namespace = namespace def __call__(self, *args): """Create a fragment that has the given positional arguments as child nodes. :return: the created `Fragment` :rtype: `Fragment` """ return Fragment()(*args) def __getitem__(self, namespace): """Return a new factory that is bound to the specified namespace. :param namespace: the namespace URI or `Namespace` object :return: an `ElementFactory` that produces elements bound to the given namespace :rtype: `ElementFactory` """ return ElementFactory(namespace) def __getattr__(self, name): """Create an `Element` with the given name. :param name: the tag name of the element to create :return: an `Element` with the specified name :rtype: `Element` """ return Element(self.namespace and self.namespace[name] or name) tag = ElementFactory() """Global `ElementFactory` bound to the default namespace. :type: `ElementFactory` """
[ [ 8, 0, 0.1156, 0.156, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1992, 0.0056, 0, 0.66, 0.1111, 987, 0, 8, 0, 0, 987, 0, 0 ], [ 14, 0, 0.2061, 0.0028, 0, 0.66...
[ "\"\"\"Support for programmatically generating markup streams from Python code using\na very simple syntax. The main entry point to this module is the `tag` object\n(which is actually an instance of the ``ElementFactory`` class). You should\nrarely (if ever) need to directly import and use any of the other classes ...
import unittest import doctest class OptionalExtensionTestSuite(unittest.TestSuite): def run(self, result): import simplejson run = unittest.TestSuite.run run(self, result) simplejson._toggle_speedups(False) run(self, result) simplejson._toggle_speedups(True) return result def additional_tests(suite=None): import simplejson import simplejson.encoder import simplejson.decoder if suite is None: suite = unittest.TestSuite() for mod in (simplejson, simplejson.encoder, simplejson.decoder): suite.addTest(doctest.DocTestSuite(mod)) suite.addTest(doctest.DocFileSuite('../../index.rst')) return suite def all_tests_suite(): suite = unittest.TestLoader().loadTestsFromNames([ 'simplejson.tests.test_check_circular', 'simplejson.tests.test_decode', 'simplejson.tests.test_default', 'simplejson.tests.test_dump', 'simplejson.tests.test_encode_basestring_ascii', 'simplejson.tests.test_encode_for_html', 'simplejson.tests.test_fail', 'simplejson.tests.test_float', 'simplejson.tests.test_indent', 'simplejson.tests.test_pass1', 'simplejson.tests.test_pass2', 'simplejson.tests.test_pass3', 'simplejson.tests.test_recursion', 'simplejson.tests.test_scanstring', 'simplejson.tests.test_separators', 'simplejson.tests.test_speedups', 'simplejson.tests.test_unicode', 'simplejson.tests.test_decimal', ]) suite = additional_tests(suite) return OptionalExtensionTestSuite([suite]) def main(): runner = unittest.TextTestRunner() suite = all_tests_suite() runner.run(suite) if __name__ == '__main__': import os import sys sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) main()
[ [ 1, 0, 0.0159, 0.0159, 0, 0.66, 0, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 1, 0, 0.0317, 0.0159, 0, 0.66, 0.1667, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 3, 0, 0.1429, 0.1429, 0, 0....
[ "import unittest", "import doctest", "class OptionalExtensionTestSuite(unittest.TestSuite):\n def run(self, result):\n import simplejson\n run = unittest.TestSuite.run\n run(self, result)\n simplejson._toggle_speedups(False)\n run(self, result)\n simplejson._toggle_s...
"""Implementation of JSONEncoder """ import re from decimal import Decimal def _import_speedups(): try: from simplejson import _speedups return _speedups.encode_basestring_ascii, _speedups.make_encoder except ImportError: return None, None c_encode_basestring_ascii, c_make_encoder = _import_speedups() from simplejson.decoder import PosInf ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') ESCAPE_DCT = { '\\': '\\\\', '"': '\\"', '\b': '\\b', '\f': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', } for i in range(0x20): #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) FLOAT_REPR = repr def encode_basestring(s): """Return a JSON representation of a Python string """ if isinstance(s, str) and HAS_UTF8.search(s) is not None: s = s.decode('utf-8') def replace(match): return ESCAPE_DCT[match.group(0)] return u'"' + ESCAPE.sub(replace, s) + u'"' def py_encode_basestring_ascii(s): """Return an ASCII-only JSON representation of a Python string """ if isinstance(s, str) and HAS_UTF8.search(s) is not None: s = s.decode('utf-8') def replace(match): s = match.group(0) try: return ESCAPE_DCT[s] except KeyError: n = ord(s) if n < 0x10000: #return '\\u{0:04x}'.format(n) return '\\u%04x' % (n,) else: # surrogate pair n -= 0x10000 s1 = 0xd800 | ((n >> 10) & 0x3ff) s2 = 0xdc00 | (n & 0x3ff) #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2) return '\\u%04x\\u%04x' % (s1, s2) return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' encode_basestring_ascii = ( c_encode_basestring_ascii or py_encode_basestring_ascii) class JSONEncoder(object): """Extensible JSON <http://json.org> encoder for Python data structures. 
Supports the following objects and types by default: +-------------------+---------------+ | Python | JSON | +===================+===============+ | dict | object | +-------------------+---------------+ | list, tuple | array | +-------------------+---------------+ | str, unicode | string | +-------------------+---------------+ | int, long, float | number | +-------------------+---------------+ | True | true | +-------------------+---------------+ | False | false | +-------------------+---------------+ | None | null | +-------------------+---------------+ To extend this to recognize other objects, subclass and implement a ``.default()`` method with another method that returns a serializable object for ``o`` if possible, otherwise it should call the superclass implementation (to raise ``TypeError``). """ item_separator = ', ' key_separator = ': ' def __init__(self, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, sort_keys=False, indent=None, separators=None, encoding='utf-8', default=None, use_decimal=False): """Constructor for JSONEncoder, with sensible defaults. If skipkeys is false, then it is a TypeError to attempt encoding of keys that are not str, int, long, float or None. If skipkeys is True, such items are simply skipped. If ensure_ascii is true, the output is guaranteed to be str objects with all incoming unicode characters escaped. If ensure_ascii is false, the output will be unicode object. If check_circular is true, then lists, dicts, and custom encoded objects will be checked for circular references during encoding to prevent an infinite recursion (which would cause an OverflowError). Otherwise, no such check takes place. If allow_nan is true, then NaN, Infinity, and -Infinity will be encoded as such. This behavior is not JSON specification compliant, but is consistent with most JavaScript based encoders and decoders. Otherwise, it will be a ValueError to encode such floats. 
If sort_keys is true, then the output of dictionaries will be sorted by key; this is useful for regression tests to ensure that JSON serializations can be compared on a day-to-day basis. If indent is a string, then JSON array elements and object members will be pretty-printed with a newline followed by that string repeated for each level of nesting. ``None`` (the default) selects the most compact representation without any newlines. For backwards compatibility with versions of simplejson earlier than 2.1.0, an integer is also accepted and is converted to a string with that many spaces. If specified, separators should be a (item_separator, key_separator) tuple. The default is (', ', ': '). To get the most compact JSON representation you should specify (',', ':') to eliminate whitespace. If specified, default is a function that gets called for objects that can't otherwise be serialized. It should return a JSON encodable version of the object or raise a ``TypeError``. If encoding is not None, then all input strings will be transformed into unicode using that encoding prior to JSON-encoding. The default is UTF-8. If use_decimal is true (not the default), ``decimal.Decimal`` will be supported directly by the encoder. For the inverse, decode JSON with ``parse_float=decimal.Decimal``. """ self.skipkeys = skipkeys self.ensure_ascii = ensure_ascii self.check_circular = check_circular self.allow_nan = allow_nan self.sort_keys = sort_keys self.use_decimal = use_decimal if isinstance(indent, (int, long)): indent = ' ' * indent self.indent = indent if separators is not None: self.item_separator, self.key_separator = separators if default is not None: self.default = default self.encoding = encoding def default(self, o): """Implement this method in a subclass such that it returns a serializable object for ``o``, or calls the base implementation (to raise a ``TypeError``). 
For example, to support arbitrary iterators, you could implement default like this:: def default(self, o): try: iterable = iter(o) except TypeError: pass else: return list(iterable) return JSONEncoder.default(self, o) """ raise TypeError(repr(o) + " is not JSON serializable") def encode(self, o): """Return a JSON string representation of a Python data structure. >>> from simplejson import JSONEncoder >>> JSONEncoder().encode({"foo": ["bar", "baz"]}) '{"foo": ["bar", "baz"]}' """ # This is for extremely simple cases and benchmarks. if isinstance(o, basestring): if isinstance(o, str): _encoding = self.encoding if (_encoding is not None and not (_encoding == 'utf-8')): o = o.decode(_encoding) if self.ensure_ascii: return encode_basestring_ascii(o) else: return encode_basestring(o) # This doesn't pass the iterator directly to ''.join() because the # exceptions aren't as detailed. The list call should be roughly # equivalent to the PySequence_Fast that ''.join() would do. chunks = self.iterencode(o, _one_shot=True) if not isinstance(chunks, (list, tuple)): chunks = list(chunks) if self.ensure_ascii: return ''.join(chunks) else: return u''.join(chunks) def iterencode(self, o, _one_shot=False): """Encode the given object and yield each string representation as available. For example:: for chunk in JSONEncoder().iterencode(bigobject): mysocket.write(chunk) """ if self.check_circular: markers = {} else: markers = None if self.ensure_ascii: _encoder = encode_basestring_ascii else: _encoder = encode_basestring if self.encoding != 'utf-8': def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding): if isinstance(o, str): o = o.decode(_encoding) return _orig_encoder(o) def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf): # Check for specials. Note that this type of test is processor # and/or platform-specific, so do tests which don't depend on # the internals. 
if o != o: text = 'NaN' elif o == _inf: text = 'Infinity' elif o == _neginf: text = '-Infinity' else: return _repr(o) if not allow_nan: raise ValueError( "Out of range float values are not JSON compliant: " + repr(o)) return text key_memo = {} if (_one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys): _iterencode = c_make_encoder( markers, self.default, _encoder, self.indent, self.key_separator, self.item_separator, self.sort_keys, self.skipkeys, self.allow_nan, key_memo, self.use_decimal) else: _iterencode = _make_iterencode( markers, self.default, _encoder, self.indent, floatstr, self.key_separator, self.item_separator, self.sort_keys, self.skipkeys, _one_shot, self.use_decimal) try: return _iterencode(o, 0) finally: key_memo.clear() class JSONEncoderForHTML(JSONEncoder): """An encoder that produces JSON safe to embed in HTML. To embed JSON content in, say, a script tag on a web page, the characters &, < and > should be escaped. They cannot be escaped with the usual entities (e.g. &amp;) because they are not expanded within <script> tags. """ def encode(self, o): # Override JSONEncoder.encode because it has hacks for # performance that make things more complicated. 
chunks = self.iterencode(o, True) if self.ensure_ascii: return ''.join(chunks) else: return u''.join(chunks) def iterencode(self, o, _one_shot=False): chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot) for chunk in chunks: chunk = chunk.replace('&', '\\u0026') chunk = chunk.replace('<', '\\u003c') chunk = chunk.replace('>', '\\u003e') yield chunk def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot, _use_decimal, ## HACK: hand-optimized bytecode; turn globals into locals False=False, True=True, ValueError=ValueError, basestring=basestring, Decimal=Decimal, dict=dict, float=float, id=id, int=int, isinstance=isinstance, list=list, long=long, str=str, tuple=tuple, ): def _iterencode_list(lst, _current_indent_level): if not lst: yield '[]' return if markers is not None: markerid = id(lst) if markerid in markers: raise ValueError("Circular reference detected") markers[markerid] = lst buf = '[' if _indent is not None: _current_indent_level += 1 newline_indent = '\n' + (_indent * _current_indent_level) separator = _item_separator + newline_indent buf += newline_indent else: newline_indent = None separator = _item_separator first = True for value in lst: if first: first = False else: buf = separator if isinstance(value, basestring): yield buf + _encoder(value) elif value is None: yield buf + 'null' elif value is True: yield buf + 'true' elif value is False: yield buf + 'false' elif isinstance(value, (int, long)): yield buf + str(value) elif isinstance(value, float): yield buf + _floatstr(value) elif _use_decimal and isinstance(value, Decimal): yield buf + str(value) else: yield buf if isinstance(value, (list, tuple)): chunks = _iterencode_list(value, _current_indent_level) elif isinstance(value, dict): chunks = _iterencode_dict(value, _current_indent_level) else: chunks = _iterencode(value, _current_indent_level) for chunk in chunks: yield chunk if newline_indent is not None: 
_current_indent_level -= 1 yield '\n' + (_indent * _current_indent_level) yield ']' if markers is not None: del markers[markerid] def _iterencode_dict(dct, _current_indent_level): if not dct: yield '{}' return if markers is not None: markerid = id(dct) if markerid in markers: raise ValueError("Circular reference detected") markers[markerid] = dct yield '{' if _indent is not None: _current_indent_level += 1 newline_indent = '\n' + (_indent * _current_indent_level) item_separator = _item_separator + newline_indent yield newline_indent else: newline_indent = None item_separator = _item_separator first = True if _sort_keys: items = dct.items() items.sort(key=lambda kv: kv[0]) else: items = dct.iteritems() for key, value in items: if isinstance(key, basestring): pass # JavaScript is weakly typed for these, so it makes sense to # also allow them. Many encoders seem to do something like this. elif isinstance(key, float): key = _floatstr(key) elif key is True: key = 'true' elif key is False: key = 'false' elif key is None: key = 'null' elif isinstance(key, (int, long)): key = str(key) elif _skipkeys: continue else: raise TypeError("key " + repr(key) + " is not a string") if first: first = False else: yield item_separator yield _encoder(key) yield _key_separator if isinstance(value, basestring): yield _encoder(value) elif value is None: yield 'null' elif value is True: yield 'true' elif value is False: yield 'false' elif isinstance(value, (int, long)): yield str(value) elif isinstance(value, float): yield _floatstr(value) elif _use_decimal and isinstance(value, Decimal): yield str(value) else: if isinstance(value, (list, tuple)): chunks = _iterencode_list(value, _current_indent_level) elif isinstance(value, dict): chunks = _iterencode_dict(value, _current_indent_level) else: chunks = _iterencode(value, _current_indent_level) for chunk in chunks: yield chunk if newline_indent is not None: _current_indent_level -= 1 yield '\n' + (_indent * _current_indent_level) yield '}' if 
markers is not None: del markers[markerid] def _iterencode(o, _current_indent_level): if isinstance(o, basestring): yield _encoder(o) elif o is None: yield 'null' elif o is True: yield 'true' elif o is False: yield 'false' elif isinstance(o, (int, long)): yield str(o) elif isinstance(o, float): yield _floatstr(o) elif isinstance(o, (list, tuple)): for chunk in _iterencode_list(o, _current_indent_level): yield chunk elif isinstance(o, dict): for chunk in _iterencode_dict(o, _current_indent_level): yield chunk elif _use_decimal and isinstance(o, Decimal): yield str(o) else: if markers is not None: markerid = id(o) if markerid in markers: raise ValueError("Circular reference detected") markers[markerid] = o o = _default(o) for chunk in _iterencode(o, _current_indent_level): yield chunk if markers is not None: del markers[markerid] return _iterencode
[ [ 8, 0, 0.003, 0.004, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.006, 0.002, 0, 0.66, 0.0588, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 1, 0, 0.008, 0.002, 0, 0.66, ...
[ "\"\"\"Implementation of JSONEncoder\n\"\"\"", "import re", "from decimal import Decimal", "def _import_speedups():\n try:\n from simplejson import _speedups\n return _speedups.encode_basestring_ascii, _speedups.make_encoder\n except ImportError:\n return None, None", " try:\n ...
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.

:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.

Encoding basic Python object hierarchies::

    >>> import simplejson as json
    >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
    '["foo", {"bar": ["baz", null, 1.0, 2]}]'
    >>> print json.dumps("\"foo\bar")
    "\"foo\bar"
    >>> print json.dumps(u'\u1234')
    "\u1234"
    >>> print json.dumps('\\')
    "\\"
    >>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
    {"a": 0, "b": 0, "c": 0}
    >>> from StringIO import StringIO
    >>> io = StringIO()
    >>> json.dump(['streaming API'], io)
    >>> io.getvalue()
    '["streaming API"]'

Compact encoding::

    >>> import simplejson as json
    >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
    '[1,2,3,{"4":5,"6":7}]'

Pretty printing::

    >>> import simplejson as json
    >>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent='    ')
    >>> print '\n'.join([l.rstrip() for l in s.splitlines()])
    {
        "4": 5,
        "6": 7
    }

Decoding JSON::

    >>> import simplejson as json
    >>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
    >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
    True
    >>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
    True
    >>> from StringIO import StringIO
    >>> io = StringIO('["streaming API"]')
    >>> json.load(io)[0] == 'streaming API'
    True

Specializing JSON object decoding::

    >>> import simplejson as json
    >>> def as_complex(dct):
    ...     if '__complex__' in dct:
    ...         return complex(dct['real'], dct['imag'])
    ...     return dct
    ...
    >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
    ...     object_hook=as_complex)
    (1+2j)
    >>> from decimal import Decimal
    >>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
    True

Specializing JSON object encoding::

    >>> import simplejson as json
    >>> def encode_complex(obj):
    ...     if isinstance(obj, complex):
    ...         return [obj.real, obj.imag]
    ...     raise TypeError(repr(o) + " is not JSON serializable")
    ...
    >>> json.dumps(2 + 1j, default=encode_complex)
    '[2.0, 1.0]'
    >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
    '[2.0, 1.0]'
    >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
    '[2.0, 1.0]'


Using simplejson.tool from the shell to validate and pretty-print::

    $ echo '{"json":"obj"}' | python -m simplejson.tool
    {
        "json": "obj"
    }
    $ echo '{ 1.2:3.4}' | python -m simplejson.tool
    Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.1.1'
__all__ = [
    'dump', 'dumps', 'load', 'loads',
    'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
    'OrderedDict',
]

__author__ = 'Bob Ippolito <bob@redivi.com>'

from decimal import Decimal

from decoder import JSONDecoder, JSONDecodeError
from encoder import JSONEncoder


def _import_OrderedDict():
    # Prefer the stdlib implementation (Python >= 2.7); fall back to the
    # bundled pure-Python recipe on older interpreters.
    import collections
    try:
        return collections.OrderedDict
    except AttributeError:
        import ordered_dict
        return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()


def _import_c_make_encoder():
    # Returns the C-accelerated encoder factory, or None when the
    # _speedups extension is not built.
    try:
        from simplejson._speedups import make_encoder
        return make_encoder
    except ImportError:
        return None

# Shared encoder used by the all-defaults fast path in dump()/dumps().
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8',
    default=None,
    use_decimal=False,
)


def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=False, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    If ``skipkeys`` is true then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, then some chunks written to ``fp``
    may be ``unicode`` instances, subject to normal Python ``str`` to
    ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
    understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
    to cause an error.

    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
    in strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If *indent* is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default is UTF-8.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.

    If *use_decimal* is true (default: ``False``) then decimal.Decimal
    will be natively serialized to JSON with full precision.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.

    """
    # cached encoder
    # Bug fix: the fast path must also be skipped when use_decimal is
    # requested, otherwise dump(obj, fp, use_decimal=True) silently used
    # the default (non-decimal) encoder. dumps() already had this check.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not use_decimal
        and not kw):
        iterable = _default_encoder.iterencode(obj)
    else:
        if cls is None:
            cls = JSONEncoder
        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators, encoding=encoding,
            default=default, use_decimal=use_decimal, **kw).iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in iterable:
        fp.write(chunk)


def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=False, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    If ``skipkeys`` is false then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, then the return value will be a
    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
    coercion rules instead of being escaped to an ASCII ``str``.

    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default is UTF-8.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.

    If *use_decimal* is true (default: ``False``) then decimal.Decimal
    will be natively serialized to JSON with full precision.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.

    """
    # cached encoder
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not use_decimal
        and not kw):
        return _default_encoder.encode(obj)
    if cls is None:
        cls = JSONEncoder
    return cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        use_decimal=use_decimal, **kw).encode(obj)


# Shared decoder used by the all-defaults fast path in loads().
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
                               object_pairs_hook=None)


def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, **kw):
    """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
    a JSON document) to a Python object.

    *encoding* determines the encoding used to interpret any
    :class:`str` objects decoded by this instance (``'utf-8'`` by
    default). It has no effect when decoding :class:`unicode` objects.

    Note that currently only encodings that are a superset of ASCII work,
    strings of other encodings should be passed in as :class:`unicode`.

    *object_hook*, if specified, will be called with the result of every
    JSON object decoded and its return value will be used in place of the
    given :class:`dict`. This can be used to provide custom
    deserializations (e.g. to support JSON-RPC class hinting).

    *object_pairs_hook* is an optional function that will be called with
    the result of any object literal decode with an ordered list of pairs.
    The return value of *object_pairs_hook* will be used instead of the
    :class:`dict`. This feature can be used to implement custom decoders
    that rely on the order that the key and value pairs are decoded (for
    example, :func:`collections.OrderedDict` will remember the order of
    insertion). If *object_hook* is also defined, the *object_pairs_hook*
    takes priority.

    *parse_float*, if specified, will be called with the string of every
    JSON float to be decoded.  By default, this is equivalent to
    ``float(num_str)``. This can be used to use another datatype or parser
    for JSON floats (e.g. :class:`decimal.Decimal`).

    *parse_int*, if specified, will be called with the string of every
    JSON int to be decoded.  By default, this is equivalent to
    ``int(num_str)``. This can be used to use another datatype or parser
    for JSON integers (e.g. :class:`float`).

    *parse_constant*, if specified, will be called with one of the
    following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
    can be used to raise an exception if invalid JSON numbers are
    encountered.

    If *use_decimal* is true (default: ``False``) then it implies
    parse_float=decimal.Decimal for parity with ``dump``.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.

    """
    return loads(fp.read(),
        encoding=encoding, cls=cls, object_hook=object_hook,
        parse_float=parse_float, parse_int=parse_int,
        parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
        use_decimal=use_decimal, **kw)


def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    *encoding* determines the encoding used to interpret any
    :class:`str` objects decoded by this instance (``'utf-8'`` by
    default). It has no effect when decoding :class:`unicode` objects.

    Note that currently only encodings that are a superset of ASCII work,
    strings of other encodings should be passed in as :class:`unicode`.

    *object_hook*, if specified, will be called with the result of every
    JSON object decoded and its return value will be used in place of the
    given :class:`dict`. This can be used to provide custom
    deserializations (e.g. to support JSON-RPC class hinting).

    *object_pairs_hook* is an optional function that will be called with
    the result of any object literal decode with an ordered list of pairs.
    The return value of *object_pairs_hook* will be used instead of the
    :class:`dict`. This feature can be used to implement custom decoders
    that rely on the order that the key and value pairs are decoded (for
    example, :func:`collections.OrderedDict` will remember the order of
    insertion). If *object_hook* is also defined, the *object_pairs_hook*
    takes priority.

    *parse_float*, if specified, will be called with the string of every
    JSON float to be decoded.  By default, this is equivalent to
    ``float(num_str)``. This can be used to use another datatype or parser
    for JSON floats (e.g. :class:`decimal.Decimal`).

    *parse_int*, if specified, will be called with the string of every
    JSON int to be decoded.  By default, this is equivalent to
    ``int(num_str)``. This can be used to use another datatype or parser
    for JSON integers (e.g. :class:`float`).

    *parse_constant*, if specified, will be called with one of the
    following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
    can be used to raise an exception if invalid JSON numbers are
    encountered.

    If *use_decimal* is true (default: ``False``) then it implies
    parse_float=decimal.Decimal for parity with ``dump``.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.

    """
    if (cls is None and encoding is None and object_hook is None and
            parse_int is None and parse_float is None and
            parse_constant is None and object_pairs_hook is None
            and not use_decimal and not kw):
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    if object_hook is not None:
        kw['object_hook'] = object_hook
    if object_pairs_hook is not None:
        kw['object_pairs_hook'] = object_pairs_hook
    if parse_float is not None:
        kw['parse_float'] = parse_float
    if parse_int is not None:
        kw['parse_int'] = parse_int
    if parse_constant is not None:
        kw['parse_constant'] = parse_constant
    if use_decimal:
        if parse_float is not None:
            raise TypeError("use_decimal=True implies parse_float=Decimal")
        kw['parse_float'] = Decimal
    return cls(encoding=encoding, **kw).decode(s)


def _toggle_speedups(enabled):
    """Switch the package between C speedups and pure-Python code paths
    (primarily for testing), rebuilding the cached default codecs so the
    change takes effect for the module-level fast paths."""
    import simplejson.decoder as dec
    import simplejson.encoder as enc
    import simplejson.scanner as scan
    c_make_encoder = _import_c_make_encoder()
    if enabled:
        dec.scanstring = dec.c_scanstring or dec.py_scanstring
        enc.c_make_encoder = c_make_encoder
        enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
            enc.py_encode_basestring_ascii)
        scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
    else:
        dec.scanstring = dec.py_scanstring
        enc.c_make_encoder = None
        enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
        scan.make_scanner = scan.py_make_scanner
    dec.make_scanner = scan.make_scanner
    global _default_decoder
    _default_decoder = JSONDecoder(
        encoding=None,
        object_hook=None,
        object_pairs_hook=None,
    )
    global _default_encoder
    _default_encoder = JSONEncoder(
       skipkeys=False,
       ensure_ascii=True,
       check_circular=True,
       allow_nan=True,
       indent=None,
       separators=None,
       encoding='utf-8',
       default=None,
   )
[ [ 8, 0, 0.1144, 0.2265, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.2288, 0.0023, 0, 0.66, 0.0625, 162, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.2357, 0.0114, 0, 0.66...
[ "r\"\"\"JSON (JavaScript Object Notation) <http://json.org> is a subset of\nJavaScript syntax (ECMA-262 3rd edition) used as a lightweight data\ninterchange format.\n\n:mod:`simplejson` exposes an API familiar to users of the standard library\n:mod:`marshal` and :mod:`pickle` modules. It is the externally maintaine...
"""JSON token scanner
"""
import re


def _import_c_make_scanner():
    # Return the C-accelerated scanner factory when the _speedups
    # extension is built; None otherwise so the pure-Python one is used.
    try:
        from simplejson._speedups import make_scanner
        return make_scanner
    except ImportError:
        return None
c_make_scanner = _import_c_make_scanner()

__all__ = ['make_scanner']

# Matches a JSON number: integer part (group 1), optional fraction
# (group 2, including the dot), optional exponent (group 3).
NUMBER_RE = re.compile(
    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
    (re.VERBOSE | re.MULTILINE | re.DOTALL))


def py_make_scanner(context):
    """Build a ``scan_once(string, idx)`` callable from a decoder context.

    *context* is expected to be a JSONDecoder-like object; its parse
    callbacks and options are bound to locals up front so the hot inner
    function does only fast local lookups.

    ``scan_once`` returns a ``(value, end_index)`` tuple, or raises
    StopIteration when *idx* is out of range or no JSON value starts
    at *idx* (the caller turns that into a decode error).
    """
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    encoding = context.encoding
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook
    object_pairs_hook = context.object_pairs_hook
    memo = context.memo

    def _scan_once(string, idx):
        # Dispatch on the first character of the candidate value.
        try:
            nextchar = string[idx]
        except IndexError:
            # Past end of input: no value here.
            raise StopIteration
        if nextchar == '"':
            return parse_string(string, idx + 1, encoding, strict)
        elif nextchar == '{':
            # memo is shared so repeated object keys are interned.
            return parse_object((string, idx + 1), encoding, strict,
                _scan_once, object_hook, object_pairs_hook, memo)
        elif nextchar == '[':
            return parse_array((string, idx + 1), _scan_once)
        elif nextchar == 'n' and string[idx:idx + 4] == 'null':
            return None, idx + 4
        elif nextchar == 't' and string[idx:idx + 4] == 'true':
            return True, idx + 4
        elif nextchar == 'f' and string[idx:idx + 5] == 'false':
            return False, idx + 5
        # NOTE: the number match is tried before the NaN/Infinity
        # constants; '-Infinity' only reaches parse_constant because the
        # number regex requires a digit after the minus sign.
        m = match_number(string, idx)
        if m is not None:
            integer, frac, exp = m.groups()
            if frac or exp:
                res = parse_float(integer + (frac or '') + (exp or ''))
            else:
                res = parse_int(integer)
            return res, m.end()
        elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
            return parse_constant('NaN'), idx + 3
        elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
            return parse_constant('Infinity'), idx + 8
        elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
            return parse_constant('-Infinity'), idx + 9
        else:
            raise StopIteration

    def scan_once(string, idx):
        # Public entry point: always clear the key memo afterwards so it
        # does not grow across independent documents.
        try:
            return _scan_once(string, idx)
        finally:
            memo.clear()

    return scan_once

make_scanner = c_make_scanner or py_make_scanner
[ [ 8, 0, 0.0195, 0.026, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.039, 0.013, 0, 0.66, 0.1429, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 2, 0, 0.0844, 0.0779, 0, 0.66, ...
[ "\"\"\"JSON token scanner\n\"\"\"", "import re", "def _import_c_make_scanner():\n try:\n from simplejson._speedups import make_scanner\n return make_scanner\n except ImportError:\n return None", " try:\n from simplejson._speedups import make_scanner\n return make_sc...
"""Drop-in replacement for collections.OrderedDict by Raymond Hettinger

http://code.activestate.com/recipes/576693/

"""
from UserDict import DictMixin

# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
try:
    all
except NameError:
    # Python < 2.5 has no all() builtin; provide a minimal shim.
    def all(seq):
        for elem in seq:
            if not elem:
                return False
        return True

class OrderedDict(dict, DictMixin):
    # Insertion order is tracked with a circular doubly linked list of
    # [key, prev, next] cells rooted at the sentinel self.__end; the
    # key -> cell index lives in self.__map. The dict base class holds
    # the actual key/value mapping.

    def __init__(self, *args, **kwds):
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Only initialize the linked list once (guards against
            # __init__ being called again on an existing instance).
            self.__end
        except AttributeError:
            self.clear()
        self.update(*args, **kwds)

    def clear(self):
        self.__end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.__map = {}                 # key --> [key, prev, next]
        dict.clear(self)

    def __setitem__(self, key, value):
        # New keys are appended at the tail (end[1] is the last node);
        # existing keys keep their position.
        if key not in self:
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the node from the list.
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev

    def __iter__(self):
        # Walk the linked list forward, yielding keys in insertion order.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]

    def __reversed__(self):
        # Walk the linked list backward.
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]

    def popitem(self, last=True):
        """Remove and return a (key, value) pair; LIFO when last=True."""
        if not self:
            raise KeyError('dictionary is empty')
        # Modified from original to support Python 2.4, see
        # http://code.google.com/p/simplejson/issues/detail?id=53
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value

    def __reduce__(self):
        # Pickle as (class, (items,)) so the linked-list internals are
        # rebuilt on load; temporarily drop them from the instance dict.
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def keys(self):
        return list(self)

    # Derive the bulk operations from DictMixin; they are expressed in
    # terms of the ordered primitives above.
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())

    def copy(self):
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        # Order-sensitive comparison against another OrderedDict;
        # order-insensitive against a plain dict.
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and \
                   all(p==q for p, q in zip(self.items(), other.items()))
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other
[ [ 8, 0, 0.0252, 0.042, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0504, 0.0084, 0, 0.66, 0.3333, 351, 0, 1, 0, 0, 351, 0, 0 ], [ 7, 0, 0.1134, 0.0672, 0, 0.66,...
[ "\"\"\"Drop-in replacement for collections.OrderedDict by Raymond Hettinger\n\nhttp://code.activestate.com/recipes/576693/\n\n\"\"\"", "from UserDict import DictMixin", "try:\n all\nexcept NameError:\n def all(seq):\n for elem in seq:\n if not elem:\n return False\n ...
import logging
import os


def create_env():
    """Return the default console log level.

    DEBUG when the LOGGING_DEBUG environment variable is "1", "True"
    or "on"; INFO otherwise.
    """
    default_log_level = logging.INFO
    if os.environ.get("LOGGING_DEBUG") in ("1", "True", "on"):
        default_log_level = logging.DEBUG
    return default_log_level


class Formatter(object):
    """A core log formatter."""

    def console_formatter(self):
        """Return a logging.Formatter suitable for console output."""
        fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        return logging.Formatter(fmt)


def initialize_handlers():
    """Build the default handler list: one console StreamHandler whose
    level comes from create_env()."""
    default_log_level = create_env()
    handlers = []
    formatter = Formatter()
    console = logging.StreamHandler()
    console.setFormatter(formatter.console_formatter())
    console.setLevel(default_log_level)
    handlers.append(console)
    return handlers


# Module-level cache so handlers are attached exactly once per process.
_log = None


def logger(name, handlers=initialize_handlers):
    """Return the process-wide configured logger, creating it on first use.

    Bug fix: the original declared ``global _log`` and checked the cache
    but never assigned it, so every call built a new logger and attached
    another handler (duplicating output). The first configured logger is
    now stored in ``_log`` and returned on subsequent calls.
    """
    global _log
    if _log is None:
        log = logging.getLogger(name)
        log.setLevel(logging.DEBUG)
        for handler in handlers():
            log.addHandler(handler)
        _log = log
    return _log
[ [ 1, 0, 0.0204, 0.0204, 0, 0.66, 0, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.0408, 0.0204, 0, 0.66, 0.1667, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 2, 0, 0.1837, 0.1429, 0, ...
[ "import logging", "import os", "def create_env():\n \"\"\"Sets logging level if LOGGING_DEBUG is set\"\"\"\n\n default_log_level = logging.INFO\n if os.environ.get(\"LOGGING_DEBUG\") in (\"1\", \"True\", \"on\"):\n default_log_level = logging.DEBUG\n return default_log_level", " \"\"\"Se...
"""
Author: Noah Gift
Date: 08/05/2009

A really simple set of defaults for logging
===========================================

You simply do this::

    >>> from sensible.loginit import logger
    >>> log = logger("MyApp")
    >>> log.info("stuff")
    2009-08-04 23:56:22,583 - MyApp - INFO - stuff

Environment Variable
--------------------

If you want log.debug messages to be printed, set the environment
variable before starting your program::

    export LOGGING_DEBUG=1

"""
[ [ 8, 0, 0.5227, 1, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ] ]
[ "\"\"\"\nAuthor: Noah Gift\nDate: 08/05/2009\n\nA really simple set of defaults for logging\n============================================\n\nYou simply do this::" ]
from _BookShelf import _BookShelf


class BookShelf(_BookShelf):
    """Public BookShelf model; subclass of the generated _BookShelf base.

    Custom logic goes here.
    """
    pass
[ [ 1, 0, 0.2, 0.2, 0, 0.66, 0, 291, 0, 1, 0, 0, 291, 0, 0 ], [ 3, 0, 0.7, 0.4, 0, 0.66, 1, 80, 0, 0, 0, 0, 291, 0, 0 ] ]
[ "from _BookShelf import _BookShelf", "class BookShelf(_BookShelf):\n pass" ]
"""
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>

This module offers extensions to the standard python 2.3+
datetime module.
"""
# Package metadata read by packaging tools and introspection.
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
__version__ = "1.5"
[ [ 8, 0, 0.3889, 0.6667, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.7778, 0.1111, 0, 0.66, 0.3333, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.8889, 0.1111, 0, 0.66...
[ "\"\"\"\nCopyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>\n\nThis module offers extensions to the standard python 2.3+\ndatetime module.\n\"\"\"", "__author__ = \"Gustavo Niemeyer <gustavo@niemeyer.net>\"", "__license__ = \"PSF License\"", "__version__ = \"1.5\"" ]
"""
Copyright (c) 2003-2005 Gustavo Niemeyer <gustavo@niemeyer.net>

This module offers extensions to the standard python 2.3+
datetime module.
"""
from dateutil.tz import tzfile
from tarfile import TarFile
import os

__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"

__all__ = ["setcachesize", "gettz", "rebuild"]

# Most-recently-used cache of (name, tzinfo) pairs, newest first.
CACHE = []
CACHESIZE = 10


class tzfile(tzfile):
    # Shadow dateutil.tz.tzfile so instances pickle by zone name
    # (rebuilt through gettz on unpickling) rather than by file contents.
    def __reduce__(self):
        return (gettz, (self._filename,))


def getzoneinfofile():
    # Pick the lexicographically newest zoneinfo*.tar.* archive shipped
    # alongside this module, or None when no archive is present.
    filenames = os.listdir(os.path.join(os.path.dirname(__file__)))
    filenames.sort()
    filenames.reverse()
    for entry in filenames:
        if entry.startswith("zoneinfo") and ".tar." in entry:
            return os.path.join(os.path.dirname(__file__), entry)
    return None

ZONEINFOFILE = getzoneinfofile()

del getzoneinfofile


def setcachesize(size):
    """Set the maximum number of cached zones, trimming the cache."""
    global CACHESIZE, CACHE
    CACHESIZE = size
    del CACHE[size:]


def gettz(name):
    """Return a tzinfo for *name* from the bundled archive, or None.

    Lookups are served from CACHE when possible; otherwise the zone file
    is extracted from the tar archive. Failed lookups are cached as None
    too.
    """
    tzinfo = None
    if ZONEINFOFILE:
        # for/else: the else branch runs only when no cached entry broke
        # out of the loop, i.e. on a cache miss.
        for cachedname, tzinfo in CACHE:
            if cachedname == name:
                break
        else:
            tf = TarFile.open(ZONEINFOFILE)
            try:
                zonefile = tf.extractfile(name)
            except KeyError:
                tzinfo = None
            else:
                tzinfo = tzfile(zonefile)
            tf.close()
            CACHE.insert(0, (name, tzinfo))
            del CACHE[CACHESIZE:]
    return tzinfo


def rebuild(filename, tag=None, format="gz"):
    """Rebuild the bundled zoneinfo archive from an Olson tzdata tarball.

    Extracts *filename*, compiles each zone source with the external
    ``zic`` tool, replaces any existing zoneinfo*.tar.* archive next to
    this module, and repacks the compiled zones.

    NOTE(review): with the default tag=None the target name becomes
    "zoneinfoNone.tar.<format>" ('%s' stringifies None) — looks like
    callers are expected to always pass a tag; confirm before relying on
    the default.
    """
    import tempfile, shutil
    tmpdir = tempfile.mkdtemp()
    zonedir = os.path.join(tmpdir, "zoneinfo")
    moduledir = os.path.dirname(__file__)
    if tag: tag = "-"+tag
    targetname = "zoneinfo%s.tar.%s" % (tag, format)
    try:
        tf = TarFile.open(filename)
        for name in tf.getnames():
            # Skip non-zone files shipped in the tzdata tarball.
            if not (name.endswith(".sh") or
                    name.endswith(".tab") or
                    name == "leapseconds"):
                tf.extract(name, tmpdir)
                filepath = os.path.join(tmpdir, name)
                # NOTE(review): paths are interpolated unquoted into a
                # shell command; safe only for trusted, space-free paths.
                os.system("zic -d %s %s" % (zonedir, filepath))
        tf.close()
        target = os.path.join(moduledir, targetname)
        # Remove any previously built archives before writing the new one.
        for entry in os.listdir(moduledir):
            if entry.startswith("zoneinfo") and ".tar." in entry:
                os.unlink(os.path.join(moduledir, entry))
        tf = TarFile.open(target, "w:%s" % format)
        for entry in os.listdir(zonedir):
            entrypath = os.path.join(zonedir, entry)
            tf.add(entrypath, entry)
        tf.close()
    finally:
        shutil.rmtree(tmpdir)
[ [ 8, 0, 0.0402, 0.069, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0805, 0.0115, 0, 0.66, 0.0714, 872, 0, 1, 0, 0, 872, 0, 0 ], [ 1, 0, 0.092, 0.0115, 0, 0.66, ...
[ "\"\"\"\nCopyright (c) 2003-2005 Gustavo Niemeyer <gustavo@niemeyer.net>\n\nThis module offers extensions to the standard python 2.3+\ndatetime module.\n\"\"\"", "from dateutil.tz import tzfile", "from tarfile import TarFile", "import os", "__author__ = \"Gustavo Niemeyer <gustavo@niemeyer.net>\"", "__li...
# This code was originally contributed by Jeffrey Harris.
import datetime
import struct
import _winreg

__author__ = "Jeffrey Harris & Gustavo Niemeyer <gustavo@niemeyer.net>"

__all__ = ["tzwin", "tzwinlocal"]

ONEWEEK = datetime.timedelta(7)

# Registry locations of the timezone database (NT vs. 9x) and of the
# machine's current timezone settings.
TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"


def _settzkeyname():
    # Probe the NT key; fall back to the 9x key if it does not exist.
    global TZKEYNAME
    handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
    try:
        _winreg.OpenKey(handle, TZKEYNAMENT).Close()
        TZKEYNAME = TZKEYNAMENT
    except WindowsError:
        TZKEYNAME = TZKEYNAME9X
    handle.Close()

_settzkeyname()


class tzwinbase(datetime.tzinfo):
    """tzinfo class based on win32's timezones available in the registry."""
    # Subclasses populate self._stdoffset/_dstoffset (minutes east of
    # UTC), the zone names, and the DST transition fields read by _isdst.

    def utcoffset(self, dt):
        if self._isdst(dt):
            return datetime.timedelta(minutes=self._dstoffset)
        else:
            return datetime.timedelta(minutes=self._stdoffset)

    def dst(self, dt):
        if self._isdst(dt):
            minutes = self._dstoffset - self._stdoffset
            return datetime.timedelta(minutes=minutes)
        else:
            return datetime.timedelta(0)

    def tzname(self, dt):
        if self._isdst(dt):
            return self._dstname
        else:
            return self._stdname

    def list():
        """Return a list of all time zones known to the system."""
        handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        tzkey = _winreg.OpenKey(handle, TZKEYNAME)
        result = [_winreg.EnumKey(tzkey, i)
                  for i in range(_winreg.QueryInfoKey(tzkey)[0])]
        tzkey.Close()
        handle.Close()
        return result
    list = staticmethod(list)

    def display(self):
        # Human-readable zone description from the registry.
        return self._display

    def _isdst(self, dt):
        # Compute this year's DST-on and DST-off instants and test dt
        # against them; the inverted comparison handles the southern
        # hemisphere, where DST spans the year boundary.
        dston = picknthweekday(dt.year, self._dstmonth, self._dstdayofweek,
                               self._dsthour, self._dstminute,
                               self._dstweeknumber)
        dstoff = picknthweekday(dt.year, self._stdmonth, self._stddayofweek,
                                self._stdhour, self._stdminute,
                                self._stdweeknumber)
        if dston < dstoff:
            return dston <= dt.replace(tzinfo=None) < dstoff
        else:
            return not dstoff <= dt.replace(tzinfo=None) < dston


class tzwin(tzwinbase):
    """Timezone looked up by name in the registry timezone database."""

    def __init__(self, name):
        self._name = name

        handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        tzkey = _winreg.OpenKey(handle, "%s\%s" % (TZKEYNAME, name))
        keydict = valuestodict(tzkey)
        tzkey.Close()
        handle.Close()

        self._stdname = keydict["Std"].encode("iso-8859-1")
        self._dstname = keydict["Dlt"].encode("iso-8859-1")

        self._display = keydict["Display"]

        # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
        # "TZI" is a packed TIME_ZONE_INFORMATION-style blob:
        # 3 longs (Bias, StandardBias, DaylightBias) + 16 shorts
        # (two truncated SYSTEMTIME structures) — per the layout
        # assumed by the unpacking below.
        tup = struct.unpack("=3l16h", keydict["TZI"])
        self._stdoffset = -tup[0]-tup[1]         # Bias + StandardBias * -1
        self._dstoffset = self._stdoffset-tup[2] # + DaylightBias * -1

        (self._stdmonth,
         self._stddayofweek,  # Sunday = 0
         self._stdweeknumber, # Last = 5
         self._stdhour,
         self._stdminute) = tup[4:9]

        (self._dstmonth,
         self._dstdayofweek,  # Sunday = 0
         self._dstweeknumber, # Last = 5
         self._dsthour,
         self._dstminute) = tup[12:17]

    def __repr__(self):
        return "tzwin(%s)" % repr(self._name)

    def __reduce__(self):
        # Pickle by zone name; reconstructed via the registry on load.
        return (self.__class__, (self._name,))


class tzwinlocal(tzwinbase):
    """Timezone taken from the machine's current timezone settings."""

    def __init__(self):

        handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)

        tzlocalkey = _winreg.OpenKey(handle, TZLOCALKEYNAME)
        keydict = valuestodict(tzlocalkey)
        tzlocalkey.Close()

        self._stdname = keydict["StandardName"].encode("iso-8859-1")
        self._dstname = keydict["DaylightName"].encode("iso-8859-1")

        # The display string lives under the named zone's database key;
        # tolerate its absence.
        try:
            tzkey = _winreg.OpenKey(handle, "%s\%s"%(TZKEYNAME, self._stdname))
            _keydict = valuestodict(tzkey)
            self._display = _keydict["Display"]
            tzkey.Close()
        except OSError:
            self._display = None

        handle.Close()

        self._stdoffset = -keydict["Bias"]-keydict["StandardBias"]
        self._dstoffset = self._stdoffset-keydict["DaylightBias"]

        # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
        tup = struct.unpack("=8h", keydict["StandardStart"])

        (self._stdmonth,
         self._stddayofweek,  # Sunday = 0
         self._stdweeknumber, # Last = 5
         self._stdhour,
         self._stdminute) = tup[1:6]

        tup = struct.unpack("=8h", keydict["DaylightStart"])

        (self._dstmonth,
         self._dstdayofweek,  # Sunday = 0
         self._dstweeknumber, # Last = 5
         self._dsthour,
         self._dstminute) = tup[1:6]

    def __reduce__(self):
        return (self.__class__, ())


def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
    """dayofweek == 0 means Sunday, whichweek 5 means last instance"""
    first = datetime.datetime(year, month, 1, hour, minute)
    weekdayone = first.replace(day=((dayofweek-first.isoweekday())%7+1))
    # Count back from the latest candidate so whichweek=5 naturally
    # resolves to the last occurrence within the month.
    for n in xrange(whichweek):
        dt = weekdayone+(whichweek-n)*ONEWEEK
        if dt.month == month:
            return dt


def valuestodict(key):
    """Convert a registry key's values to a dictionary."""
    dict = {}
    size = _winreg.QueryInfoKey(key)[1]
    for i in range(size):
        data = _winreg.EnumValue(key, i)
        dict[data[0]] = data[1]
    return dict
[ [ 1, 0, 0.0111, 0.0056, 0, 0.66, 0, 426, 0, 1, 0, 0, 426, 0, 0 ], [ 1, 0, 0.0167, 0.0056, 0, 0.66, 0.0667, 399, 0, 1, 0, 0, 399, 0, 0 ], [ 1, 0, 0.0222, 0.0056, 0, ...
[ "import datetime", "import struct", "import _winreg", "__author__ = \"Jeffrey Harris & Gustavo Niemeyer <gustavo@niemeyer.net>\"", "__all__ = [\"tzwin\", \"tzwinlocal\"]", "ONEWEEK = datetime.timedelta(7)", "TZKEYNAMENT = r\"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Time Zones\"", "TZKEYNAME9X ...
from _Book import _Book class Book(_Book): pass # Custom logic goes here.
[ [ 1, 0, 0.2, 0.2, 0, 0.66, 0, 208, 0, 1, 0, 0, 208, 0, 0 ], [ 3, 0, 0.7, 0.4, 0, 0.66, 1, 97, 0, 0, 0, 0, 208, 0, 0 ] ]
[ "from _Book import _Book", "class Book(_Book):\n pass" ]
''' Module which brings history information about files from Mercurial. @author: Rodrigo Damazio ''' import re import subprocess REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*') def _GetOutputLines(args): ''' Runs an external process and returns its output as a list of lines. @param args: the arguments to run ''' process = subprocess.Popen(args, stdout=subprocess.PIPE, universal_newlines = True, shell = False) output = process.communicate()[0] return output.splitlines() def FillMercurialRevisions(filename, parsed_file): ''' Fills the revs attribute of all strings in the given parsed file with a list of revisions that touched the lines corresponding to that string. @param filename: the name of the file to get history for @param parsed_file: the parsed file to modify ''' # Take output of hg annotate to get revision of each line output_lines = _GetOutputLines(['hg', 'annotate', '-c', filename]) # Create a map of line -> revision (key is list index, line 0 doesn't exist) line_revs = ['dummy'] for line in output_lines: rev_match = REVISION_REGEX.match(line) if not rev_match: raise 'Unexpected line of output from hg: %s' % line rev_hash = rev_match.group('hash') line_revs.append(rev_hash) for str in parsed_file.itervalues(): # Get the lines that correspond to each string start_line = str['startLine'] end_line = str['endLine'] # Get the revisions that touched those lines revs = [] for line_number in range(start_line, end_line + 1): revs.append(line_revs[line_number]) # Merge with any revisions that were already there # (for explict revision specification) if 'revs' in str: revs += str['revs'] # Assign the revisions to the string str['revs'] = frozenset(revs) def DoesRevisionSuperceed(filename, rev1, rev2): ''' Tells whether a revision superceeds another. This essentially means that the older revision is an ancestor of the newer one. This also returns True if the two revisions are the same. 
@param rev1: the revision that may be superceeding the other @param rev2: the revision that may be superceeded @return: True if rev1 superceeds rev2 or they're the same ''' if rev1 == rev2: return True # TODO: Add filename args = ['hg', 'log', '-r', 'ancestors(%s)' % rev1, '--template', '{node|short}\n', filename] output_lines = _GetOutputLines(args) return rev2 in output_lines def NewestRevision(filename, rev1, rev2): ''' Returns which of two revisions is closest to the head of the repository. If none of them is the ancestor of the other, then we return either one. @param rev1: the first revision @param rev2: the second revision ''' if DoesRevisionSuperceed(filename, rev1, rev2): return rev1 return rev2
[ [ 8, 0, 0.0319, 0.0532, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0745, 0.0106, 0, 0.66, 0.1429, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 1, 0, 0.0851, 0.0106, 0, 0.66...
[ "'''\nModule which brings history information about files from Mercurial.\n\n@author: Rodrigo Damazio\n'''", "import re", "import subprocess", "REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')", "def _GetOutputLines(args):\n '''\n Runs an external process and returns its output as a list of lines...
#!/usr/bin/python ''' Entry point for My Tracks i18n tool. @author: Rodrigo Damazio ''' import mytracks.files import mytracks.translate import mytracks.validate import sys def Usage(): print 'Usage: %s <command> [<language> ...]\n' % sys.argv[0] print 'Commands are:' print ' cleanup' print ' translate' print ' validate' sys.exit(1) def Translate(languages): ''' Asks the user to interactively translate any missing or oudated strings from the files for the given languages. @param languages: the languages to translate ''' validator = mytracks.validate.Validator(languages) validator.Validate() missing = validator.missing_in_lang() outdated = validator.outdated_in_lang() for lang in languages: untranslated = missing[lang] + outdated[lang] if len(untranslated) == 0: continue translator = mytracks.translate.Translator(lang) translator.Translate(untranslated) def Validate(languages): ''' Computes and displays errors in the string files for the given languages. @param languages: the languages to compute for ''' validator = mytracks.validate.Validator(languages) validator.Validate() error_count = 0 if (validator.valid()): print 'All files OK' else: for lang, missing in validator.missing_in_master().iteritems(): print 'Missing in master, present in %s: %s:' % (lang, str(missing)) error_count = error_count + len(missing) for lang, missing in validator.missing_in_lang().iteritems(): print 'Missing in %s, present in master: %s:' % (lang, str(missing)) error_count = error_count + len(missing) for lang, outdated in validator.outdated_in_lang().iteritems(): print 'Outdated in %s: %s:' % (lang, str(outdated)) error_count = error_count + len(outdated) return error_count if __name__ == '__main__': argv = sys.argv argc = len(argv) if argc < 2: Usage() languages = mytracks.files.GetAllLanguageFiles() if argc == 3: langs = set(argv[2:]) if not langs.issubset(languages): raise 'Language(s) not found' # Filter just to the languages specified languages = dict((lang, lang_file) for lang, 
lang_file in languages.iteritems() if lang in langs or lang == 'en' ) cmd = argv[1] if cmd == 'translate': Translate(languages) elif cmd == 'validate': error_count = Validate(languages) else: Usage() error_count = 0 print '%d errors found.' % error_count
[ [ 8, 0, 0.0417, 0.0521, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0833, 0.0104, 0, 0.66, 0.125, 640, 0, 1, 0, 0, 640, 0, 0 ], [ 1, 0, 0.0938, 0.0104, 0, 0.66,...
[ "'''\nEntry point for My Tracks i18n tool.\n\n@author: Rodrigo Damazio\n'''", "import mytracks.files", "import mytracks.translate", "import mytracks.validate", "import sys", "def Usage():\n print('Usage: %s <command> [<language> ...]\\n' % sys.argv[0])\n print('Commands are:')\n print(' cleanup')\n p...
''' Module which prompts the user for translations and saves them. TODO: implement @author: Rodrigo Damazio ''' class Translator(object): ''' classdocs ''' def __init__(self, language): ''' Constructor ''' self._language = language def Translate(self, string_names): print string_names
[ [ 8, 0, 0.1905, 0.3333, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 3, 0, 0.7143, 0.619, 0, 0.66, 1, 229, 0, 2, 0, 0, 186, 0, 1 ], [ 8, 1, 0.5238, 0.1429, 1, 0.15, ...
[ "'''\nModule which prompts the user for translations and saves them.\n\nTODO: implement\n\n@author: Rodrigo Damazio\n'''", "class Translator(object):\n '''\n classdocs\n '''\n\n def __init__(self, language):\n '''\n Constructor", " '''\n classdocs\n '''", " def __init__(self, language):\n '''...
''' Module which compares languague files to the master file and detects issues. @author: Rodrigo Damazio ''' import os from mytracks.parser import StringsParser import mytracks.history class Validator(object): def __init__(self, languages): ''' Builds a strings file validator. Params: @param languages: a dictionary mapping each language to its corresponding directory ''' self._langs = {} self._master = None self._language_paths = languages parser = StringsParser() for lang, lang_dir in languages.iteritems(): filename = os.path.join(lang_dir, 'strings.xml') parsed_file = parser.Parse(filename) mytracks.history.FillMercurialRevisions(filename, parsed_file) if lang == 'en': self._master = parsed_file else: self._langs[lang] = parsed_file self._Reset() def Validate(self): ''' Computes whether all the data in the files for the given languages is valid. ''' self._Reset() self._ValidateMissingKeys() self._ValidateOutdatedKeys() def valid(self): return (len(self._missing_in_master) == 0 and len(self._missing_in_lang) == 0 and len(self._outdated_in_lang) == 0) def missing_in_master(self): return self._missing_in_master def missing_in_lang(self): return self._missing_in_lang def outdated_in_lang(self): return self._outdated_in_lang def _Reset(self): # These are maps from language to string name list self._missing_in_master = {} self._missing_in_lang = {} self._outdated_in_lang = {} def _ValidateMissingKeys(self): ''' Computes whether there are missing keys on either side. ''' master_keys = frozenset(self._master.iterkeys()) for lang, file in self._langs.iteritems(): keys = frozenset(file.iterkeys()) missing_in_master = keys - master_keys missing_in_lang = master_keys - keys if len(missing_in_master) > 0: self._missing_in_master[lang] = missing_in_master if len(missing_in_lang) > 0: self._missing_in_lang[lang] = missing_in_lang def _ValidateOutdatedKeys(self): ''' Computers whether any of the language keys are outdated with relation to the master keys. 
''' for lang, file in self._langs.iteritems(): outdated = [] for key, str in file.iteritems(): # Get all revisions that touched master and language files for this # string. master_str = self._master[key] master_revs = master_str['revs'] lang_revs = str['revs'] if not master_revs or not lang_revs: print 'WARNING: No revision for %s in %s' % (key, lang) continue master_file = os.path.join(self._language_paths['en'], 'strings.xml') lang_file = os.path.join(self._language_paths[lang], 'strings.xml') # Assume that the repository has a single head (TODO: check that), # and as such there is always one revision which superceeds all others. master_rev = reduce( lambda r1, r2: mytracks.history.NewestRevision(master_file, r1, r2), master_revs) lang_rev = reduce( lambda r1, r2: mytracks.history.NewestRevision(lang_file, r1, r2), lang_revs) # If the master version is newer than the lang version if mytracks.history.DoesRevisionSuperceed(lang_file, master_rev, lang_rev): outdated.append(key) if len(outdated) > 0: self._outdated_in_lang[lang] = outdated
[ [ 8, 0, 0.0304, 0.0522, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0696, 0.0087, 0, 0.66, 0.25, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0783, 0.0087, 0, 0.66, ...
[ "'''\nModule which compares languague files to the master file and detects\nissues.\n\n@author: Rodrigo Damazio\n'''", "import os", "from mytracks.parser import StringsParser", "import mytracks.history", "class Validator(object):\n\n def __init__(self, languages):\n '''\n Builds a strings file valida...
''' Module for dealing with resource files (but not their contents). @author: Rodrigo Damazio ''' import os.path from glob import glob import re MYTRACKS_RES_DIR = 'MyTracks/res' ANDROID_MASTER_VALUES = 'values' ANDROID_VALUES_MASK = 'values-*' def GetMyTracksDir(): ''' Returns the directory in which the MyTracks directory is located. ''' path = os.getcwd() while not os.path.isdir(os.path.join(path, MYTRACKS_RES_DIR)): if path == '/': raise 'Not in My Tracks project' # Go up one level path = os.path.split(path)[0] return path def GetAllLanguageFiles(): ''' Returns a mapping from all found languages to their respective directories. ''' mytracks_path = GetMyTracksDir() res_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_VALUES_MASK) language_dirs = glob(res_dir) master_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_MASTER_VALUES) if len(language_dirs) == 0: raise 'No languages found!' if not os.path.isdir(master_dir): raise 'Couldn\'t find master file' language_tuples = [(re.findall(r'.*values-([A-Za-z-]+)', dir)[0],dir) for dir in language_dirs] language_tuples.append(('en', master_dir)) return dict(language_tuples)
[ [ 8, 0, 0.0667, 0.1111, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1333, 0.0222, 0, 0.66, 0.125, 79, 0, 1, 0, 0, 79, 0, 0 ], [ 1, 0, 0.1556, 0.0222, 0, 0.66, ...
[ "'''\nModule for dealing with resource files (but not their contents).\n\n@author: Rodrigo Damazio\n'''", "import os.path", "from glob import glob", "import re", "MYTRACKS_RES_DIR = 'MyTracks/res'", "ANDROID_MASTER_VALUES = 'values'", "ANDROID_VALUES_MASK = 'values-*'", "def GetMyTracksDir():\n '''\n...
''' Module which parses a string XML file. @author: Rodrigo Damazio ''' from xml.parsers.expat import ParserCreate import re #import xml.etree.ElementTree as ET class StringsParser(object): ''' Parser for string XML files. This object is not thread-safe and should be used for parsing a single file at a time, only. ''' def Parse(self, file): ''' Parses the given file and returns a dictionary mapping keys to an object with attributes for that key, such as the value, start/end line and explicit revisions. In addition to the standard XML format of the strings file, this parser supports an annotation inside comments, in one of these formats: <!-- KEEP_PARENT name="bla" --> <!-- KEEP_PARENT name="bla" rev="123456789012" --> Such an annotation indicates that we're explicitly inheriting form the master file (and the optional revision says that this decision is compatible with the master file up to that revision). @param file: the name of the file to parse ''' self._Reset() # Unfortunately expat is the only parser that will give us line numbers self._xml_parser = ParserCreate() self._xml_parser.StartElementHandler = self._StartElementHandler self._xml_parser.EndElementHandler = self._EndElementHandler self._xml_parser.CharacterDataHandler = self._CharacterDataHandler self._xml_parser.CommentHandler = self._CommentHandler file_obj = open(file) self._xml_parser.ParseFile(file_obj) file_obj.close() return self._all_strings def _Reset(self): self._currentString = None self._currentStringName = None self._currentStringValue = None self._all_strings = {} def _StartElementHandler(self, name, attrs): if name != 'string': return if 'name' not in attrs: return assert not self._currentString assert not self._currentStringName self._currentString = { 'startLine' : self._xml_parser.CurrentLineNumber, } if 'rev' in attrs: self._currentString['revs'] = [attrs['rev']] self._currentStringName = attrs['name'] self._currentStringValue = '' def _EndElementHandler(self, name): if name != 
'string': return assert self._currentString assert self._currentStringName self._currentString['value'] = self._currentStringValue self._currentString['endLine'] = self._xml_parser.CurrentLineNumber self._all_strings[self._currentStringName] = self._currentString self._currentString = None self._currentStringName = None self._currentStringValue = None def _CharacterDataHandler(self, data): if not self._currentString: return self._currentStringValue += data _KEEP_PARENT_REGEX = re.compile(r'\s*KEEP_PARENT\s+' r'name\s*=\s*[\'"]?(?P<name>[a-z0-9_]+)[\'"]?' r'(?:\s+rev=[\'"]?(?P<rev>[0-9a-f]{12})[\'"]?)?\s*', re.MULTILINE | re.DOTALL) def _CommentHandler(self, data): keep_parent_match = self._KEEP_PARENT_REGEX.match(data) if not keep_parent_match: return name = keep_parent_match.group('name') self._all_strings[name] = { 'keepParent' : True, 'startLine' : self._xml_parser.CurrentLineNumber, 'endLine' : self._xml_parser.CurrentLineNumber } rev = keep_parent_match.group('rev') if rev: self._all_strings[name]['revs'] = [rev]
[ [ 8, 0, 0.0261, 0.0435, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0609, 0.0087, 0, 0.66, 0.3333, 573, 0, 1, 0, 0, 573, 0, 0 ], [ 1, 0, 0.0696, 0.0087, 0, 0.66...
[ "'''\nModule which parses a string XML file.\n\n@author: Rodrigo Damazio\n'''", "from xml.parsers.expat import ParserCreate", "import re", "class StringsParser(object):\n '''\n Parser for string XML files.\n\n This object is not thread-safe and should be used for parsing a single file at\n a time, only.\n...
#!/bin/env python import xml.dom.minidom as dom import sys import struct WEAP_NUM = 780 struct_fmt = "<H BBHBBBB 8B8B8b8b8b8b8H bbBBBB" def pack_weapon(dict): l = [] l.append(dict['drain']) l.append(dict['shotRepeat']) l.append(dict['multi']) l.append(dict['weapAni']) l.append(dict['max']) l.append(dict['tx']) l.append(dict['ty']) l.append(dict['aim']) tmp = dict['patterns'] for j in xrange(8): l.append(tmp[j]['attack']) for j in xrange(8): l.append(tmp[j]['del']) for j in xrange(8): l.append(tmp[j]['sx']) for j in xrange(8): l.append(tmp[j]['sy']) for j in xrange(8): l.append(tmp[j]['bx']) for j in xrange(8): l.append(tmp[j]['by']) for j in xrange(8): l.append(tmp[j]['sg']) l.append(dict['acceleration']) l.append(dict['accelerationx']) l.append(dict['circleSize']) l.append(dict['sound']) l.append(dict['trail']) l.append(dict['shipBlastFilter']) return struct.pack(struct_fmt, *l) def unpack_weapon(str): tup = struct.unpack(struct_fmt, str) dict = {} dict['drain'] = tup[0] dict['shotRepeat'] = tup[1] dict['multi'] = tup[2] dict['weapAni'] = tup[3] dict['max'] = tup[4] dict['tx'] = tup[5] dict['ty'] = tup[6] dict['aim'] = tup[7] i = 8 tmp = [{} for j in xrange(8)] for j in xrange(8): tmp[j]['attack'] = tup[i] i += 1 for j in xrange(8): tmp[j]['del'] = tup[i] i += 1 for j in xrange(8): tmp[j]['sx'] = tup[i] i += 1 for j in xrange(8): tmp[j]['sy'] = tup[i] i += 1 for j in xrange(8): tmp[j]['bx'] = tup[i] i += 1 for j in xrange(8): tmp[j]['by'] = tup[i] i += 1 for j in xrange(8): tmp[j]['sg'] = tup[i] i += 1 dict['patterns'] = tmp dict['acceleration'] = tup[i] dict['accelerationx'] = tup[i+1] dict['circleSize'] = tup[i+2] dict['sound'] = tup[i+3] dict['trail'] = tup[i+4] dict['shipBlastFilter'] = tup[i+5] return dict def DOMToDict(doc, weap_node): dict = {} for i in weap_node.childNodes: if i.nodeType != i.ELEMENT_NODE: continue if i.hasAttribute("value"): dict[i.tagName] = int(i.getAttribute("value")) elif i.tagName == "patterns": dict['patterns'] = [{} for el in 
xrange(8)] index = 0 for j in i.childNodes: if j.nodeType != i.ELEMENT_NODE: continue attrs = [j.attributes.item(i) for i in xrange(j.attributes.length)] for i in attrs: dict['patterns'][index][i.name] = int(i.nodeValue) index += 1 return dict def dictToDOM(doc, root, dict, index=None): entry = doc.createElement("weapon") if index != None: entry.setAttribute("index", "%04X" % (index,)) keys = dict.keys() keys.sort() for i in keys: node = doc.createElement(i) if isinstance(dict[i], list): for j in dict[i]: keys = j.keys() keys.sort() n = doc.createElement("entry") for i in keys: n.setAttribute(i, str(j[i])) node.appendChild(n) else: node.setAttribute("value", str(dict[i])) entry.appendChild(node) root.appendChild(entry) def toXML(hdt, output): doc = dom.getDOMImplementation().createDocument(None, "TyrianHDT", None) try: f = file(hdt, "rb") except IOError: print "%s couldn't be opened for reading." % (hdt,) sys.exit(1) try: outf = file(output, "w") except IOError: print "%s couldn't be opened for writing." % (outf,) sys.exit(1) f.seek(struct.unpack("<i", f.read(4))[0]) f.read(7*2) sys.stdout.write("Converting weapons") index = 0 for i in xrange(WEAP_NUM+1): tmp = f.read(struct.calcsize(struct_fmt)) shot = unpack_weapon(tmp) dictToDOM(doc, doc.documentElement, shot, index) index += 1 sys.stdout.write(".") sys.stdout.flush() sys.stdout.write("Done!\n") sys.stdout.write("Writing XML...") sys.stdout.flush() doc.writexml(outf, addindent="\t", newl="\n") sys.stdout.write("Done!\n") def toHDT(input, hdt): try: f = file(input, "r") except IOError: print "%s couldn't be opened for reading." % (input,) sys.exit(1) try: outf = file(hdt, "r+b") except IOError: print "%s couldn't be opened for writing." 
% (hdt,) sys.exit(1) outf.seek(struct.unpack("<i", outf.read(4))[0]) outf.read(7*2) sys.stdout.write("Reading XML...") sys.stdout.flush() doc = dom.parse(f) sys.stdout.write("Done!\n") sys.stdout.write("Writing weapons") for i in doc.documentElement.childNodes: if i.nodeType != i.ELEMENT_NODE: continue shot = DOMToDict(doc, i) str = pack_weapon(shot) outf.write(str) sys.stdout.write(".") sys.stdout.flush() sys.stdout.write("Done!\n") def printHelp(): print "Usage: weapons.py toxml path/to/tyrian.hdt output.xml" print " weapons.py tohdt input.xml path/to/tyrian.hdt" sys.exit(1) ############################## if __name__ == "__main__": if len(sys.argv) != 4: printHelp() if sys.argv[1] == "toxml": toXML(sys.argv[2], sys.argv[3]) elif sys.argv[1] == "tohdt": toHDT(sys.argv[2], sys.argv[3]) else: printHelp()
[ [ 1, 0, 0.0129, 0.0043, 0, 0.66, 0, 770, 0, 1, 0, 0, 770, 0, 0 ], [ 1, 0, 0.0172, 0.0043, 0, 0.66, 0.0833, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.0215, 0.0043, 0, ...
[ "import xml.dom.minidom as dom", "import sys", "import struct", "WEAP_NUM = 780", "struct_fmt = \"<H BBHBBBB 8B8B8b8b8b8b8H bbBBBB\"", "def pack_weapon(dict):\n\tl = []\n\n\tl.append(dict['drain'])\n\tl.append(dict['shotRepeat'])\n\tl.append(dict['multi'])\n\tl.append(dict['weapAni'])\n\tl.append(dict['ma...
#!/usr/bin/python2.6 # # Simple http server to emulate api.playfoursquare.com import logging import shutil import sys import urlparse import SimpleHTTPServer import BaseHTTPServer class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): """Handle playfoursquare.com requests, for testing.""" def do_GET(self): logging.warn('do_GET: %s, %s', self.command, self.path) url = urlparse.urlparse(self.path) logging.warn('do_GET: %s', url) query = urlparse.parse_qs(url.query) query_keys = [pair[0] for pair in query] response = self.handle_url(url) if response != None: self.send_200() shutil.copyfileobj(response, self.wfile) self.wfile.close() do_POST = do_GET def handle_url(self, url): path = None if url.path == '/v1/venue': path = '../captures/api/v1/venue.xml' elif url.path == '/v1/addvenue': path = '../captures/api/v1/venue.xml' elif url.path == '/v1/venues': path = '../captures/api/v1/venues.xml' elif url.path == '/v1/user': path = '../captures/api/v1/user.xml' elif url.path == '/v1/checkcity': path = '../captures/api/v1/checkcity.xml' elif url.path == '/v1/checkins': path = '../captures/api/v1/checkins.xml' elif url.path == '/v1/cities': path = '../captures/api/v1/cities.xml' elif url.path == '/v1/switchcity': path = '../captures/api/v1/switchcity.xml' elif url.path == '/v1/tips': path = '../captures/api/v1/tips.xml' elif url.path == '/v1/checkin': path = '../captures/api/v1/checkin.xml' elif url.path == '/history/12345.rss': path = '../captures/api/v1/feed.xml' if path is None: self.send_error(404) else: logging.warn('Using: %s' % path) return open(path) def send_200(self): self.send_response(200) self.send_header('Content-type', 'text/xml') self.end_headers() def main(): if len(sys.argv) > 1: port = int(sys.argv[1]) else: port = 8080 server_address = ('0.0.0.0', port) httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler) sa = httpd.socket.getsockname() print "Serving HTTP on", sa[0], "port", sa[1], "..." 
httpd.serve_forever() if __name__ == '__main__': main()
[ [ 1, 0, 0.0588, 0.0118, 0, 0.66, 0, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.0706, 0.0118, 0, 0.66, 0.125, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0824, 0.0118, 0, 0...
[ "import logging", "import shutil", "import sys", "import urlparse", "import SimpleHTTPServer", "import BaseHTTPServer", "class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n \"\"\"Handle playfoursquare.com requests, for testing.\"\"\"\n\n def do_GET(self):\n logging.warn('do_GET: %s, %s',...
#!/usr/bin/python import datetime import sys import textwrap import common from xml.dom import pulldom PARSER = """\ /** * Copyright 2009 Joe LaPenna */ package com.joelapenna.foursquare.parsers; import com.joelapenna.foursquare.Foursquare; import com.joelapenna.foursquare.error.FoursquareError; import com.joelapenna.foursquare.error.FoursquareParseException; import com.joelapenna.foursquare.types.%(type_name)s; import org.xmlpull.v1.XmlPullParser; import org.xmlpull.v1.XmlPullParserException; import java.io.IOException; import java.util.logging.Level; import java.util.logging.Logger; /** * Auto-generated: %(timestamp)s * * @author Joe LaPenna (joe@joelapenna.com) * @param <T> */ public class %(type_name)sParser extends AbstractParser<%(type_name)s> { private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName()); private static final boolean DEBUG = Foursquare.PARSER_DEBUG; @Override public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException, FoursquareError, FoursquareParseException { parser.require(XmlPullParser.START_TAG, null, null); %(type_name)s %(top_node_name)s = new %(type_name)s(); while (parser.nextTag() == XmlPullParser.START_TAG) { String name = parser.getName(); %(stanzas)s } else { // Consume something we don't understand. 
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name); skipSubTree(parser); } } return %(top_node_name)s; } }""" BOOLEAN_STANZA = """\ } else if ("%(name)s".equals(name)) { %(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText())); """ GROUP_STANZA = """\ } else if ("%(name)s".equals(name)) { %(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser)); """ COMPLEX_STANZA = """\ } else if ("%(name)s".equals(name)) { %(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser)); """ STANZA = """\ } else if ("%(name)s".equals(name)) { %(top_node_name)s.set%(camel_name)s(parser.nextText()); """ def main(): type_name, top_node_name, attributes = common.WalkNodesForAttributes( sys.argv[1]) GenerateClass(type_name, top_node_name, attributes) def GenerateClass(type_name, top_node_name, attributes): """generate it. type_name: the type of object the parser returns top_node_name: the name of the object the parser returns. per common.WalkNodsForAttributes """ stanzas = [] for name in sorted(attributes): typ, children = attributes[name] replacements = Replacements(top_node_name, name, typ, children) if typ == common.BOOLEAN: stanzas.append(BOOLEAN_STANZA % replacements) elif typ == common.GROUP: stanzas.append(GROUP_STANZA % replacements) elif typ in common.COMPLEX: stanzas.append(COMPLEX_STANZA % replacements) else: stanzas.append(STANZA % replacements) if stanzas: # pop off the extranious } else for the first conditional stanza. 
stanzas[0] = stanzas[0].replace('} else ', '', 1) replacements = Replacements(top_node_name, name, typ, [None]) replacements['stanzas'] = '\n'.join(stanzas).strip() print PARSER % replacements def Replacements(top_node_name, name, typ, children): # CameCaseClassName type_name = ''.join([word.capitalize() for word in top_node_name.split('_')]) # CamelCaseClassName camel_name = ''.join([word.capitalize() for word in name.split('_')]) # camelCaseLocalName attribute_name = camel_name.lower().capitalize() # mFieldName field_name = 'm' + camel_name if children[0]: sub_parser_camel_case = children[0] + 'Parser' else: sub_parser_camel_case = (camel_name[:-1] + 'Parser') return { 'type_name': type_name, 'name': name, 'top_node_name': top_node_name, 'camel_name': camel_name, 'parser_name': typ + 'Parser', 'attribute_name': attribute_name, 'field_name': field_name, 'typ': typ, 'timestamp': datetime.datetime.now(), 'sub_parser_camel_case': sub_parser_camel_case, 'sub_type': children[0] } if __name__ == '__main__': main()
[ [ 1, 0, 0.0201, 0.0067, 0, 0.66, 0, 426, 0, 1, 0, 0, 426, 0, 0 ], [ 1, 0, 0.0268, 0.0067, 0, 0.66, 0.0769, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.0336, 0.0067, 0, ...
[ "import datetime", "import sys", "import textwrap", "import common", "from xml.dom import pulldom", "PARSER = \"\"\"\\\n/**\n * Copyright 2009 Joe LaPenna\n */\n\npackage com.joelapenna.foursquare.parsers;\n\nimport com.joelapenna.foursquare.Foursquare;", "BOOLEAN_STANZA = \"\"\"\\\n } else i...
#!/usr/bin/python """ Pull a oAuth protected page from foursquare. Expects ~/.oget to contain (one on each line): CONSUMER_KEY CONSUMER_KEY_SECRET USERNAME PASSWORD Don't forget to chmod 600 the file! """ import httplib import os import re import sys import urllib import urllib2 import urlparse import user from xml.dom import pulldom from xml.dom import minidom import oauth """From: http://groups.google.com/group/foursquare-api/web/oauth @consumer = OAuth::Consumer.new("consumer_token","consumer_secret", { :site => "http://foursquare.com", :scheme => :header, :http_method => :post, :request_token_path => "/oauth/request_token", :access_token_path => "/oauth/access_token", :authorize_path => "/oauth/authorize" }) """ SERVER = 'api.foursquare.com:80' CONTENT_TYPE_HEADER = {'Content-Type' :'application/x-www-form-urlencoded'} SIGNATURE_METHOD = oauth.OAuthSignatureMethod_HMAC_SHA1() AUTHEXCHANGE_URL = 'http://api.foursquare.com/v1/authexchange' def parse_auth_response(auth_response): return ( re.search('<oauth_token>(.*)</oauth_token>', auth_response).groups()[0], re.search('<oauth_token_secret>(.*)</oauth_token_secret>', auth_response).groups()[0] ) def create_signed_oauth_request(username, password, consumer): oauth_request = oauth.OAuthRequest.from_consumer_and_token( consumer, http_method='POST', http_url=AUTHEXCHANGE_URL, parameters=dict(fs_username=username, fs_password=password)) oauth_request.sign_request(SIGNATURE_METHOD, consumer, None) return oauth_request def main(): url = urlparse.urlparse(sys.argv[1]) # Nevermind that the query can have repeated keys. 
parameters = dict(urlparse.parse_qsl(url.query)) password_file = open(os.path.join(user.home, '.oget')) lines = [line.strip() for line in password_file.readlines()] if len(lines) == 4: cons_key, cons_key_secret, username, password = lines access_token = None else: cons_key, cons_key_secret, username, password, token, secret = lines access_token = oauth.OAuthToken(token, secret) consumer = oauth.OAuthConsumer(cons_key, cons_key_secret) if not access_token: oauth_request = create_signed_oauth_request(username, password, consumer) connection = httplib.HTTPConnection(SERVER) headers = {'Content-Type' :'application/x-www-form-urlencoded'} connection.request(oauth_request.http_method, AUTHEXCHANGE_URL, body=oauth_request.to_postdata(), headers=headers) auth_response = connection.getresponse().read() token = parse_auth_response(auth_response) access_token = oauth.OAuthToken(*token) open(os.path.join(user.home, '.oget'), 'w').write('\n'.join(( cons_key, cons_key_secret, username, password, token[0], token[1]))) oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer, access_token, http_method='POST', http_url=url.geturl(), parameters=parameters) oauth_request.sign_request(SIGNATURE_METHOD, consumer, access_token) connection = httplib.HTTPConnection(SERVER) connection.request(oauth_request.http_method, oauth_request.to_url(), body=oauth_request.to_postdata(), headers=CONTENT_TYPE_HEADER) print connection.getresponse().read() #print minidom.parse(connection.getresponse()).toprettyxml(indent=' ') if __name__ == '__main__': main()
[ [ 8, 0, 0.0631, 0.0991, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1261, 0.009, 0, 0.66, 0.05, 2, 0, 1, 0, 0, 2, 0, 0 ], [ 1, 0, 0.1351, 0.009, 0, 0.66, 0....
[ "\"\"\"\nPull a oAuth protected page from foursquare.\n\nExpects ~/.oget to contain (one on each line):\nCONSUMER_KEY\nCONSUMER_KEY_SECRET\nUSERNAME\nPASSWORD", "import httplib", "import os", "import re", "import sys", "import urllib", "import urllib2", "import urlparse", "import user", "from xml....
#!/usr/bin/python import os import subprocess import sys BASEDIR = '../main/src/com/joelapenna/foursquare' TYPESDIR = '../captures/types/v1' captures = sys.argv[1:] if not captures: captures = os.listdir(TYPESDIR) for f in captures: basename = f.split('.')[0] javaname = ''.join([c.capitalize() for c in basename.split('_')]) fullpath = os.path.join(TYPESDIR, f) typepath = os.path.join(BASEDIR, 'types', javaname + '.java') parserpath = os.path.join(BASEDIR, 'parsers', javaname + 'Parser.java') cmd = 'python gen_class.py %s > %s' % (fullpath, typepath) print cmd subprocess.call(cmd, stdout=sys.stdout, shell=True) cmd = 'python gen_parser.py %s > %s' % (fullpath, parserpath) print cmd subprocess.call(cmd, stdout=sys.stdout, shell=True)
[ [ 1, 0, 0.1111, 0.037, 0, 0.66, 0, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.1481, 0.037, 0, 0.66, 0.1429, 394, 0, 1, 0, 0, 394, 0, 0 ], [ 1, 0, 0.1852, 0.037, 0, 0.6...
[ "import os", "import subprocess", "import sys", "BASEDIR = '../main/src/com/joelapenna/foursquare'", "TYPESDIR = '../captures/types/v1'", "captures = sys.argv[1:]", "if not captures:\n captures = os.listdir(TYPESDIR)", " captures = os.listdir(TYPESDIR)", "for f in captures:\n basename = f.split('...
#!/usr/bin/python
"""Shared helpers for the Java code generators: walk a capture XML file and
collect the attribute names and types that it declares."""
import logging

from xml.dom import minidom
from xml.dom import pulldom

BOOLEAN = "boolean"
STRING = "String"
GROUP = "Group"

# Interfaces that all FoursquareTypes implement.
DEFAULT_INTERFACES = ['FoursquareType']

# Interfaces that specific FoursqureTypes implement.
INTERFACES = {
}

DEFAULT_CLASS_IMPORTS = [
]

# Extra imports per generated class, e.g.
#   'Venue': DEFAULT_CLASS_IMPORTS + [
#       'import com.joelapenna.foursquare.filters.VenueFilterable'],
CLASS_IMPORTS = {
}

# Stanza types that map to complex (non-primitive) generated classes.
COMPLEX = [
    'Group',
    'Badge',
    'Beenhere',
    'Checkin',
    'CheckinResponse',
    'City',
    'Credentials',
    'Data',
    'Mayor',
    'Rank',
    'Score',
    'Scoring',
    'Settings',
    'Stats',
    'Tags',
    'Tip',
    'User',
    'Venue',
]

TYPES = COMPLEX + ['boolean']


def WalkNodesForAttributes(path):
    """Parse the xml file getting all attributes.

    <venue>
      <attribute>value</attribute>
    </venue>

    Returns:
      type_name - The java-style name the top node will have. "Venue"
      top_node_name - unadultured name of the xml stanza, probably the type
          of java class we're creating. "venue"
      attributes - {'attribute': (type, [child])}
    """
    type_name = None
    top_node_name = None
    attributes = {}
    skip_depth = 0  # > 0 while we are inside a complex subtree being skipped

    for event, node in pulldom.parse(path):
        if skip_depth:
            # Track element nesting until the skipped subtree closes.
            if event == pulldom.END_ELEMENT:
                skip_depth -= 1
                logging.warn('(%s) Skip end: %s' % (str(skip_depth), node))
            elif event == pulldom.START_ELEMENT:
                logging.warn('(%s) Skipping: %s' % (str(skip_depth), node))
                skip_depth += 1
            continue

        if event != pulldom.START_ELEMENT:
            continue
        logging.warn('Parsing: ' + node.tagName)

        if type_name is None:
            # The first element names both the stanza and the generated class.
            type_name = ''.join(
                word.capitalize() for word in node.tagName.split('_'))
            top_node_name = node.tagName
            logging.warn('Found Top Node Name: ' + top_node_name)
            continue

        attr_type = node.getAttribute('type')
        child_type = node.getAttribute('child')

        if attr_type in COMPLEX:
            # Record the complex attribute but do not descend into it.
            logging.warn('Found Complex: ' + node.tagName)
            skip_depth = 1
        elif attr_type not in TYPES:
            # Anything unrecognized is treated as a plain String.
            logging.warn('Found String: ' + attr_type)
            attr_type = STRING
        else:
            logging.warn('Found Type: ' + attr_type)

        logging.warn('Adding: ' + str((node, attr_type)))
        # First declaration wins, matching setdefault semantics.
        attributes.setdefault(node.tagName, (attr_type, [child_type]))

    logging.warn('Attr: ' + str((type_name, top_node_name, attributes)))
    return type_name, top_node_name, attributes
[ [ 1, 0, 0.0263, 0.0088, 0, 0.66, 0, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.0439, 0.0088, 0, 0.66, 0.0833, 290, 0, 1, 0, 0, 290, 0, 0 ], [ 1, 0, 0.0526, 0.0088, 0, ...
[ "import logging", "from xml.dom import minidom", "from xml.dom import pulldom", "BOOLEAN = \"boolean\"", "STRING = \"String\"", "GROUP = \"Group\"", "DEFAULT_INTERFACES = ['FoursquareType']", "INTERFACES = {\n}", "DEFAULT_CLASS_IMPORTS = [\n]", "CLASS_IMPORTS = {\n# 'Checkin': DEFAULT_CLASS_IMP...
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Zdenko Podobný
# Author: Zdenko Podobný
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple python demo script of tesseract-ocr 3.02 c-api
"""

import os
import sys
import ctypes

# Demo variables
lang = "eng"                          # OCR language to initialize with
filename = "../phototest.tif"         # image to run OCR on
libpath = "/usr/local/lib64/"         # POSIX: directory with libtesseract
libpath_w = "../vs2008/DLL_Release/"  # Windows: DLL build output directory

# tessdata location: honour TESSDATA_PREFIX, default to the parent directory.
TESSDATA_PREFIX = os.environ.get('TESSDATA_PREFIX')
if not TESSDATA_PREFIX:
    TESSDATA_PREFIX = "../"

# Pick the platform-specific library name plus a fallback name.
if sys.platform == "win32":
    libname = libpath_w + "libtesseract302.dll"
    libname_alt = "libtesseract302.dll"
    # Make sure the DLL directory is searched for dependent DLLs too.
    os.environ["PATH"] += os.pathsep + libpath_w
else:
    libname = libpath + "libtesseract.so.3.0.2"
    libname_alt = "libtesseract.so.3"

# Try the fully-versioned library first, then the generic soname.
# NOTE(review): the inner handler catches only WindowsError, so on POSIX a
# second failed load propagates as an unhandled OSError — presumably intended
# only for the Windows demo path; confirm before relying on the exit(1) path.
try:
    tesseract = ctypes.cdll.LoadLibrary(libname)
except:
    try:
        tesseract = ctypes.cdll.LoadLibrary(libname_alt)
    except WindowsError, err:
        print("Trying to load '%s'..." % libname)
        print("Trying to load '%s'..." % libname_alt)
        print(err)
        exit(1)

# TessVersion returns a C string; tell ctypes so the pointer is not
# misinterpreted as an int.
tesseract.TessVersion.restype = ctypes.c_char_p
tesseract_version = tesseract.TessVersion()[:4]
# We need to check library version because libtesseract.so.3 is symlink
# and can point to other version than 3.02
if float(tesseract_version) < 3.02:
    print("Found tesseract-ocr library version %s."
          % tesseract_version)
    print("C-API is present only in version 3.02!")
    exit(2)

# Create an API handle and initialize it for the requested language.
api = tesseract.TessBaseAPICreate()
rc = tesseract.TessBaseAPIInit3(api, TESSDATA_PREFIX, lang);
if (rc):
    # Initialization failed: release the handle before exiting.
    tesseract.TessBaseAPIDelete(api)
    print("Could not initialize tesseract.\n")
    exit(3)

# Run OCR over the whole document and print the recognized text.
text_out = tesseract.TessBaseAPIProcessPages(api, filename, None , 0);
result_text = ctypes.string_at(text_out)
print result_text
[ [ 1, 0, 0.25, 0.25, 0, 0.66, 0, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.5, 0.25, 0, 0.66, 0.5, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.75, 0.25, 0, 0.66, 1, ...
[ "import os", "import sys", "import ctypes" ]
#!/usr/bin/env python
"""Distutils install script for the CRAWL-E crawling framework."""
from distutils.core import setup

from crawle import VERSION

# Package metadata gathered in one mapping so setup() reads as a single call.
_METADATA = dict(
    name='CRAWL-E',
    version=VERSION,
    description='Highly distributed web crawling framework',
    author='Bryce Boe',
    author_email='bboe (_at_) cs.ucsb.edu',
    url='http://code.google.com/p/crawl-e',
    py_modules=['crawle'],
)

setup(**_METADATA)
[ [ 1, 0, 0.1667, 0.0833, 0, 0.66, 0, 152, 0, 1, 0, 0, 152, 0, 0 ], [ 1, 0, 0.25, 0.0833, 0, 0.66, 0.5, 441, 0, 1, 0, 0, 441, 0, 0 ], [ 8, 0, 0.7083, 0.6667, 0, 0.66,...
[ "from distutils.core import setup", "from crawle import VERSION", "setup(name='CRAWL-E',\n version=VERSION,\n description='Highly distributed web crawling framework',\n author='Bryce Boe',\n author_email='bboe (_at_) cs.ucsb.edu',\n url='http://code.google.com/p/crawl-e',\n py_modu...
'''
Module which brings history information about files from Mercurial.

@author: Rodrigo Damazio
'''

import re
import subprocess

# Matches one line of "hg annotate -c" output: "abcdef123456: line contents".
REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')


def _GetOutputLines(args):
  '''
  Runs an external process and returns its output as a list of lines.

  @param args: the arguments to run
  '''
  process = subprocess.Popen(args,
                             stdout=subprocess.PIPE,
                             universal_newlines=True,
                             shell=False)
  output = process.communicate()[0]
  return output.splitlines()


def FillMercurialRevisions(filename, parsed_file):
  '''
  Fills the revs attribute of all strings in the given parsed file with a
  list of revisions that touched the lines corresponding to that string.

  @param filename: the name of the file to get history for
  @param parsed_file: the parsed file to modify
  @raise ValueError: if hg annotate output cannot be parsed
  '''
  # Take output of hg annotate to get revision of each line.
  output_lines = _GetOutputLines(['hg', 'annotate', '-c', filename])

  # Create a map of line -> revision (key is list index, line 0 doesn't exist).
  line_revs = ['dummy']
  for line in output_lines:
    rev_match = REVISION_REGEX.match(line)
    if not rev_match:
      # Fixed: the original raised a bare string, which is a TypeError on
      # any modern Python; raise a real exception type instead.
      raise ValueError('Unexpected line of output from hg: %s' % line)
    line_revs.append(rev_match.group('hash'))

  # 'string_info' renamed from 'str' to stop shadowing the builtin.
  for string_info in parsed_file.itervalues():
    # Get the lines that correspond to each string.
    start_line = string_info['startLine']
    end_line = string_info['endLine']

    # Get the revisions that touched those lines.
    revs = [line_revs[line_number]
            for line_number in range(start_line, end_line + 1)]

    # Merge with any revisions that were already there
    # (for explicit revision specification).
    if 'revs' in string_info:
      revs += string_info['revs']

    # Assign the revisions to the string.
    string_info['revs'] = frozenset(revs)


def DoesRevisionSuperceed(filename, rev1, rev2):
  '''
  Tells whether a revision superceeds another.

  This essentially means that the older revision is an ancestor of the newer
  one. This also returns True if the two revisions are the same.

  @param rev1: the revision that may be superceeding the other
  @param rev2: the revision that may be superceeded
  @return: True if rev1 superceeds rev2 or they're the same
  '''
  if rev1 == rev2:
    return True

  # Ask hg for all ancestors of rev1 (restricted to this file's history) and
  # check whether rev2 is among them.
  args = ['hg', 'log',
          '-r', 'ancestors(%s)' % rev1,
          '--template', '{node|short}\n',
          filename]
  output_lines = _GetOutputLines(args)
  return rev2 in output_lines


def NewestRevision(filename, rev1, rev2):
  '''
  Returns which of two revisions is closest to the head of the repository.

  If none of them is the ancestor of the other, then we return either one.

  @param rev1: the first revision
  @param rev2: the second revision
  '''
  if DoesRevisionSuperceed(filename, rev1, rev2):
    return rev1
  return rev2
[ [ 8, 0, 0.0319, 0.0532, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0745, 0.0106, 0, 0.66, 0.1429, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 1, 0, 0.0851, 0.0106, 0, 0.66...
[ "'''\nModule which brings history information about files from Mercurial.\n\n@author: Rodrigo Damazio\n'''", "import re", "import subprocess", "REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')", "def _GetOutputLines(args):\n '''\n Runs an external process and returns its output as a list of lines...
#!/usr/bin/python
'''
Entry point for My Tracks i18n tool.

@author: Rodrigo Damazio
'''

import mytracks.files
import mytracks.translate
import mytracks.validate
import sys


def Usage():
  '''Prints command-line usage information and exits with status 1.'''
  print('Usage: %s <command> [<language> ...]\n' % sys.argv[0])
  print('Commands are:')
  print(' cleanup')
  print(' translate')
  print(' validate')
  sys.exit(1)


def Translate(languages):
  '''
  Asks the user to interactively translate any missing or oudated strings
  from the files for the given languages.

  @param languages: the languages to translate
  '''
  validator = mytracks.validate.Validator(languages)
  validator.Validate()
  missing = validator.missing_in_lang()
  outdated = validator.outdated_in_lang()
  for lang in languages:
    # Fixed: the validator only stores entries for languages that actually
    # have problems, so index with .get() (the original KeyError'd on fully
    # translated languages) and normalize to lists (missing entries are
    # frozensets, which cannot be concatenated with '+').
    untranslated = list(missing.get(lang, [])) + list(outdated.get(lang, []))
    if len(untranslated) == 0:
      continue
    translator = mytracks.translate.Translator(lang)
    translator.Translate(untranslated)


def Validate(languages):
  '''
  Computes and displays errors in the string files for the given languages.

  @param languages: the languages to compute for
  @return: the total number of errors found
  '''
  validator = mytracks.validate.Validator(languages)
  validator.Validate()
  error_count = 0
  if (validator.valid()):
    print('All files OK')
  else:
    for lang, missing in validator.missing_in_master().iteritems():
      print('Missing in master, present in %s: %s:' % (lang, str(missing)))
      error_count = error_count + len(missing)
    for lang, missing in validator.missing_in_lang().iteritems():
      print('Missing in %s, present in master: %s:' % (lang, str(missing)))
      error_count = error_count + len(missing)
    for lang, outdated in validator.outdated_in_lang().iteritems():
      print('Outdated in %s: %s:' % (lang, str(outdated)))
      error_count = error_count + len(outdated)
  return error_count


if __name__ == '__main__':
  argv = sys.argv
  argc = len(argv)
  if argc < 2:
    Usage()

  languages = mytracks.files.GetAllLanguageFiles()
  if argc == 3:
    langs = set(argv[2:])
    if not langs.issubset(languages):
      # Fixed: string exceptions are a TypeError on modern Python.
      raise ValueError('Language(s) not found')
    # Filter just to the languages specified (master 'en' is always kept).
    languages = dict((lang, lang_file)
                     for lang, lang_file in languages.iteritems()
                     if lang in langs or lang == 'en')

  cmd = argv[1]
  # Fixed: initialize before dispatch so the validate count is not clobbered
  # by a later unconditional reset.
  error_count = 0
  if cmd == 'translate':
    Translate(languages)
  elif cmd == 'validate':
    error_count = Validate(languages)
  else:
    Usage()
  print('%d errors found.' % error_count)
[ [ 8, 0, 0.0417, 0.0521, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0833, 0.0104, 0, 0.66, 0.125, 640, 0, 1, 0, 0, 640, 0, 0 ], [ 1, 0, 0.0938, 0.0104, 0, 0.66,...
[ "'''\nEntry point for My Tracks i18n tool.\n\n@author: Rodrigo Damazio\n'''", "import mytracks.files", "import mytracks.translate", "import mytracks.validate", "import sys", "def Usage():\n print('Usage: %s <command> [<language> ...]\\n' % sys.argv[0])\n print('Commands are:')\n print(' cleanup')\n p...
'''
Module which prompts the user for translations and saves them.

TODO: implement

@author: Rodrigo Damazio
'''

class Translator(object):
    '''
    Interactive translator stub for a single language.

    Holds the target language and, for now, only echoes the names of the
    strings that need translation.
    '''

    def __init__(self, language):
        '''
        Constructor

        @param language: the language code this translator works on
        '''
        self._language = language

    def Translate(self, string_names):
        # Placeholder behavior: print the names that still need translating.
        print string_names
[ [ 8, 0, 0.1905, 0.3333, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 3, 0, 0.7143, 0.619, 0, 0.66, 1, 229, 0, 2, 0, 0, 186, 0, 1 ], [ 8, 1, 0.5238, 0.1429, 1, 0.77, ...
[ "'''\nModule which prompts the user for translations and saves them.\n\nTODO: implement\n\n@author: Rodrigo Damazio\n'''", "class Translator(object):\n '''\n classdocs\n '''\n\n def __init__(self, language):\n '''\n Constructor", " '''\n classdocs\n '''", " def __init__(self, language):\n '''...
'''
Module which compares languague files to the master file and detects
issues.

@author: Rodrigo Damazio
'''

import os

from mytracks.parser import StringsParser
import mytracks.history

class Validator(object):
  '''
  Validates translated string files against the master ('en') file: finds
  keys missing on either side and translations outdated with respect to the
  master's Mercurial history.
  '''

  def __init__(self, languages):
    '''
    Builds a strings file validator.

    Params:
    @param languages: a dictionary mapping each language to its corresponding
                      directory
    '''
    self._langs = {}       # language -> parsed strings file (translations)
    self._master = None    # parsed strings file of the master language 'en'
    self._language_paths = languages

    parser = StringsParser()
    for lang, lang_dir in languages.iteritems():
      filename = os.path.join(lang_dir, 'strings.xml')
      parsed_file = parser.Parse(filename)
      # Annotate every parsed string with the hg revisions that touched it.
      mytracks.history.FillMercurialRevisions(filename, parsed_file)
      if lang == 'en':
        # 'en' is the master; everything else is a translation of it.
        self._master = parsed_file
      else:
        self._langs[lang] = parsed_file

    self._Reset()

  def Validate(self):
    '''
    Computes whether all the data in the files for the given languages is
    valid.
    '''
    self._Reset()
    self._ValidateMissingKeys()
    self._ValidateOutdatedKeys()

  def valid(self):
    # Valid iff no category of problem was recorded by Validate().
    return (len(self._missing_in_master) == 0 and
            len(self._missing_in_lang) == 0 and
            len(self._outdated_in_lang) == 0)

  def missing_in_master(self):
    # Map of language -> keys present in that language but not in master.
    return self._missing_in_master

  def missing_in_lang(self):
    # Map of language -> keys present in master but not in that language.
    return self._missing_in_lang

  def outdated_in_lang(self):
    # Map of language -> keys whose master string is newer than the
    # translation.
    return self._outdated_in_lang

  def _Reset(self):
    # These are maps from language to string name list
    self._missing_in_master = {}
    self._missing_in_lang = {}
    self._outdated_in_lang = {}

  def _ValidateMissingKeys(self):
    '''
    Computes whether there are missing keys on either side.
    '''
    master_keys = frozenset(self._master.iterkeys())
    for lang, file in self._langs.iteritems():
      keys = frozenset(file.iterkeys())
      missing_in_master = keys - master_keys
      missing_in_lang = master_keys - keys
      # Only record languages that actually have problems.
      if len(missing_in_master) > 0:
        self._missing_in_master[lang] = missing_in_master
      if len(missing_in_lang) > 0:
        self._missing_in_lang[lang] = missing_in_lang

  def _ValidateOutdatedKeys(self):
    '''
    Computers whether any of the language keys are outdated with relation to
    the master keys.
    '''
    for lang, file in self._langs.iteritems():
      outdated = []
      for key, str in file.iteritems():
        # Get all revisions that touched master and language files for this
        # string.
        # NOTE(review): a key present only in the translation will KeyError
        # here — presumably Validate() is expected to be read together with
        # missing_in_master(); confirm callers tolerate this ordering.
        master_str = self._master[key]
        master_revs = master_str['revs']
        lang_revs = str['revs']
        if not master_revs or not lang_revs:
          print 'WARNING: No revision for %s in %s' % (key, lang)
          continue
        master_file = os.path.join(self._language_paths['en'], 'strings.xml')
        lang_file = os.path.join(self._language_paths[lang], 'strings.xml')
        # Assume that the repository has a single head (TODO: check that),
        # and as such there is always one revision which superceeds all others.
        master_rev = reduce(
            lambda r1, r2: mytracks.history.NewestRevision(master_file, r1, r2),
            master_revs)
        lang_rev = reduce(
            lambda r1, r2: mytracks.history.NewestRevision(lang_file, r1, r2),
            lang_revs)
        # If the master version is newer than the lang version
        if mytracks.history.DoesRevisionSuperceed(lang_file, master_rev,
                                                  lang_rev):
          outdated.append(key)
      if len(outdated) > 0:
        self._outdated_in_lang[lang] = outdated
[ [ 8, 0, 0.0304, 0.0522, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0696, 0.0087, 0, 0.66, 0.25, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0783, 0.0087, 0, 0.66, ...
[ "'''\nModule which compares languague files to the master file and detects\nissues.\n\n@author: Rodrigo Damazio\n'''", "import os", "from mytracks.parser import StringsParser", "import mytracks.history", "class Validator(object):\n\n def __init__(self, languages):\n '''\n Builds a strings file valida...
'''
Module for dealing with resource files (but not their contents).

@author: Rodrigo Damazio
'''

import os.path
from glob import glob
import re

MYTRACKS_RES_DIR = 'MyTracks/res'
ANDROID_MASTER_VALUES = 'values'
ANDROID_VALUES_MASK = 'values-*'


def GetMyTracksDir():
  '''
  Returns the directory in which the MyTracks directory is located.

  Walks up from the current working directory until a directory containing
  MyTracks/res is found.

  @raise RuntimeError: when the filesystem root is reached without finding
                       the project
  '''
  path = os.getcwd()
  while not os.path.isdir(os.path.join(path, MYTRACKS_RES_DIR)):
    # Fixed: detect the root portably (dirname is a fixpoint there) instead
    # of comparing against '/', and raise a real exception — the original
    # raised a bare string, which is a TypeError on modern Python.
    if path == os.path.dirname(path):
      raise RuntimeError('Not in My Tracks project')
    # Go up one level
    path = os.path.dirname(path)
  return path


def GetAllLanguageFiles():
  '''
  Returns a mapping from all found languages to their respective directories.

  @raise RuntimeError: if no language directories or no master directory are
                       found
  '''
  mytracks_path = GetMyTracksDir()
  res_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_VALUES_MASK)
  language_dirs = glob(res_dir)
  master_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR,
                            ANDROID_MASTER_VALUES)

  # Fixed: real exceptions instead of (invalid) string raises.
  if len(language_dirs) == 0:
    raise RuntimeError('No languages found!')
  if not os.path.isdir(master_dir):
    raise RuntimeError("Couldn't find master file")

  # 'values-pt-rBR' -> language code 'pt-rBR' ('lang_dir' renamed from 'dir'
  # to stop shadowing the builtin).
  language_tuples = [(re.findall(r'.*values-([A-Za-z-]+)', lang_dir)[0],
                      lang_dir)
                     for lang_dir in language_dirs]
  language_tuples.append(('en', master_dir))
  return dict(language_tuples)
[ [ 8, 0, 0.0667, 0.1111, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1333, 0.0222, 0, 0.66, 0.125, 79, 0, 1, 0, 0, 79, 0, 0 ], [ 1, 0, 0.1556, 0.0222, 0, 0.66, ...
[ "'''\nModule for dealing with resource files (but not their contents).\n\n@author: Rodrigo Damazio\n'''", "import os.path", "from glob import glob", "import re", "MYTRACKS_RES_DIR = 'MyTracks/res'", "ANDROID_MASTER_VALUES = 'values'", "ANDROID_VALUES_MASK = 'values-*'", "def GetMyTracksDir():\n '''\n...
'''
Module which parses a string XML file.

@author: Rodrigo Damazio
'''

from xml.parsers.expat import ParserCreate
import re
#import xml.etree.ElementTree as ET

class StringsParser(object):
  '''
  Parser for string XML files.

  This object is not thread-safe and should be used for parsing a single file at
  a time, only.
  '''

  def Parse(self, file):
    '''
    Parses the given file and returns a dictionary mapping keys to an object
    with attributes for that key, such as the value, start/end line and
    explicit revisions.

    In addition to the standard XML format of the strings file, this parser
    supports an annotation inside comments, in one of these formats:
    <!-- KEEP_PARENT name="bla" -->
    <!-- KEEP_PARENT name="bla" rev="123456789012" -->
    Such an annotation indicates that we're explicitly inheriting form the
    master file (and the optional revision says that this decision is
    compatible with the master file up to that revision).

    @param file: the name of the file to parse
    '''
    self._Reset()

    # Unfortunately expat is the only parser that will give us line numbers
    self._xml_parser = ParserCreate()
    self._xml_parser.StartElementHandler = self._StartElementHandler
    self._xml_parser.EndElementHandler = self._EndElementHandler
    self._xml_parser.CharacterDataHandler = self._CharacterDataHandler
    self._xml_parser.CommentHandler = self._CommentHandler

    # NOTE(review): opened in text mode — fine on Python 2; on Python 3
    # expat's ParseFile expects a binary file. Confirm before porting.
    file_obj = open(file)
    self._xml_parser.ParseFile(file_obj)
    file_obj.close()

    return self._all_strings

  def _Reset(self):
    # State for the <string> element currently being parsed, if any.
    self._currentString = None
    self._currentStringName = None
    self._currentStringValue = None
    # Accumulated result: name -> dict of string attributes.
    self._all_strings = {}

  def _StartElementHandler(self, name, attrs):
    # Only named <string> elements are of interest.
    if name != 'string':
      return
    if 'name' not in attrs:
      return
    # <string> elements are not expected to nest.
    assert not self._currentString
    assert not self._currentStringName
    self._currentString = {
        'startLine' : self._xml_parser.CurrentLineNumber,
    }
    # An explicit rev attribute pre-seeds the revision list.
    if 'rev' in attrs:
      self._currentString['revs'] = [attrs['rev']]
    self._currentStringName = attrs['name']
    self._currentStringValue = ''

  def _EndElementHandler(self, name):
    if name != 'string':
      return
    assert self._currentString
    assert self._currentStringName
    # Close out the current string and store it under its name.
    self._currentString['value'] = self._currentStringValue
    self._currentString['endLine'] = self._xml_parser.CurrentLineNumber
    self._all_strings[self._currentStringName] = self._currentString
    self._currentString = None
    self._currentStringName = None
    self._currentStringValue = None

  def _CharacterDataHandler(self, data):
    # Character data may arrive in several chunks; accumulate it.
    if not self._currentString:
      return
    self._currentStringValue += data

  # Matches the KEEP_PARENT comment annotation described in Parse().
  _KEEP_PARENT_REGEX = re.compile(
      r'\s*KEEP_PARENT\s+'
      r'name\s*=\s*[\'"]?(?P<name>[a-z0-9_]+)[\'"]?'
      r'(?:\s+rev=[\'"]?(?P<rev>[0-9a-f]{12})[\'"]?)?\s*',
      re.MULTILINE | re.DOTALL)

  def _CommentHandler(self, data):
    keep_parent_match = self._KEEP_PARENT_REGEX.match(data)
    if not keep_parent_match:
      return
    # Record a placeholder entry marking that the parent's value is kept.
    name = keep_parent_match.group('name')
    self._all_strings[name] = {
        'keepParent' : True,
        'startLine' : self._xml_parser.CurrentLineNumber,
        'endLine' : self._xml_parser.CurrentLineNumber
    }
    rev = keep_parent_match.group('rev')
    if rev:
      self._all_strings[name]['revs'] = [rev]
[ [ 8, 0, 0.0261, 0.0435, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0609, 0.0087, 0, 0.66, 0.3333, 573, 0, 1, 0, 0, 573, 0, 0 ], [ 1, 0, 0.0696, 0.0087, 0, 0.66...
[ "'''\nModule which parses a string XML file.\n\n@author: Rodrigo Damazio\n'''", "from xml.parsers.expat import ParserCreate", "import re", "class StringsParser(object):\n '''\n Parser for string XML files.\n\n This object is not thread-safe and should be used for parsing a single file at\n a time, only.\n...
# Repeatedly downloads a URL and reports the total traffic in bytes.
import socket
import urllib

__author__="uli"
__date__ ="$12.01.2009 16:48:52$"

if __name__ == "__main__":
    url = raw_input("URL: ")
    count = int(raw_input("Count: "))
    traffic = 0  # Traffic in bytes
    for i in xrange(count):
        print("Starting %ith download" % (i+1))
        # Fixed: the original rebound the name 'socket', shadowing the
        # imported socket module; use a dedicated name and close the
        # response to avoid leaking the connection.
        response = urllib.urlopen(url)
        d = response.read()
        response.close()
        traffic += len(d)
        print("Finished %ith download" % (i+1))
        print("Traffic: %i bytes" % traffic)
    print("Overall traffic: %i bytes" % traffic)
[ [ 1, 0, 0.0556, 0.0556, 0, 0.66, 0, 687, 0, 1, 0, 0, 687, 0, 0 ], [ 1, 0, 0.1111, 0.0556, 0, 0.66, 0.25, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 14, 0, 0.2222, 0.0556, 0, 0...
[ "import socket", "import urllib", "__author__=\"uli\"", "__date__ =\"$12.01.2009 16:48:52$\"", "if __name__ == \"__main__\":\n url = raw_input(\"URL: \")\n count = int(raw_input(\"Count: \"))\n traffic = 0 #Traffic in bytes\n for i in xrange(count):\n print(\"Starting %ith download\" % (i...
# -*- coding: UTF-8 -*- # # gameOver.py # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. ''' Created on 27/06/2012 @author: Juan Pablo Moreno y Alejandro Duarte ''' from random import randint import pygame from funcionesBasicas import Funciones as funciones class GameOver(): def __init__(self, ventana): self.ventana = ventana self.imagen = randint(1,3) self.imagen_fondo = funciones.cargarImagen("imagenes/game over "+str(self.imagen)+".jpg")#final con imagen aleatoria self.musica_fondo = "sonido/PUPPET OF THE MAGUS.ogg" self.alpha = 0 self.imagen_fondo.set_alpha(self.alpha) def blit_alpha(self, ventana, imagen, ubicacion, opacidad): """Metodo que controla la transparencia del fade-in""" x = ubicacion[0] y = ubicacion[1] temp = pygame.Surface((imagen.get_width(), imagen.get_height())).convert() temp.blit(ventana, (-x, -y)) temp.blit(imagen, (0,0)) temp.set_alpha(opacidad) ventana.blit(temp, ubicacion) def mainGameOver(self): funciones.cargarMusica(self.musica_fondo) pygame.mixer.music.play(-1) while True: if self.alpha <= 255: self.alpha += 0.05 self.blit_alpha(self.ventana, self.imagen_fondo, (0,0), int(self.alpha)) for evento in pygame.event.get(): if (evento.type == pygame.QUIT or evento.type == pygame.KEYDOWN) and self.alpha >= 50: pygame.mixer.music.stop() return 0 pygame.display.update() return 0
[ [ 8, 0, 0.2672, 0.069, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.3103, 0.0172, 0, 0.66, 0.25, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.3276, 0.0172, 0, 0.66, ...
[ "'''\nCreated on 27/06/2012\n@author: Juan Pablo Moreno y Alejandro Duarte\n'''", "from random import randint", "import pygame", "from funcionesBasicas import Funciones as funciones", "class GameOver():\n\t\n\tdef __init__(self, ventana):\n\t\tself.ventana = ventana\n\t\tself.imagen = randint(1,3)\n\t\tself...
# -*- coding: UTF-8 -*-
#
# menu.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
'''
Created on 30/06/2012

@author: Juan Pablo Moreno y Alejandro Duarte
'''
import sys

import pygame

from funcionesBasicas import Funciones as funciones
from objetos import Cursor, Boton

class Menu_UDTanks():
	"""Main menu of the game: five buttons plus a background music loop."""

	def __init__(self, pantalla):
		self.ventana = pantalla
		self.imagen_fondo = funciones.cargarImagen("imagenes/menu.jpg")
		self.musica_fondo = "sonido/TWIN CRESCENT.ogg"
		self.empezar = Boton(funciones.cargarImagen("imagenes/boton1.png"),125,450)  # start level 1
		self.continuar = Boton(funciones.cargarImagen("imagenes/boton2.png"),325,450)  # load a saved player
		self.puntajes = Boton(funciones.cargarImagen("imagenes/boton3.png"),525,450)  # show high scores
		self.creditos = Boton(funciones.cargarImagen("imagenes/boton4.png"),225,500)  # show credits
		self.salir = Boton(funciones.cargarImagen("imagenes/boton5.png"),425,500)  # quit
		self.cursor = Cursor()
		self.nivelEnEjecucion = None  # screen/level currently running, if any

	def mainMenu(self):
		"""Runs the menu event loop and dispatches to the selected screen."""
		funciones.cargarMusica(self.musica_fondo)
		pygame.mixer.music.play(-1)
		reloj = pygame.time.Clock()
		while True:
			for evento in pygame.event.get():
				# BUG FIX: the original compared evento.type against
				# pygame.K_ESCAPE, which is a *key* constant, not an event
				# type, so pressing ESC never quit. Key presses arrive as
				# KEYDOWN events whose .key attribute holds the key code.
				if (evento.type == pygame.QUIT or
						(evento.type == pygame.KEYDOWN and
						 evento.key == pygame.K_ESCAPE)):
					pygame.mixer.music.stop()
					pygame.quit()
					sys.exit()
				elif evento.type == pygame.MOUSEBUTTONDOWN:
					if self.cursor.colliderect(self.empezar):
						# Start a fresh level 1 for the named player.
						try:
							import nivel_1
							nombre = funciones.ingresarUsuario()
							self.nivelEnEjecucion = nivel_1.Nivel1(self.ventana, nombre)
							pygame.mixer.music.stop()
							self.nivelEnEjecucion.mainNivel1()  # run level 1
							self.nivelEnEjecucion = None
							del(nivel_1)
							funciones.cargarMusica(self.musica_fondo)
							pygame.mixer.music.play(-1)
						except(ImportError):
							print("No se encuentra el módulo correspondeinte")
					elif self.cursor.colliderect(self.continuar):
						# Resume level 1 from the player's saved data.
						try:
							import nivel_1
							nombre = funciones.ingresarUsuario()
							self.nivelEnEjecucion = nivel_1.Nivel1(self.ventana, nombre)
							self.nivelEnEjecucion.cargarDatos(nombre)  # load saved data
							pygame.mixer.music.stop()
							self.nivelEnEjecucion.mainNivel1()
							self.nivelEnEjecucion = None
							del(nivel_1)
							funciones.cargarMusica(self.musica_fondo)
							pygame.mixer.music.play(-1)
						except(ImportError):
							print("No se encuentra el módulo correspondeinte")
					elif self.cursor.colliderect(self.puntajes):
						# Show the high-scores screen.
						try:
							import puntajes
							self.nivelEnEjecucion = puntajes.Puntajes(self.ventana)
							pygame.mixer.music.stop()
							self.nivelEnEjecucion.mainPuntajes()  # show the scores window
							self.nivelEnEjecucion = None
							del(puntajes)
							funciones.cargarMusica(self.musica_fondo)
							pygame.mixer.music.play(-1)
						except(ImportError):
							print("No se encuentra el módulo correspondeinte")
					elif self.cursor.colliderect(self.creditos):
						# Show the credits screen.
						try:
							import creditos
							self.nivelEnEjecucion = creditos.Creditos(self.ventana)
							pygame.mixer.music.stop()
							self.nivelEnEjecucion.mainCreditos()  # open the credits window
							self.nivelEnEjecucion = None
							del(creditos)
							funciones.cargarMusica(self.musica_fondo)
							pygame.mixer.music.play(-1)
						except(ImportError):
							print("No se encuentra el módulo correspondeinte")
					elif self.cursor.colliderect(self.salir):
						pygame.mixer.music.stop()
						pygame.quit()  # close the game
						sys.exit()
			# Redraw the menu each frame at 60 FPS.
			self.cursor.actualizar()
			self.ventana.blit(self.imagen_fondo, (0,0))
			self.ventana.blit(self.empezar.imagen, self.empezar.rect)
			self.ventana.blit(self.continuar.imagen, self.continuar.rect)
			self.ventana.blit(self.puntajes.imagen, self.puntajes.rect)
			self.ventana.blit(self.creditos.imagen, self.creditos.rect)
			self.ventana.blit(self.salir.imagen, self.salir.rect)
			pygame.display.update()
			reloj.tick(60)
		return 0
[ [ 8, 0, 0.1281, 0.0331, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1488, 0.0083, 0, 0.66, 0.2, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.157, 0.0083, 0, 0.66, ...
[ "'''\nCreated on 30/06/2012\n@author: Juan Pablo Moreno y Alejandro Duarte\n'''", "import sys", "import pygame", "from funcionesBasicas import Funciones as funciones", "from objetos import Cursor, Boton", "class Menu_UDTanks():\n\t\"\"\"Menu principal del juego, tiene 5 botones y un sonido en reproduccion...
# -*- coding: UTF-8 -*- # # intro.py # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. ''' Created on 10/07/2012 @author: Juan Pablo Moreno y Alejandro Duarte ''' import pygame from funcionesBasicas import Funciones as funciones class Intro(): """Introduccion al juego, muestra los logotipos de la universidad y de Pygroup. Importa y ejecuta el menu principal. Esta es la clase que inicia la ejecucion del programa. """ def __init__(self): pygame.init() self.ventana = pygame.display.set_mode(funciones.VENTANA) self.imagen_fondoA = funciones.cargarImagen("imagenes/escudo_UD.png") self.imagen_fondoB = funciones.cargarImagen("imagenes/Pygroup_Logo.jpg") self.tiempo = 400 self.juego = None def introduccion(self): pygame.init() pygame.display.set_caption("UDTanks 2.0") reloj = pygame.time.Clock() while True: self.tiempo-=1 if self.tiempo > 200: self.ventana.blit(self.imagen_fondoA, (0,0)) elif self.tiempo > 0 and self.tiempo <= 200: self.ventana.blit(self.imagen_fondoB, (0,0)) else: try: import menu self.juego = menu.Menu_UDTanks(self.ventana) self.juego.mainMenu() except(ImportError): print("No se encuentra el juego") return 0 pygame.display.update() reloj.tick(60) return 0 if __name__ == '__main__': jugar = Intro() jugar.introduccion()
[ [ 8, 0, 0.25, 0.0645, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.2903, 0.0161, 0, 0.66, 0.25, 87, 0, 1, 0, 0, 87, 0, 0 ], [ 1, 0, 0.3065, 0.0161, 0, 0.66, ...
[ "'''\nCreated on 10/07/2012\n@author: Juan Pablo Moreno y Alejandro Duarte\n'''", "import pygame", "from funcionesBasicas import Funciones as funciones", "class Intro():\n\t\"\"\"Introduccion al juego, muestra los logotipos de la universidad y de Pygroup.\n\tImporta y ejecuta el menu principal.\n\tEsta es la ...
# -*- coding: UTF-8 -*-
#
# funciones.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
'''
Created on 10/05/2012

@author: Juan Pablo Moreno y Alejandro Duarte
'''
import pygame
from tkinter import Tk, Entry, Button, StringVar
from math import degrees, radians, sin, cos, atan2

class Funciones():
	'''
	Constants and static helpers for general use across the program, such as:
	cargarImagen (load image), cargarSonido (load sound effect), cargarMusica
	(load music), dibujarTexto (render text), agregarImagen (slice a sprite
	sheet), direccionPunto (angle towards a point), vectorEnX / vectorEnY
	(vector components).
	'''

	# Constants for the window width and height
	VENTANA = (ANCHO, ALTO) = (800, 600)

	@classmethod
	def ingresarUsuario(cls):
		# Shows a small Tk dialog asking for the player/save name and
		# returns the text that was entered.
		cls.nombre=""
		def salir():
			# NOTE(review): defined but never wired to any widget.
			root.quit()
		def cargarArchivo():
			# Store the entered name and close the dialog.
			cls.nombre=a.get()
			root.destroy()
		def obtenerN():
			# Read the entry's current value.
			n=a.get()
			return (n)
		root = Tk()
		root.title('CargarDatos')
		a = StringVar()
		atxt = Entry(root, textvariable=a,width=20)
		cargar = Button(root, text="Cargar Archivo", command=cargarArchivo,width=15)
		salirB= Button(root, text ="Salir", command=root.destroy, width=10)
		atxt.grid(row=0, column=0)
		cargar.grid(row=1, column=0)
		salirB.grid(row=1,column=1)
		root.mainloop()
		return (obtenerN())

	# General-purpose helpers usable by any object
	@classmethod
	def cargarImagen(cls, archivo):
		"""
		Creates an object containing the given image; if the image cannot
		be found the program cannot start.
		"""
		try:
			imagen = pygame.image.load(archivo).convert_alpha()
		except(pygame.error):  # in case of error
			print("No se pudo cargar la imagen: ", archivo)
			pygame.quit()
			raise(SystemExit)
		return imagen

	@classmethod
	def cargarSonido(cls, archivo):
		"""Creates a Sound object from the given sound-effect file."""
		try:
			sonido = pygame.mixer.Sound(archivo)
		except(pygame.error):
			# Sound is optional: report the problem and return None.
			print("No se pudo cargar el sonido: ", archivo)
			sonido = None
		return sonido

	@classmethod
	def cargarMusica(cls, archivo):
		"""Loads the given music file into the mixer for playback."""
		try:
			pygame.mixer.music.load(archivo)
		except(pygame.error):
			print("No se pudo cargar la cancion: ", archivo)

	@classmethod
	def dibujarTexto(cls, texto, posx, posy, color):
		"""
		Given a text and a position, generates an image to draw on screen
		plus its containing rectangle (centered at posx, posy).
		"""
		fuente = pygame.font.Font("DroidSans.ttf", 20)
		salida = pygame.font.Font.render(fuente, texto, 1, color)
		salida_rect = salida.get_rect()
		salida_rect.centerx = posx
		salida_rect.centery = posy
		return salida, salida_rect

	@classmethod
	def agregarImagen(cls, ruta, ancho, alto):
		"""Cuts the sub-images out of a sprite-sheet image for animation."""
		subimagenes = []
		imagen_completa = cls.cargarImagen(ruta)
		ancho_total, alto_total = imagen_completa.get_size()
		# Walk the sheet row by row, slicing ancho x alto tiles.
		for i in range(int(alto_total / alto)):
			for j in range(int(ancho_total / ancho)):
				subimagenes.append(imagen_completa.subsurface(pygame.Rect(j * ancho, i * alto, ancho, alto)))
		return subimagenes

	@classmethod
	def direccionPunto(cls, x, y, x2_y2,):
		"""Returns the direction, in degrees, from (x, y) towards a point."""
		x2, y2 = x2_y2
		dist_x = x2 - x
		dist_y = y2 - y
		# Negated — presumably to map math coordinates onto the screen's
		# downward y axis; TODO confirm against callers.
		direccion = -1 * degrees(atan2(dist_y, dist_x))
		return direccion

	@classmethod
	def vectorEnX(cls, dist, ang):
		"""Returns the x component of a vector."""
		return cos(radians(ang)) * dist

	@classmethod
	def vectorEnY(cls, dist, ang):
		"""Returns the y component of a vector (negated for screen coords)."""
		return sin(radians(ang)) * -dist
[ [ 8, 0, 0.1092, 0.0282, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1338, 0.007, 0, 0.66, 0.25, 87, 0, 1, 0, 0, 87, 0, 0 ], [ 1, 0, 0.1408, 0.007, 0, 0.66, ...
[ "'''\nCreated on 10/05/2012\n@author: Juan Pablo Moreno y Alejandro Duarte\n'''", "import pygame", "from tkinter import Tk, Entry, Button, StringVar", "from math import degrees, radians, sin, cos, atan2", "class Funciones():\n\t'''\n\tConstantes y metodos estaticos para el uso general del programa.\n\tTales...
# -*- coding: UTF-8 -*- # # nivel_1.py # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. ''' Created on 10/05/2012 @author: Juan Pablo Moreno y Alejandro Duarte ''' from funcionesBasicas import Funciones as funciones from objetos import * class Nivel1(): """Escena numero 1 del juego""" def __init__(self, pantalla, nombre): self.ventana = pantalla self.imagen_fondo = funciones.cargarImagen("imagenes/nivel 1/fondo_Nv_1.png") self.musica_fondo = "sonido/CROSSFIRE BARRAGE.ogg" self.base_tanque = Base_de_Tanque("imagenes/nivel 1/tanque_base_Nv_1.png") self.rotor_tanque = Rotor_de_Tanque("imagenes/nivel 1/tanque_rotor_Nv_1.png", "sonido/Explosion01.ogg", nombre) self.pos_mouse = [] self.bot_mouse = 0 self.enemigos = [] self.balas = [] self.bonuscreado=[] self.explosiones = [] self.alarma = 1 self.salir = False def controlEnemigos(self): self.alarma -= 0.1 if self.alarma <= 0: xrandom = random.randint(0, funciones.VENTANA[0]) yrandom = random.randint(0, funciones.VENTANA[1]) if not (xrandom > 0 and xrandom < funciones.VENTANA[0]) \ and (yrandom > 0 and yrandom < funciones.VENTANA[1]): enemigo = Enemigo_Tanque(xrandom, yrandom, "imagenes/nivel 1/tanque_enemigo_Nv_1.png", "sonido/Explosion01.ogg") self.enemigos.append(enemigo) self.alarma = 1 def actualizarEnemigos(self): for enemigo in self.enemigos: enemigo.actualizar(self.base_tanque.posicion) bala = enemigo.disparar() self.ventana.blit(enemigo.postimagen, enemigo.rect) if bala is not False: self.balas.append(bala) if self.dibujar_Balas(self.ventana, self.base_tanque.rect): 
self.rotor_tanque.vida -= 20 def controlExplosiones(self): for i in range(len(self.explosiones)): if self.explosiones[i].actualizar(self.ventana): del(self.explosiones[i]) break def dibujar_Balas(self, ventana, otro): for i in range(len(self.balas)): if self.balas[i].actualizar(ventana): del(self.balas[i]) break if self.balas[i].rect.colliderect(otro): del(self.balas[i]) return True def controlBonuses(self): bonus = None for i in range(len(self.bonuscreado)): if self.bonuscreado[i] == None: del(self.bonuscreado[i]) break elif self.bonuscreado[i].actualizar(self.ventana, self.base_tanque.rect): bonus = self.bonuscreado.pop(i) break if bonus != None: if bonus.tipo == 1: self.rotor_tanque.vida += 10 elif bonus.tipo == 2: self.rotor_tanque.balasPorDisparar += 5 elif bonus.tipo == 3: self.rotor_tanque.tiempo += 500 elif bonus.tipo == 4: j=0 for j in range(len(self.enemigos)): if(j!=0): j=0 self.rotor_tanque.puntajeNivel += len(self.enemigos) self.bonuscreado.append(self.enemigos[j].darBonus()) sale = self.enemigos.pop(j) explosion = Explosion(sale.rect.center) self.explosiones.append(explosion) def controlColisiones(self): for i in range(len(self.rotor_tanque.balas_disparadas)): for j in range(len(self.enemigos)): if self.enemigos[j].rect.colliderect(self.rotor_tanque.balas_disparadas[i].rect): self.bonuscreado.append(self.enemigos[j].darBonus()) sale = self.enemigos.pop(j) del(self.rotor_tanque.balas_disparadas[i]) explosion = sale.destruir() #Explosion(sale.rect.center) self.explosiones.append(explosion) self.rotor_tanque.puntajeNivel += 1 del(sale) break break def guardarDatos(self, nombre): nombreJ = nombre + ".pysave" archivo = open(nombreJ, "w") vidaJ = self.rotor_tanque.vida tiempoJ = self.rotor_tanque.tiempo balasJ = self.rotor_tanque.balasPorDisparar puntajeJ = self.rotor_tanque.puntajeNivel archivo.write(str(vidaJ) + "\n") archivo.write(str(tiempoJ) + "\n") archivo.write(str(balasJ) + "\n") archivo.write(str(puntajeJ) + "\n") archivo.close() def 
cargarDatos(self, nombre): nombreJ = nombre + ".pysave" try: archivo = open(nombreJ) lis = archivo.readlines() self.rotor_tanque.vida = int(lis[0]) self.rotor_tanque.tiempo = int(lis[1]) self.rotor_tanque.balasPorDisparar = int(lis[2]) self.rotor_tanque.puntajeNivel = int(lis[3]) archivo.close() except(IOError): print("No hay datos registrados con ese nombre") def terminarJuego(self): if self.rotor_tanque.vida <= 0 or self.rotor_tanque.tiempo <= 0: pygame.mixer.music.stop() if self.rotor_tanque.puntajeNivel > 0: try: puntajes = open("puntajes.pyfile", 'a') puntajes.writelines(str(self.rotor_tanque.nombreJugador) + "\t\t" + str(self.rotor_tanque.puntajeNivel) + "\n") puntajes.close() except(IOError): pass try: import gameOver terminar = gameOver.GameOver(self.ventana) terminar.mainGameOver() del(gameOver) return True except(ImportError): print("No se encuentra el módulo correspondiente") return True else: return False def mainNivel1(self): reloj = pygame.time.Clock() pygame.key.set_repeat(1,25) funciones.cargarMusica(self.musica_fondo) pygame.mixer.music.play(-1) while True: self.pos_mouse = pygame.mouse.get_pos() self.bot_mouse = pygame.mouse.get_pressed() self.rotor_tanque.tiempo-=1 self.salir = self.terminarJuego() if self.salir == True: return 0 #Seccion de actualizacion de eventos for evento in pygame.event.get(): if evento.type == pygame.QUIT or evento.type == pygame.K_ESCAPE: pygame.mixer.music.stop() self.guardarDatos(self.rotor_tanque.nombreJugador) return 0 elif evento.type == pygame.KEYDOWN: self.base_tanque.actualizar(evento) if evento.type == pygame.MOUSEBUTTONDOWN: self.rotor_tanque.disparar(evento.button) #Seccion de dibujo self.ventana.blit(self.imagen_fondo, (0,0)) self.ventana.blit(self.base_tanque.postimagen, self.base_tanque.rect) self.rotor_tanque.actualizar(self.pos_mouse, self.ventana, self.base_tanque.rect.center) self.controlBonuses() self.controlEnemigos() self.actualizarEnemigos() self.controlExplosiones() self.controlColisiones() 
self.rotor_tanque.dibujar_Balas(self.ventana) self.rotor_tanque.actualizar1(self.ventana) pygame.display.update() reloj.tick(60) return 0
[ [ 8, 0, 0.0738, 0.019, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0905, 0.0048, 0, 0.66, 0.3333, 262, 0, 1, 0, 0, 262, 0, 0 ], [ 1, 0, 0.0952, 0.0048, 0, 0.66,...
[ "'''\nCreated on 10/05/2012\n@author: Juan Pablo Moreno y Alejandro Duarte\n'''", "from funcionesBasicas import Funciones as funciones", "from objetos import *", "class Nivel1():\n\t\"\"\"Escena numero 1 del juego\"\"\"\n\t\n\tdef __init__(self, pantalla, nombre):\n\t\tself.ventana = pantalla\n\t\tself.imagen...
# -*- coding: UTF-8 -*- # # puntajes.py # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. ''' Created on 8/07/2012 @author: Juan Pablo Moreno y Alejandro Duarte ''' import pygame from funcionesBasicas import Funciones as funciones class Puntajes(): def __init__(self, pantalla): self.ventana = pantalla self.imagen_fondo = funciones.cargarImagen("imagenes/puntajes.jpg") self.musica_fondo = "sonido/EXTEND SKY.ogg" self.color_texto=[255,255,255] self.puntajes = [] def cargarPuntajes(self): try: puntajes = open("puntajes.pyfile", 'r') lista = puntajes.readlines() puntajes.close() except(IOError): lista = "NO SE ENCONTRARON PUNTAJES REGISTRADOS".split() return lista def mainPuntajes(self): funciones.cargarMusica(self.musica_fondo) pygame.mixer.music.play(-1) self.puntajes = self.cargarPuntajes() while True: for evento in pygame.event.get(): if evento.type == pygame.QUIT or evento.type == pygame.KEYDOWN: pygame.mixer.music.stop() return 0 self.ventana.blit(self.imagen_fondo, (0,0)) for i in range(1, len(self.puntajes)+1): imagen, rect = funciones.dibujarTexto(self.puntajes[i-1].expandtabs(), funciones.VENTANA[0]/2, (funciones.VENTANA[1]/20) * i, self.color_texto) self.ventana.blit(imagen, rect) pygame.display.update() return 0
[ [ 8, 0, 0.25, 0.0645, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.2903, 0.0161, 0, 0.66, 0.3333, 87, 0, 1, 0, 0, 87, 0, 0 ], [ 1, 0, 0.3065, 0.0161, 0, 0.66, ...
[ "'''\nCreated on 8/07/2012\n@author: Juan Pablo Moreno y Alejandro Duarte\n'''", "import pygame", "from funcionesBasicas import Funciones as funciones", "class Puntajes():\n\t\n\tdef __init__(self, pantalla):\n\t\tself.ventana = pantalla\n\t\tself.imagen_fondo = funciones.cargarImagen(\"imagenes/puntajes.jpg\...
# -*- coding: UTF-8 -*- # # objetos.py # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. ''' Created on 10/05/2012 @author: Juan Pablo Moreno y Alejandro Duarte ''' import pygame, random from pygame.locals import * from funcionesBasicas import Funciones as funciones # ---------------------------------------------- # Clases # ---------------------------------------------- class Cursor(pygame.Rect): def __init__(self): pygame.Rect.__init__(self,0,0,1,1) def actualizar(self): self.left, self.top = pygame.mouse.get_pos() class Boton(pygame.sprite.Sprite): def __init__(self, img, x, y): self.imagen = img self.rect = self.imagen.get_rect() self.rect.left, self.rect.top = (x, y) class Jugador_Control(): """objeto controlador del momento del juego""" def __init__(self, nombreJ = "Jugador"): """definicion de algunas variables de tipo general""" self.nombreJugador = nombreJ self.puntajeTotal = 0 self.color_texto=[0,0,0] self.puntajeNivel = 0 self.nivel = 0 self.vida = 100 self.balasPorDisparar = 20 self.tiempo = 10000 def darValores(self, nombreJ, vidaJ, tiempoJ, balasJ, puntajeJ): """Realiza la persistencia de los datos""" self.nombreJugador = nombreJ self.puntajeTotal = 0 self.color_texto=[0,0,0] self.puntajeNivel = puntajeJ self.nivel = 0 self.vida = vidaJ self.balasPorDisparar = balasJ self.tiempo = tiempoJ def actualizar1(self, ventana): """dibujado en pantalla del texto""" self.puntos_img, self.puntos_rect = funciones.dibujarTexto("Puntos: " +str(self.puntajeNivel), 64, 16, self.color_texto) self.vidas_img, self.vidas_rect = 
funciones.dibujarTexto("Vida: " +str(self.vida), 64, 32, self.color_texto) self.balas_img, self.balas_rect = funciones.dibujarTexto("Balas: " + str(self.balasPorDisparar), 64, 48, self.color_texto) self.tiempo_img, self.tiempo_rect = funciones.dibujarTexto("Tiempo: " + str(int(self.tiempo/100)), 64, 64, self.color_texto) ventana.blit(self.puntos_img, self.puntos_rect) ventana.blit(self.vidas_img, self.vidas_rect) ventana.blit(self.balas_img, self.balas_rect) ventana.blit(self.tiempo_img, self.tiempo_rect) class Base_de_Tanque(pygame.sprite.Sprite): """Objeto tanque del primer nivel""" def __init__(self, ruta_img): pygame.sprite.Sprite.__init__(self) self.preimagen = funciones.cargarImagen(ruta_img) self.postimagen = self.preimagen self.rect = self.preimagen.get_rect() self.posicion=[100,100] self.rect.center = self.posicion self.velocidad = 5 def actualizar(self, evento): self.horizontal = int(evento.key == K_RIGHT) - int(evento.key == K_LEFT) self.vertical = int(evento.key == K_DOWN) - int(evento.key == K_UP) if self.horizontal != 0: self.posicion[0] += self.velocidad * self.horizontal if self.horizontal > 0: self.postimagen = pygame.transform.rotate(self.preimagen,0) elif self.horizontal < 0: self.postimagen = pygame.transform.rotate(self.preimagen,180) elif self.vertical != 0: self.posicion[1] += self.velocidad * self.vertical if self.vertical > 0: self.postimagen = pygame.transform.rotate(self.preimagen,270) elif self.vertical < 0: self.postimagen = pygame.transform.rotate(self.preimagen,90) self.posicion = [min(max(self.posicion[0], 0), funciones.VENTANA[0]), min(max(self.posicion[1], 0), funciones.VENTANA[1])] self.rect.center = self.posicion class Rotor_de_Tanque(pygame.sprite.Sprite, Jugador_Control): """Objeto tanque del primer nivel""" def __init__(self, ruta_img, ruta_snd, nombre): pygame.sprite.Sprite.__init__(self) Jugador_Control.__init__(self, nombre) self.preimagen = funciones.cargarImagen(ruta_img) self.postimagen = self.preimagen self.rect = 
self.preimagen.get_rect() self.disparo = funciones.cargarSonido(ruta_snd) self.balas_disparadas=[] def actualizar(self, mouse, ventana, baseTanque): self.angulo = funciones.direccionPunto(self.rect.centerx, self.rect.centery, mouse) self.postimagen = pygame.transform.rotate(self.preimagen, self.angulo) self.rect = self.postimagen.get_rect() self.rect.center = baseTanque ventana.blit(self.postimagen, self.rect) def disparar(self, boton_mouse): """dispara una bala""" if boton_mouse == 1: if self.balasPorDisparar > 0: bala = Bala("imagenes/nivel 1/bala.png", self.rect.center, self.angulo) self.disparo.play() self.balas_disparadas.append(bala) self.balasPorDisparar -= 1 def dibujar_Balas(self, ventana): for i in range(len(self.balas_disparadas)): if self.balas_disparadas[i].actualizar(ventana): del(self.balas_disparadas[i]) break class Enemigo_Tanque(pygame.sprite.Sprite): """Objeto enemigo del primer nivel""" def __init__(self, x, y, ruta_img, ruta_snd): pygame.sprite.Sprite.__init__(self) self.preimagen = funciones.cargarImagen(ruta_img) self.postimagen = self.preimagen self.rect = self.preimagen.get_rect() self.posicion = [x,y] self.rect.center = self.posicion self.disparo = funciones.cargarSonido(ruta_snd) self.velocidad = 0.5 self.frecuencia = random.randrange(250,500) self.alarma = self.frecuencia self.balas_disparadas = [] def actualizar(self, direccion): self.angulo = funciones.direccionPunto(self.posicion[0], self.posicion[1], direccion) self.postimagen = pygame.transform.rotate(self.preimagen, self.angulo) self.rect = self.postimagen.get_rect() self.posicion[0] += funciones.vectorEnX(self.velocidad, self.angulo) self.posicion[1] += funciones.vectorEnY(self.velocidad, self.angulo) self.rect.center = self.posicion self.alarma -= 1 def disparar(self): if self.alarma <= 0: bala = Bala("imagenes/nivel 1/bala.png", self.rect.center, self.angulo) self.disparo.play() self.frecuencia -= 50 if self.frecuencia <= 50: self.frecuencia = 500 self.alarma = self.frecuencia 
return bala return False def darBonus(self): bonus = random.randint(1,10) if bonus >= 5: bonus = 0 if bonus != 0: objBonus = Objeto_Bonus(self.rect.center, bonus) else: objBonus = None return objBonus def destruir(self): return Explosion(self.rect.center) class Bala(pygame.sprite.Sprite): """Objeto bala general para todos los objetos que disparan""" def __init__(self, ruta_img, posicion_inicial, angulo): pygame.sprite.Sprite.__init__(self) self.angulo = angulo self.imagen = pygame.transform.rotate(funciones.cargarImagen(ruta_img), self.angulo) self.rect = self.imagen.get_rect() self.velocidad = 4 self.posicion = list(posicion_inicial) self.rect.center = self.posicion def actualizar(self, ventana): self.posicion[0] += funciones.vectorEnX(self.velocidad, self.angulo) self.posicion[1] += funciones.vectorEnY(self.velocidad, self.angulo) self.rect.center = self.posicion ventana.blit(self.imagen, self.rect) if (self.posicion[0] > funciones.VENTANA[0]) or (self.posicion[0] < 0) \ or (self.posicion[1] > funciones.VENTANA[1]) or (self.posicion[1] < 0): return True class Explosion(pygame.sprite.Sprite): """Objeto que representa la explosion de otro objeto""" def __init__(self, posicion): pygame.sprite.Sprite.__init__(self) self.imagenes = funciones.agregarImagen("imagenes/nivel 1/explosion.png", 192, 192) self.rect = self.imagenes[0].get_rect() self.rect.center = posicion self.subimagen = 0 def actualizar(self, ventana): ventana.blit(self.imagenes[int(self.subimagen)], self.rect) self.subimagen += 0.2 if self.subimagen >= len(self.imagenes)-1: return True class Objeto_Bonus(pygame.sprite.Sprite): """Objeto recogible por el jugador""" def __init__(self, pos, tp): pygame.sprite.Sprite.__init__(self) self.tipo = tp self.tipos = {1:"vida", 2:"balas", 3:"tiempo", 4:"bomba"} if tp <= len(self.tipos): self.imagen = funciones.cargarImagen("imagenes/nivel 1/"+self.tipos[tp]+".png") self.rect = self.imagen.get_rect() self.rect.center = pos def actualizar(self, ventana, otro): 
ventana.blit(self.imagen, self.rect) return self.rect.colliderect(otro)
[ [ 8, 0, 0.062, 0.016, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.072, 0.004, 0, 0.66, 0.0833, 87, 0, 2, 0, 0, 87, 0, 0 ], [ 1, 0, 0.076, 0.004, 0, 0.66, 0....
[ "'''\nCreated on 10/05/2012\n@author: Juan Pablo Moreno y Alejandro Duarte\n'''", "import pygame, random", "from pygame.locals import *", "from funcionesBasicas import Funciones as funciones", "class Cursor(pygame.Rect):\n\t\n\tdef __init__(self):\n\t\tpygame.Rect.__init__(self,0,0,1,1)\n\t\t\n\tdef actuali...
# -*- coding: UTF-8 -*- # # creditos.py # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. ''' Created on 26/06/2012 @author: Juan Pablo Moreno y Alejandro Duarte ''' import pygame from funcionesBasicas import Funciones as funciones class Creditos(): def __init__(self, ventana): self.ventana = ventana self.imagen_fondo = funciones.cargarImagen("imagenes/creditos.jpg") self.musica_fondo = "sonido/SELAGINELLA.ogg" self.color_texto = [255,255,255] self.linea1_img, self.linea1_rect = funciones.dibujarTexto("JUEGO DESARROLLADO COMO PROYECTO DE", 400, 216, self.color_texto) self.linea2_img, self.linea2_rect = funciones.dibujarTexto("EL GRUPO DE TRABAJO PYGROUP", 400, 232, self.color_texto) self.linea3_img, self.linea3_rect = funciones.dibujarTexto("POR:", 400, 248, self.color_texto) self.linea4_img, self.linea4_rect = funciones.dibujarTexto("JUAN PABLO MORENO RICO 2011102059", 400, 264, self.color_texto) self.linea5_img, self.linea5_rect = funciones.dibujarTexto("ALEJANDRO DUARTE 20092020120", 400, 280, self.color_texto) self.linea6_img, self.linea6_rect = funciones.dibujarTexto("Musica por THE CROW'S CLAW", 400, 312, self.color_texto) self.linea7_img, self.linea7_rect = funciones.dibujarTexto("Imagenes tomadas de Internet de varios sitios", 400, 328, self.color_texto) self.linea8_img, self.linea8_rect = funciones.dibujarTexto("AGRADECEMOS EL INCENTIVAR ESTAS ACTIVIDADES", 400, 360, self.color_texto) def mainCreditos(self): funciones.cargarMusica(self.musica_fondo) pygame.mixer.music.play(-1) while True: for evento in pygame.event.get(): 
if evento.type == pygame.QUIT or evento.type == pygame.KEYDOWN: pygame.mixer.music.stop() return 0 self.ventana.blit(self.imagen_fondo, (0,0)) self.ventana.blit(self.linea1_img, self.linea1_rect) self.ventana.blit(self.linea2_img, self.linea2_rect) self.ventana.blit(self.linea3_img, self.linea3_rect) self.ventana.blit(self.linea4_img, self.linea4_rect) self.ventana.blit(self.linea5_img, self.linea5_rect) self.ventana.blit(self.linea6_img, self.linea6_rect) self.ventana.blit(self.linea7_img, self.linea7_rect) self.ventana.blit(self.linea8_img, self.linea8_rect) pygame.display.update() return 0
[ [ 8, 0, 0.2672, 0.069, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.3103, 0.0172, 0, 0.66, 0.3333, 87, 0, 1, 0, 0, 87, 0, 0 ], [ 1, 0, 0.3276, 0.0172, 0, 0.66, ...
[ "'''\nCreated on 26/06/2012\n@author: Juan Pablo Moreno y Alejandro Duarte\n'''", "import pygame", "from funcionesBasicas import Funciones as funciones", "class Creditos():\n\t\n\tdef __init__(self, ventana):\n\t\tself.ventana = ventana\n\t\tself.imagen_fondo = funciones.cargarImagen(\"imagenes/creditos.jpg\"...
#!/usr/bin/env python import sys import string if len( sys.argv ) == 1 : for asc_line in sys.stdin.readlines(): mpw_line = string.replace(asc_line, "\\xA5", "\245") mpw_line = string.replace(mpw_line, "\\xB6", "\266") mpw_line = string.replace(mpw_line, "\\xC4", "\304") mpw_line = string.replace(mpw_line, "\\xC5", "\305") mpw_line = string.replace(mpw_line, "\\xFF", "\377") mpw_line = string.replace(mpw_line, "\n", "\r") mpw_line = string.replace(mpw_line, "\\n", "\n") sys.stdout.write(mpw_line) elif sys.argv[1] == "-r" : for mpw_line in sys.stdin.readlines(): asc_line = string.replace(mpw_line, "\n", "\\n") asc_line = string.replace(asc_line, "\r", "\n") asc_line = string.replace(asc_line, "\245", "\\xA5") asc_line = string.replace(asc_line, "\266", "\\xB6") asc_line = string.replace(asc_line, "\304", "\\xC4") asc_line = string.replace(asc_line, "\305", "\\xC5") asc_line = string.replace(asc_line, "\377", "\\xFF") sys.stdout.write(asc_line)
[ [ 1, 0, 0.0833, 0.0417, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.125, 0.0417, 0, 0.66, 0.5, 890, 0, 1, 0, 0, 890, 0, 0 ], [ 4, 0, 0.6042, 0.8333, 0, 0.66...
[ "import sys", "import string", "if len( sys.argv ) == 1 :\n for asc_line in sys.stdin.readlines():\n mpw_line = string.replace(asc_line, \"\\\\xA5\", \"\\245\")\n mpw_line = string.replace(mpw_line, \"\\\\xB6\", \"\\266\")\n mpw_line = string.replace(mpw_line, \"\\\\xC4\", \"\\304\")\n mpw_line = s...
#!/usr/bin/env python import sys import string if len( sys.argv ) == 1 : for asc_line in sys.stdin.readlines(): mpw_line = string.replace(asc_line, "\\xA5", "\245") mpw_line = string.replace(mpw_line, "\\xB6", "\266") mpw_line = string.replace(mpw_line, "\\xC4", "\304") mpw_line = string.replace(mpw_line, "\\xC5", "\305") mpw_line = string.replace(mpw_line, "\\xFF", "\377") mpw_line = string.replace(mpw_line, "\n", "\r") mpw_line = string.replace(mpw_line, "\\n", "\n") sys.stdout.write(mpw_line) elif sys.argv[1] == "-r" : for mpw_line in sys.stdin.readlines(): asc_line = string.replace(mpw_line, "\n", "\\n") asc_line = string.replace(asc_line, "\r", "\n") asc_line = string.replace(asc_line, "\245", "\\xA5") asc_line = string.replace(asc_line, "\266", "\\xB6") asc_line = string.replace(asc_line, "\304", "\\xC4") asc_line = string.replace(asc_line, "\305", "\\xC5") asc_line = string.replace(asc_line, "\377", "\\xFF") sys.stdout.write(asc_line)
[ [ 1, 0, 0.0833, 0.0417, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.125, 0.0417, 0, 0.66, 0.5, 890, 0, 1, 0, 0, 890, 0, 0 ], [ 4, 0, 0.6042, 0.8333, 0, 0.66...
[ "import sys", "import string", "if len( sys.argv ) == 1 :\n for asc_line in sys.stdin.readlines():\n mpw_line = string.replace(asc_line, \"\\\\xA5\", \"\\245\")\n mpw_line = string.replace(mpw_line, \"\\\\xB6\", \"\\266\")\n mpw_line = string.replace(mpw_line, \"\\\\xC4\", \"\\304\")\n mpw_line = s...
#!/usr/bin/env python # # # FreeType 2 glyph name builder # # Copyright 1996-2000, 2003, 2005, 2007, 2008, 2011 by # David Turner, Robert Wilhelm, and Werner Lemberg. # # This file is part of the FreeType project, and may only be used, modified, # and distributed under the terms of the FreeType project license, # LICENSE.TXT. By continuing to use, modify, or distribute this file you # indicate that you have read the license and understand and accept it # fully. """\ usage: %s <output-file> This python script generates the glyph names tables defined in the `psnames' module. Its single argument is the name of the header file to be created. """ import sys, string, struct, re, os.path # This table lists the glyphs according to the Macintosh specification. # It is used by the TrueType Postscript names table. # # See # # http://fonts.apple.com/TTRefMan/RM06/Chap6post.html # # for the official list. # mac_standard_names = \ [ # 0 ".notdef", ".null", "nonmarkingreturn", "space", "exclam", "quotedbl", "numbersign", "dollar", "percent", "ampersand", # 10 "quotesingle", "parenleft", "parenright", "asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", # 20 "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "colon", # 30 "semicolon", "less", "equal", "greater", "question", "at", "A", "B", "C", "D", # 40 "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", # 50 "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", # 60 "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum", "underscore", "grave", "a", "b", # 70 "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", # 80 "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", # 90 "w", "x", "y", "z", "braceleft", "bar", "braceright", "asciitilde", "Adieresis", "Aring", # 100 "Ccedilla", "Eacute", "Ntilde", "Odieresis", "Udieresis", "aacute", "agrave", "acircumflex", "adieresis", "atilde", # 110 "aring", "ccedilla", "eacute", "egrave", "ecircumflex", "edieresis", "iacute", "igrave", "icircumflex", 
"idieresis", # 120 "ntilde", "oacute", "ograve", "ocircumflex", "odieresis", "otilde", "uacute", "ugrave", "ucircumflex", "udieresis", # 130 "dagger", "degree", "cent", "sterling", "section", "bullet", "paragraph", "germandbls", "registered", "copyright", # 140 "trademark", "acute", "dieresis", "notequal", "AE", "Oslash", "infinity", "plusminus", "lessequal", "greaterequal", # 150 "yen", "mu", "partialdiff", "summation", "product", "pi", "integral", "ordfeminine", "ordmasculine", "Omega", # 160 "ae", "oslash", "questiondown", "exclamdown", "logicalnot", "radical", "florin", "approxequal", "Delta", "guillemotleft", # 170 "guillemotright", "ellipsis", "nonbreakingspace", "Agrave", "Atilde", "Otilde", "OE", "oe", "endash", "emdash", # 180 "quotedblleft", "quotedblright", "quoteleft", "quoteright", "divide", "lozenge", "ydieresis", "Ydieresis", "fraction", "currency", # 190 "guilsinglleft", "guilsinglright", "fi", "fl", "daggerdbl", "periodcentered", "quotesinglbase", "quotedblbase", "perthousand", "Acircumflex", # 200 "Ecircumflex", "Aacute", "Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis", "Igrave", "Oacute", "Ocircumflex", # 210 "apple", "Ograve", "Uacute", "Ucircumflex", "Ugrave", "dotlessi", "circumflex", "tilde", "macron", "breve", # 220 "dotaccent", "ring", "cedilla", "hungarumlaut", "ogonek", "caron", "Lslash", "lslash", "Scaron", "scaron", # 230 "Zcaron", "zcaron", "brokenbar", "Eth", "eth", "Yacute", "yacute", "Thorn", "thorn", "minus", # 240 "multiply", "onesuperior", "twosuperior", "threesuperior", "onehalf", "onequarter", "threequarters", "franc", "Gbreve", "gbreve", # 250 "Idotaccent", "Scedilla", "scedilla", "Cacute", "cacute", "Ccaron", "ccaron", "dcroat" ] # The list of standard `SID' glyph names. For the official list, # see Annex A of document at # # http://partners.adobe.com/public/developer/en/font/5176.CFF.pdf . 
# sid_standard_names = \ [ # 0 ".notdef", "space", "exclam", "quotedbl", "numbersign", "dollar", "percent", "ampersand", "quoteright", "parenleft", # 10 "parenright", "asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two", # 20 "three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon", "less", # 30 "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", # 40 "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", # 50 "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", # 60 "bracketleft", "backslash", "bracketright", "asciicircum", "underscore", "quoteleft", "a", "b", "c", "d", # 70 "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", # 80 "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", # 90 "y", "z", "braceleft", "bar", "braceright", "asciitilde", "exclamdown", "cent", "sterling", "fraction", # 100 "yen", "florin", "section", "currency", "quotesingle", "quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi", # 110 "fl", "endash", "dagger", "daggerdbl", "periodcentered", "paragraph", "bullet", "quotesinglbase", "quotedblbase", "quotedblright", # 120 "guillemotright", "ellipsis", "perthousand", "questiondown", "grave", "acute", "circumflex", "tilde", "macron", "breve", # 130 "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut", "ogonek", "caron", "emdash", "AE", "ordfeminine", # 140 "Lslash", "Oslash", "OE", "ordmasculine", "ae", "dotlessi", "lslash", "oslash", "oe", "germandbls", # 150 "onesuperior", "logicalnot", "mu", "trademark", "Eth", "onehalf", "plusminus", "Thorn", "onequarter", "divide", # 160 "brokenbar", "degree", "thorn", "threequarters", "twosuperior", "registered", "minus", "eth", "multiply", "threesuperior", # 170 "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave", "Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex", # 180 "Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis", "Igrave", "Ntilde", "Oacute", "Ocircumflex", "Odieresis", # 190 "Ograve", "Otilde", 
"Scaron", "Uacute", "Ucircumflex", "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron", # 200 "aacute", "acircumflex", "adieresis", "agrave", "aring", "atilde", "ccedilla", "eacute", "ecircumflex", "edieresis", # 210 "egrave", "iacute", "icircumflex", "idieresis", "igrave", "ntilde", "oacute", "ocircumflex", "odieresis", "ograve", # 220 "otilde", "scaron", "uacute", "ucircumflex", "udieresis", "ugrave", "yacute", "ydieresis", "zcaron", "exclamsmall", # 230 "Hungarumlautsmall", "dollaroldstyle", "dollarsuperior", "ampersandsmall", "Acutesmall", "parenleftsuperior", "parenrightsuperior", "twodotenleader", "onedotenleader", "zerooldstyle", # 240 "oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle", "commasuperior", # 250 "threequartersemdash", "periodsuperior", "questionsmall", "asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", "lsuperior", # 260 "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", "tsuperior", "ff", "ffi", "ffl", "parenleftinferior", # 270 "parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall", "Asmall", "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall", # 280 "Gsmall", "Hsmall", "Ismall", "Jsmall", "Ksmall", "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall", # 290 "Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall", "Vsmall", "Wsmall", "Xsmall", "Ysmall", "Zsmall", # 300 "colonmonetary", "onefitted", "rupiah", "Tildesmall", "exclamdownsmall", "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall", "Dieresissmall", # 310 "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", "figuredash", "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall", "questiondownsmall", # 320 "oneeighth", "threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds", "zerosuperior", "foursuperior", "fivesuperior", "sixsuperior", # 330 "sevensuperior", "eightsuperior", "ninesuperior", "zeroinferior", 
"oneinferior", "twoinferior", "threeinferior", "fourinferior", "fiveinferior", "sixinferior", # 340 "seveninferior", "eightinferior", "nineinferior", "centinferior", "dollarinferior", "periodinferior", "commainferior", "Agravesmall", "Aacutesmall", "Acircumflexsmall", # 350 "Atildesmall", "Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall", "Egravesmall", "Eacutesmall", "Ecircumflexsmall", "Edieresissmall", "Igravesmall", # 360 "Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall", "Ntildesmall", "Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall", "Odieresissmall", # 370 "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall", "Ucircumflexsmall", "Udieresissmall", "Yacutesmall", "Thornsmall", "Ydieresissmall", "001.000", # 380 "001.001", "001.002", "001.003", "Black", "Bold", "Book", "Light", "Medium", "Regular", "Roman", # 390 "Semibold" ] # This table maps character codes of the Adobe Standard Type 1 # encoding to glyph indices in the sid_standard_names table. 
# t1_standard_encoding = \ [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 0, 111, 112, 113, 114, 0, 115, 116, 117, 118, 119, 120, 121, 122, 0, 123, 0, 124, 125, 126, 127, 128, 129, 130, 131, 0, 132, 133, 0, 134, 135, 136, 137, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 138, 0, 139, 0, 0, 0, 0, 140, 141, 142, 143, 0, 0, 0, 0, 0, 144, 0, 0, 0, 145, 0, 0, 146, 147, 148, 149, 0, 0, 0, 0 ] # This table maps character codes of the Adobe Expert Type 1 # encoding to glyph indices in the sid_standard_names table. 
# t1_expert_encoding = \ [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 229, 230, 0, 231, 232, 233, 234, 235, 236, 237, 238, 13, 14, 15, 99, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 27, 28, 249, 250, 251, 252, 0, 253, 254, 255, 256, 257, 0, 0, 0, 258, 0, 0, 259, 260, 261, 262, 0, 0, 263, 264, 265, 0, 266, 109, 110, 267, 268, 269, 0, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 304, 305, 306, 0, 0, 307, 308, 309, 310, 311, 0, 312, 0, 0, 313, 0, 0, 314, 315, 0, 0, 316, 317, 318, 0, 0, 0, 158, 155, 163, 319, 320, 321, 322, 323, 324, 325, 0, 0, 326, 150, 164, 169, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378 ] # This data has been taken literally from the files `glyphlist.txt' # and `zapfdingbats.txt' version 2.0, Sept 2002. 
It is available from # # http://sourceforge.net/adobe/aglfn/ # adobe_glyph_list = """\ A;0041 AE;00C6 AEacute;01FC AEmacron;01E2 AEsmall;F7E6 Aacute;00C1 Aacutesmall;F7E1 Abreve;0102 Abreveacute;1EAE Abrevecyrillic;04D0 Abrevedotbelow;1EB6 Abrevegrave;1EB0 Abrevehookabove;1EB2 Abrevetilde;1EB4 Acaron;01CD Acircle;24B6 Acircumflex;00C2 Acircumflexacute;1EA4 Acircumflexdotbelow;1EAC Acircumflexgrave;1EA6 Acircumflexhookabove;1EA8 Acircumflexsmall;F7E2 Acircumflextilde;1EAA Acute;F6C9 Acutesmall;F7B4 Acyrillic;0410 Adblgrave;0200 Adieresis;00C4 Adieresiscyrillic;04D2 Adieresismacron;01DE Adieresissmall;F7E4 Adotbelow;1EA0 Adotmacron;01E0 Agrave;00C0 Agravesmall;F7E0 Ahookabove;1EA2 Aiecyrillic;04D4 Ainvertedbreve;0202 Alpha;0391 Alphatonos;0386 Amacron;0100 Amonospace;FF21 Aogonek;0104 Aring;00C5 Aringacute;01FA Aringbelow;1E00 Aringsmall;F7E5 Asmall;F761 Atilde;00C3 Atildesmall;F7E3 Aybarmenian;0531 B;0042 Bcircle;24B7 Bdotaccent;1E02 Bdotbelow;1E04 Becyrillic;0411 Benarmenian;0532 Beta;0392 Bhook;0181 Blinebelow;1E06 Bmonospace;FF22 Brevesmall;F6F4 Bsmall;F762 Btopbar;0182 C;0043 Caarmenian;053E Cacute;0106 Caron;F6CA Caronsmall;F6F5 Ccaron;010C Ccedilla;00C7 Ccedillaacute;1E08 Ccedillasmall;F7E7 Ccircle;24B8 Ccircumflex;0108 Cdot;010A Cdotaccent;010A Cedillasmall;F7B8 Chaarmenian;0549 Cheabkhasiancyrillic;04BC Checyrillic;0427 Chedescenderabkhasiancyrillic;04BE Chedescendercyrillic;04B6 Chedieresiscyrillic;04F4 Cheharmenian;0543 Chekhakassiancyrillic;04CB Cheverticalstrokecyrillic;04B8 Chi;03A7 Chook;0187 Circumflexsmall;F6F6 Cmonospace;FF23 Coarmenian;0551 Csmall;F763 D;0044 DZ;01F1 DZcaron;01C4 Daarmenian;0534 Dafrican;0189 Dcaron;010E Dcedilla;1E10 Dcircle;24B9 Dcircumflexbelow;1E12 Dcroat;0110 Ddotaccent;1E0A Ddotbelow;1E0C Decyrillic;0414 Deicoptic;03EE Delta;2206 Deltagreek;0394 Dhook;018A Dieresis;F6CB DieresisAcute;F6CC DieresisGrave;F6CD Dieresissmall;F7A8 Digammagreek;03DC Djecyrillic;0402 Dlinebelow;1E0E Dmonospace;FF24 Dotaccentsmall;F6F7 Dslash;0110 
Dsmall;F764 Dtopbar;018B Dz;01F2 Dzcaron;01C5 Dzeabkhasiancyrillic;04E0 Dzecyrillic;0405 Dzhecyrillic;040F E;0045 Eacute;00C9 Eacutesmall;F7E9 Ebreve;0114 Ecaron;011A Ecedillabreve;1E1C Echarmenian;0535 Ecircle;24BA Ecircumflex;00CA Ecircumflexacute;1EBE Ecircumflexbelow;1E18 Ecircumflexdotbelow;1EC6 Ecircumflexgrave;1EC0 Ecircumflexhookabove;1EC2 Ecircumflexsmall;F7EA Ecircumflextilde;1EC4 Ecyrillic;0404 Edblgrave;0204 Edieresis;00CB Edieresissmall;F7EB Edot;0116 Edotaccent;0116 Edotbelow;1EB8 Efcyrillic;0424 Egrave;00C8 Egravesmall;F7E8 Eharmenian;0537 Ehookabove;1EBA Eightroman;2167 Einvertedbreve;0206 Eiotifiedcyrillic;0464 Elcyrillic;041B Elevenroman;216A Emacron;0112 Emacronacute;1E16 Emacrongrave;1E14 Emcyrillic;041C Emonospace;FF25 Encyrillic;041D Endescendercyrillic;04A2 Eng;014A Enghecyrillic;04A4 Enhookcyrillic;04C7 Eogonek;0118 Eopen;0190 Epsilon;0395 Epsilontonos;0388 Ercyrillic;0420 Ereversed;018E Ereversedcyrillic;042D Escyrillic;0421 Esdescendercyrillic;04AA Esh;01A9 Esmall;F765 Eta;0397 Etarmenian;0538 Etatonos;0389 Eth;00D0 Ethsmall;F7F0 Etilde;1EBC Etildebelow;1E1A Euro;20AC Ezh;01B7 Ezhcaron;01EE Ezhreversed;01B8 F;0046 Fcircle;24BB Fdotaccent;1E1E Feharmenian;0556 Feicoptic;03E4 Fhook;0191 Fitacyrillic;0472 Fiveroman;2164 Fmonospace;FF26 Fourroman;2163 Fsmall;F766 G;0047 GBsquare;3387 Gacute;01F4 Gamma;0393 Gammaafrican;0194 Gangiacoptic;03EA Gbreve;011E Gcaron;01E6 Gcedilla;0122 Gcircle;24BC Gcircumflex;011C Gcommaaccent;0122 Gdot;0120 Gdotaccent;0120 Gecyrillic;0413 Ghadarmenian;0542 Ghemiddlehookcyrillic;0494 Ghestrokecyrillic;0492 Gheupturncyrillic;0490 Ghook;0193 Gimarmenian;0533 Gjecyrillic;0403 Gmacron;1E20 Gmonospace;FF27 Grave;F6CE Gravesmall;F760 Gsmall;F767 Gsmallhook;029B Gstroke;01E4 H;0048 H18533;25CF H18543;25AA H18551;25AB H22073;25A1 HPsquare;33CB Haabkhasiancyrillic;04A8 Hadescendercyrillic;04B2 Hardsigncyrillic;042A Hbar;0126 Hbrevebelow;1E2A Hcedilla;1E28 Hcircle;24BD Hcircumflex;0124 Hdieresis;1E26 Hdotaccent;1E22 
Hdotbelow;1E24 Hmonospace;FF28 Hoarmenian;0540 Horicoptic;03E8 Hsmall;F768 Hungarumlaut;F6CF Hungarumlautsmall;F6F8 Hzsquare;3390 I;0049 IAcyrillic;042F IJ;0132 IUcyrillic;042E Iacute;00CD Iacutesmall;F7ED Ibreve;012C Icaron;01CF Icircle;24BE Icircumflex;00CE Icircumflexsmall;F7EE Icyrillic;0406 Idblgrave;0208 Idieresis;00CF Idieresisacute;1E2E Idieresiscyrillic;04E4 Idieresissmall;F7EF Idot;0130 Idotaccent;0130 Idotbelow;1ECA Iebrevecyrillic;04D6 Iecyrillic;0415 Ifraktur;2111 Igrave;00CC Igravesmall;F7EC Ihookabove;1EC8 Iicyrillic;0418 Iinvertedbreve;020A Iishortcyrillic;0419 Imacron;012A Imacroncyrillic;04E2 Imonospace;FF29 Iniarmenian;053B Iocyrillic;0401 Iogonek;012E Iota;0399 Iotaafrican;0196 Iotadieresis;03AA Iotatonos;038A Ismall;F769 Istroke;0197 Itilde;0128 Itildebelow;1E2C Izhitsacyrillic;0474 Izhitsadblgravecyrillic;0476 J;004A Jaarmenian;0541 Jcircle;24BF Jcircumflex;0134 Jecyrillic;0408 Jheharmenian;054B Jmonospace;FF2A Jsmall;F76A K;004B KBsquare;3385 KKsquare;33CD Kabashkircyrillic;04A0 Kacute;1E30 Kacyrillic;041A Kadescendercyrillic;049A Kahookcyrillic;04C3 Kappa;039A Kastrokecyrillic;049E Kaverticalstrokecyrillic;049C Kcaron;01E8 Kcedilla;0136 Kcircle;24C0 Kcommaaccent;0136 Kdotbelow;1E32 Keharmenian;0554 Kenarmenian;053F Khacyrillic;0425 Kheicoptic;03E6 Khook;0198 Kjecyrillic;040C Klinebelow;1E34 Kmonospace;FF2B Koppacyrillic;0480 Koppagreek;03DE Ksicyrillic;046E Ksmall;F76B L;004C LJ;01C7 LL;F6BF Lacute;0139 Lambda;039B Lcaron;013D Lcedilla;013B Lcircle;24C1 Lcircumflexbelow;1E3C Lcommaaccent;013B Ldot;013F Ldotaccent;013F Ldotbelow;1E36 Ldotbelowmacron;1E38 Liwnarmenian;053C Lj;01C8 Ljecyrillic;0409 Llinebelow;1E3A Lmonospace;FF2C Lslash;0141 Lslashsmall;F6F9 Lsmall;F76C M;004D MBsquare;3386 Macron;F6D0 Macronsmall;F7AF Macute;1E3E Mcircle;24C2 Mdotaccent;1E40 Mdotbelow;1E42 Menarmenian;0544 Mmonospace;FF2D Msmall;F76D Mturned;019C Mu;039C N;004E NJ;01CA Nacute;0143 Ncaron;0147 Ncedilla;0145 Ncircle;24C3 Ncircumflexbelow;1E4A Ncommaaccent;0145 
Ndotaccent;1E44 Ndotbelow;1E46 Nhookleft;019D Nineroman;2168 Nj;01CB Njecyrillic;040A Nlinebelow;1E48 Nmonospace;FF2E Nowarmenian;0546 Nsmall;F76E Ntilde;00D1 Ntildesmall;F7F1 Nu;039D O;004F OE;0152 OEsmall;F6FA Oacute;00D3 Oacutesmall;F7F3 Obarredcyrillic;04E8 Obarreddieresiscyrillic;04EA Obreve;014E Ocaron;01D1 Ocenteredtilde;019F Ocircle;24C4 Ocircumflex;00D4 Ocircumflexacute;1ED0 Ocircumflexdotbelow;1ED8 Ocircumflexgrave;1ED2 Ocircumflexhookabove;1ED4 Ocircumflexsmall;F7F4 Ocircumflextilde;1ED6 Ocyrillic;041E Odblacute;0150 Odblgrave;020C Odieresis;00D6 Odieresiscyrillic;04E6 Odieresissmall;F7F6 Odotbelow;1ECC Ogoneksmall;F6FB Ograve;00D2 Ogravesmall;F7F2 Oharmenian;0555 Ohm;2126 Ohookabove;1ECE Ohorn;01A0 Ohornacute;1EDA Ohorndotbelow;1EE2 Ohorngrave;1EDC Ohornhookabove;1EDE Ohorntilde;1EE0 Ohungarumlaut;0150 Oi;01A2 Oinvertedbreve;020E Omacron;014C Omacronacute;1E52 Omacrongrave;1E50 Omega;2126 Omegacyrillic;0460 Omegagreek;03A9 Omegaroundcyrillic;047A Omegatitlocyrillic;047C Omegatonos;038F Omicron;039F Omicrontonos;038C Omonospace;FF2F Oneroman;2160 Oogonek;01EA Oogonekmacron;01EC Oopen;0186 Oslash;00D8 Oslashacute;01FE Oslashsmall;F7F8 Osmall;F76F Ostrokeacute;01FE Otcyrillic;047E Otilde;00D5 Otildeacute;1E4C Otildedieresis;1E4E Otildesmall;F7F5 P;0050 Pacute;1E54 Pcircle;24C5 Pdotaccent;1E56 Pecyrillic;041F Peharmenian;054A Pemiddlehookcyrillic;04A6 Phi;03A6 Phook;01A4 Pi;03A0 Piwrarmenian;0553 Pmonospace;FF30 Psi;03A8 Psicyrillic;0470 Psmall;F770 Q;0051 Qcircle;24C6 Qmonospace;FF31 Qsmall;F771 R;0052 Raarmenian;054C Racute;0154 Rcaron;0158 Rcedilla;0156 Rcircle;24C7 Rcommaaccent;0156 Rdblgrave;0210 Rdotaccent;1E58 Rdotbelow;1E5A Rdotbelowmacron;1E5C Reharmenian;0550 Rfraktur;211C Rho;03A1 Ringsmall;F6FC Rinvertedbreve;0212 Rlinebelow;1E5E Rmonospace;FF32 Rsmall;F772 Rsmallinverted;0281 Rsmallinvertedsuperior;02B6 S;0053 SF010000;250C SF020000;2514 SF030000;2510 SF040000;2518 SF050000;253C SF060000;252C SF070000;2534 SF080000;251C SF090000;2524 
SF100000;2500 SF110000;2502 SF190000;2561 SF200000;2562 SF210000;2556 SF220000;2555 SF230000;2563 SF240000;2551 SF250000;2557 SF260000;255D SF270000;255C SF280000;255B SF360000;255E SF370000;255F SF380000;255A SF390000;2554 SF400000;2569 SF410000;2566 SF420000;2560 SF430000;2550 SF440000;256C SF450000;2567 SF460000;2568 SF470000;2564 SF480000;2565 SF490000;2559 SF500000;2558 SF510000;2552 SF520000;2553 SF530000;256B SF540000;256A Sacute;015A Sacutedotaccent;1E64 Sampigreek;03E0 Scaron;0160 Scarondotaccent;1E66 Scaronsmall;F6FD Scedilla;015E Schwa;018F Schwacyrillic;04D8 Schwadieresiscyrillic;04DA Scircle;24C8 Scircumflex;015C Scommaaccent;0218 Sdotaccent;1E60 Sdotbelow;1E62 Sdotbelowdotaccent;1E68 Seharmenian;054D Sevenroman;2166 Shaarmenian;0547 Shacyrillic;0428 Shchacyrillic;0429 Sheicoptic;03E2 Shhacyrillic;04BA Shimacoptic;03EC Sigma;03A3 Sixroman;2165 Smonospace;FF33 Softsigncyrillic;042C Ssmall;F773 Stigmagreek;03DA T;0054 Tau;03A4 Tbar;0166 Tcaron;0164 Tcedilla;0162 Tcircle;24C9 Tcircumflexbelow;1E70 Tcommaaccent;0162 Tdotaccent;1E6A Tdotbelow;1E6C Tecyrillic;0422 Tedescendercyrillic;04AC Tenroman;2169 Tetsecyrillic;04B4 Theta;0398 Thook;01AC Thorn;00DE Thornsmall;F7FE Threeroman;2162 Tildesmall;F6FE Tiwnarmenian;054F Tlinebelow;1E6E Tmonospace;FF34 Toarmenian;0539 Tonefive;01BC Tonesix;0184 Tonetwo;01A7 Tretroflexhook;01AE Tsecyrillic;0426 Tshecyrillic;040B Tsmall;F774 Twelveroman;216B Tworoman;2161 U;0055 Uacute;00DA Uacutesmall;F7FA Ubreve;016C Ucaron;01D3 Ucircle;24CA Ucircumflex;00DB Ucircumflexbelow;1E76 Ucircumflexsmall;F7FB Ucyrillic;0423 Udblacute;0170 Udblgrave;0214 Udieresis;00DC Udieresisacute;01D7 Udieresisbelow;1E72 Udieresiscaron;01D9 Udieresiscyrillic;04F0 Udieresisgrave;01DB Udieresismacron;01D5 Udieresissmall;F7FC Udotbelow;1EE4 Ugrave;00D9 Ugravesmall;F7F9 Uhookabove;1EE6 Uhorn;01AF Uhornacute;1EE8 Uhorndotbelow;1EF0 Uhorngrave;1EEA Uhornhookabove;1EEC Uhorntilde;1EEE Uhungarumlaut;0170 Uhungarumlautcyrillic;04F2 Uinvertedbreve;0216 
Ukcyrillic;0478 Umacron;016A Umacroncyrillic;04EE Umacrondieresis;1E7A Umonospace;FF35 Uogonek;0172 Upsilon;03A5 Upsilon1;03D2 Upsilonacutehooksymbolgreek;03D3 Upsilonafrican;01B1 Upsilondieresis;03AB Upsilondieresishooksymbolgreek;03D4 Upsilonhooksymbol;03D2 Upsilontonos;038E Uring;016E Ushortcyrillic;040E Usmall;F775 Ustraightcyrillic;04AE Ustraightstrokecyrillic;04B0 Utilde;0168 Utildeacute;1E78 Utildebelow;1E74 V;0056 Vcircle;24CB Vdotbelow;1E7E Vecyrillic;0412 Vewarmenian;054E Vhook;01B2 Vmonospace;FF36 Voarmenian;0548 Vsmall;F776 Vtilde;1E7C W;0057 Wacute;1E82 Wcircle;24CC Wcircumflex;0174 Wdieresis;1E84 Wdotaccent;1E86 Wdotbelow;1E88 Wgrave;1E80 Wmonospace;FF37 Wsmall;F777 X;0058 Xcircle;24CD Xdieresis;1E8C Xdotaccent;1E8A Xeharmenian;053D Xi;039E Xmonospace;FF38 Xsmall;F778 Y;0059 Yacute;00DD Yacutesmall;F7FD Yatcyrillic;0462 Ycircle;24CE Ycircumflex;0176 Ydieresis;0178 Ydieresissmall;F7FF Ydotaccent;1E8E Ydotbelow;1EF4 Yericyrillic;042B Yerudieresiscyrillic;04F8 Ygrave;1EF2 Yhook;01B3 Yhookabove;1EF6 Yiarmenian;0545 Yicyrillic;0407 Yiwnarmenian;0552 Ymonospace;FF39 Ysmall;F779 Ytilde;1EF8 Yusbigcyrillic;046A Yusbigiotifiedcyrillic;046C Yuslittlecyrillic;0466 Yuslittleiotifiedcyrillic;0468 Z;005A Zaarmenian;0536 Zacute;0179 Zcaron;017D Zcaronsmall;F6FF Zcircle;24CF Zcircumflex;1E90 Zdot;017B Zdotaccent;017B Zdotbelow;1E92 Zecyrillic;0417 Zedescendercyrillic;0498 Zedieresiscyrillic;04DE Zeta;0396 Zhearmenian;053A Zhebrevecyrillic;04C1 Zhecyrillic;0416 Zhedescendercyrillic;0496 Zhedieresiscyrillic;04DC Zlinebelow;1E94 Zmonospace;FF3A Zsmall;F77A Zstroke;01B5 a;0061 aabengali;0986 aacute;00E1 aadeva;0906 aagujarati;0A86 aagurmukhi;0A06 aamatragurmukhi;0A3E aarusquare;3303 aavowelsignbengali;09BE aavowelsigndeva;093E aavowelsigngujarati;0ABE abbreviationmarkarmenian;055F abbreviationsigndeva;0970 abengali;0985 abopomofo;311A abreve;0103 abreveacute;1EAF abrevecyrillic;04D1 abrevedotbelow;1EB7 abrevegrave;1EB1 abrevehookabove;1EB3 abrevetilde;1EB5 acaron;01CE 
acircle;24D0 acircumflex;00E2 acircumflexacute;1EA5 acircumflexdotbelow;1EAD acircumflexgrave;1EA7 acircumflexhookabove;1EA9 acircumflextilde;1EAB acute;00B4 acutebelowcmb;0317 acutecmb;0301 acutecomb;0301 acutedeva;0954 acutelowmod;02CF acutetonecmb;0341 acyrillic;0430 adblgrave;0201 addakgurmukhi;0A71 adeva;0905 adieresis;00E4 adieresiscyrillic;04D3 adieresismacron;01DF adotbelow;1EA1 adotmacron;01E1 ae;00E6 aeacute;01FD aekorean;3150 aemacron;01E3 afii00208;2015 afii08941;20A4 afii10017;0410 afii10018;0411 afii10019;0412 afii10020;0413 afii10021;0414 afii10022;0415 afii10023;0401 afii10024;0416 afii10025;0417 afii10026;0418 afii10027;0419 afii10028;041A afii10029;041B afii10030;041C afii10031;041D afii10032;041E afii10033;041F afii10034;0420 afii10035;0421 afii10036;0422 afii10037;0423 afii10038;0424 afii10039;0425 afii10040;0426 afii10041;0427 afii10042;0428 afii10043;0429 afii10044;042A afii10045;042B afii10046;042C afii10047;042D afii10048;042E afii10049;042F afii10050;0490 afii10051;0402 afii10052;0403 afii10053;0404 afii10054;0405 afii10055;0406 afii10056;0407 afii10057;0408 afii10058;0409 afii10059;040A afii10060;040B afii10061;040C afii10062;040E afii10063;F6C4 afii10064;F6C5 afii10065;0430 afii10066;0431 afii10067;0432 afii10068;0433 afii10069;0434 afii10070;0435 afii10071;0451 afii10072;0436 afii10073;0437 afii10074;0438 afii10075;0439 afii10076;043A afii10077;043B afii10078;043C afii10079;043D afii10080;043E afii10081;043F afii10082;0440 afii10083;0441 afii10084;0442 afii10085;0443 afii10086;0444 afii10087;0445 afii10088;0446 afii10089;0447 afii10090;0448 afii10091;0449 afii10092;044A afii10093;044B afii10094;044C afii10095;044D afii10096;044E afii10097;044F afii10098;0491 afii10099;0452 afii10100;0453 afii10101;0454 afii10102;0455 afii10103;0456 afii10104;0457 afii10105;0458 afii10106;0459 afii10107;045A afii10108;045B afii10109;045C afii10110;045E afii10145;040F afii10146;0462 afii10147;0472 afii10148;0474 afii10192;F6C6 afii10193;045F afii10194;0463 
afii10195;0473 afii10196;0475 afii10831;F6C7 afii10832;F6C8 afii10846;04D9 afii299;200E afii300;200F afii301;200D afii57381;066A afii57388;060C afii57392;0660 afii57393;0661 afii57394;0662 afii57395;0663 afii57396;0664 afii57397;0665 afii57398;0666 afii57399;0667 afii57400;0668 afii57401;0669 afii57403;061B afii57407;061F afii57409;0621 afii57410;0622 afii57411;0623 afii57412;0624 afii57413;0625 afii57414;0626 afii57415;0627 afii57416;0628 afii57417;0629 afii57418;062A afii57419;062B afii57420;062C afii57421;062D afii57422;062E afii57423;062F afii57424;0630 afii57425;0631 afii57426;0632 afii57427;0633 afii57428;0634 afii57429;0635 afii57430;0636 afii57431;0637 afii57432;0638 afii57433;0639 afii57434;063A afii57440;0640 afii57441;0641 afii57442;0642 afii57443;0643 afii57444;0644 afii57445;0645 afii57446;0646 afii57448;0648 afii57449;0649 afii57450;064A afii57451;064B afii57452;064C afii57453;064D afii57454;064E afii57455;064F afii57456;0650 afii57457;0651 afii57458;0652 afii57470;0647 afii57505;06A4 afii57506;067E afii57507;0686 afii57508;0698 afii57509;06AF afii57511;0679 afii57512;0688 afii57513;0691 afii57514;06BA afii57519;06D2 afii57534;06D5 afii57636;20AA afii57645;05BE afii57658;05C3 afii57664;05D0 afii57665;05D1 afii57666;05D2 afii57667;05D3 afii57668;05D4 afii57669;05D5 afii57670;05D6 afii57671;05D7 afii57672;05D8 afii57673;05D9 afii57674;05DA afii57675;05DB afii57676;05DC afii57677;05DD afii57678;05DE afii57679;05DF afii57680;05E0 afii57681;05E1 afii57682;05E2 afii57683;05E3 afii57684;05E4 afii57685;05E5 afii57686;05E6 afii57687;05E7 afii57688;05E8 afii57689;05E9 afii57690;05EA afii57694;FB2A afii57695;FB2B afii57700;FB4B afii57705;FB1F afii57716;05F0 afii57717;05F1 afii57718;05F2 afii57723;FB35 afii57793;05B4 afii57794;05B5 afii57795;05B6 afii57796;05BB afii57797;05B8 afii57798;05B7 afii57799;05B0 afii57800;05B2 afii57801;05B1 afii57802;05B3 afii57803;05C2 afii57804;05C1 afii57806;05B9 afii57807;05BC afii57839;05BD afii57841;05BF afii57842;05C0 
afii57929;02BC afii61248;2105 afii61289;2113 afii61352;2116 afii61573;202C afii61574;202D afii61575;202E afii61664;200C afii63167;066D afii64937;02BD agrave;00E0 agujarati;0A85 agurmukhi;0A05 ahiragana;3042 ahookabove;1EA3 aibengali;0990 aibopomofo;311E aideva;0910 aiecyrillic;04D5 aigujarati;0A90 aigurmukhi;0A10 aimatragurmukhi;0A48 ainarabic;0639 ainfinalarabic;FECA aininitialarabic;FECB ainmedialarabic;FECC ainvertedbreve;0203 aivowelsignbengali;09C8 aivowelsigndeva;0948 aivowelsigngujarati;0AC8 akatakana;30A2 akatakanahalfwidth;FF71 akorean;314F alef;05D0 alefarabic;0627 alefdageshhebrew;FB30 aleffinalarabic;FE8E alefhamzaabovearabic;0623 alefhamzaabovefinalarabic;FE84 alefhamzabelowarabic;0625 alefhamzabelowfinalarabic;FE88 alefhebrew;05D0 aleflamedhebrew;FB4F alefmaddaabovearabic;0622 alefmaddaabovefinalarabic;FE82 alefmaksuraarabic;0649 alefmaksurafinalarabic;FEF0 alefmaksurainitialarabic;FEF3 alefmaksuramedialarabic;FEF4 alefpatahhebrew;FB2E alefqamatshebrew;FB2F aleph;2135 allequal;224C alpha;03B1 alphatonos;03AC amacron;0101 amonospace;FF41 ampersand;0026 ampersandmonospace;FF06 ampersandsmall;F726 amsquare;33C2 anbopomofo;3122 angbopomofo;3124 angkhankhuthai;0E5A angle;2220 anglebracketleft;3008 anglebracketleftvertical;FE3F anglebracketright;3009 anglebracketrightvertical;FE40 angleleft;2329 angleright;232A angstrom;212B anoteleia;0387 anudattadeva;0952 anusvarabengali;0982 anusvaradeva;0902 anusvaragujarati;0A82 aogonek;0105 apaatosquare;3300 aparen;249C apostrophearmenian;055A apostrophemod;02BC apple;F8FF approaches;2250 approxequal;2248 approxequalorimage;2252 approximatelyequal;2245 araeaekorean;318E araeakorean;318D arc;2312 arighthalfring;1E9A aring;00E5 aringacute;01FB aringbelow;1E01 arrowboth;2194 arrowdashdown;21E3 arrowdashleft;21E0 arrowdashright;21E2 arrowdashup;21E1 arrowdblboth;21D4 arrowdbldown;21D3 arrowdblleft;21D0 arrowdblright;21D2 arrowdblup;21D1 arrowdown;2193 arrowdownleft;2199 arrowdownright;2198 arrowdownwhite;21E9 
arrowheaddownmod;02C5 arrowheadleftmod;02C2 arrowheadrightmod;02C3 arrowheadupmod;02C4 arrowhorizex;F8E7 arrowleft;2190 arrowleftdbl;21D0 arrowleftdblstroke;21CD arrowleftoverright;21C6 arrowleftwhite;21E6 arrowright;2192 arrowrightdblstroke;21CF arrowrightheavy;279E arrowrightoverleft;21C4 arrowrightwhite;21E8 arrowtableft;21E4 arrowtabright;21E5 arrowup;2191 arrowupdn;2195 arrowupdnbse;21A8 arrowupdownbase;21A8 arrowupleft;2196 arrowupleftofdown;21C5 arrowupright;2197 arrowupwhite;21E7 arrowvertex;F8E6 asciicircum;005E asciicircummonospace;FF3E asciitilde;007E asciitildemonospace;FF5E ascript;0251 ascriptturned;0252 asmallhiragana;3041 asmallkatakana;30A1 asmallkatakanahalfwidth;FF67 asterisk;002A asteriskaltonearabic;066D asteriskarabic;066D asteriskmath;2217 asteriskmonospace;FF0A asterisksmall;FE61 asterism;2042 asuperior;F6E9 asymptoticallyequal;2243 at;0040 atilde;00E3 atmonospace;FF20 atsmall;FE6B aturned;0250 aubengali;0994 aubopomofo;3120 audeva;0914 augujarati;0A94 augurmukhi;0A14 aulengthmarkbengali;09D7 aumatragurmukhi;0A4C auvowelsignbengali;09CC auvowelsigndeva;094C auvowelsigngujarati;0ACC avagrahadeva;093D aybarmenian;0561 ayin;05E2 ayinaltonehebrew;FB20 ayinhebrew;05E2 b;0062 babengali;09AC backslash;005C backslashmonospace;FF3C badeva;092C bagujarati;0AAC bagurmukhi;0A2C bahiragana;3070 bahtthai;0E3F bakatakana;30D0 bar;007C barmonospace;FF5C bbopomofo;3105 bcircle;24D1 bdotaccent;1E03 bdotbelow;1E05 beamedsixteenthnotes;266C because;2235 becyrillic;0431 beharabic;0628 behfinalarabic;FE90 behinitialarabic;FE91 behiragana;3079 behmedialarabic;FE92 behmeeminitialarabic;FC9F behmeemisolatedarabic;FC08 behnoonfinalarabic;FC6D bekatakana;30D9 benarmenian;0562 bet;05D1 beta;03B2 betasymbolgreek;03D0 betdagesh;FB31 betdageshhebrew;FB31 bethebrew;05D1 betrafehebrew;FB4C bhabengali;09AD bhadeva;092D bhagujarati;0AAD bhagurmukhi;0A2D bhook;0253 bihiragana;3073 bikatakana;30D3 bilabialclick;0298 bindigurmukhi;0A02 birusquare;3331 blackcircle;25CF 
blackdiamond;25C6 blackdownpointingtriangle;25BC blackleftpointingpointer;25C4 blackleftpointingtriangle;25C0 blacklenticularbracketleft;3010 blacklenticularbracketleftvertical;FE3B blacklenticularbracketright;3011 blacklenticularbracketrightvertical;FE3C blacklowerlefttriangle;25E3 blacklowerrighttriangle;25E2 blackrectangle;25AC blackrightpointingpointer;25BA blackrightpointingtriangle;25B6 blacksmallsquare;25AA blacksmilingface;263B blacksquare;25A0 blackstar;2605 blackupperlefttriangle;25E4 blackupperrighttriangle;25E5 blackuppointingsmalltriangle;25B4 blackuppointingtriangle;25B2 blank;2423 blinebelow;1E07 block;2588 bmonospace;FF42 bobaimaithai;0E1A bohiragana;307C bokatakana;30DC bparen;249D bqsquare;33C3 braceex;F8F4 braceleft;007B braceleftbt;F8F3 braceleftmid;F8F2 braceleftmonospace;FF5B braceleftsmall;FE5B bracelefttp;F8F1 braceleftvertical;FE37 braceright;007D bracerightbt;F8FE bracerightmid;F8FD bracerightmonospace;FF5D bracerightsmall;FE5C bracerighttp;F8FC bracerightvertical;FE38 bracketleft;005B bracketleftbt;F8F0 bracketleftex;F8EF bracketleftmonospace;FF3B bracketlefttp;F8EE bracketright;005D bracketrightbt;F8FB bracketrightex;F8FA bracketrightmonospace;FF3D bracketrighttp;F8F9 breve;02D8 brevebelowcmb;032E brevecmb;0306 breveinvertedbelowcmb;032F breveinvertedcmb;0311 breveinverteddoublecmb;0361 bridgebelowcmb;032A bridgeinvertedbelowcmb;033A brokenbar;00A6 bstroke;0180 bsuperior;F6EA btopbar;0183 buhiragana;3076 bukatakana;30D6 bullet;2022 bulletinverse;25D8 bulletoperator;2219 bullseye;25CE c;0063 caarmenian;056E cabengali;099A cacute;0107 cadeva;091A cagujarati;0A9A cagurmukhi;0A1A calsquare;3388 candrabindubengali;0981 candrabinducmb;0310 candrabindudeva;0901 candrabindugujarati;0A81 capslock;21EA careof;2105 caron;02C7 caronbelowcmb;032C caroncmb;030C carriagereturn;21B5 cbopomofo;3118 ccaron;010D ccedilla;00E7 ccedillaacute;1E09 ccircle;24D2 ccircumflex;0109 ccurl;0255 cdot;010B cdotaccent;010B cdsquare;33C5 cedilla;00B8 cedillacmb;0327 
cent;00A2 centigrade;2103 centinferior;F6DF centmonospace;FFE0 centoldstyle;F7A2 centsuperior;F6E0 chaarmenian;0579 chabengali;099B chadeva;091B chagujarati;0A9B chagurmukhi;0A1B chbopomofo;3114 cheabkhasiancyrillic;04BD checkmark;2713 checyrillic;0447 chedescenderabkhasiancyrillic;04BF chedescendercyrillic;04B7 chedieresiscyrillic;04F5 cheharmenian;0573 chekhakassiancyrillic;04CC cheverticalstrokecyrillic;04B9 chi;03C7 chieuchacirclekorean;3277 chieuchaparenkorean;3217 chieuchcirclekorean;3269 chieuchkorean;314A chieuchparenkorean;3209 chochangthai;0E0A chochanthai;0E08 chochingthai;0E09 chochoethai;0E0C chook;0188 cieucacirclekorean;3276 cieucaparenkorean;3216 cieuccirclekorean;3268 cieuckorean;3148 cieucparenkorean;3208 cieucuparenkorean;321C circle;25CB circlemultiply;2297 circleot;2299 circleplus;2295 circlepostalmark;3036 circlewithlefthalfblack;25D0 circlewithrighthalfblack;25D1 circumflex;02C6 circumflexbelowcmb;032D circumflexcmb;0302 clear;2327 clickalveolar;01C2 clickdental;01C0 clicklateral;01C1 clickretroflex;01C3 club;2663 clubsuitblack;2663 clubsuitwhite;2667 cmcubedsquare;33A4 cmonospace;FF43 cmsquaredsquare;33A0 coarmenian;0581 colon;003A colonmonetary;20A1 colonmonospace;FF1A colonsign;20A1 colonsmall;FE55 colontriangularhalfmod;02D1 colontriangularmod;02D0 comma;002C commaabovecmb;0313 commaaboverightcmb;0315 commaaccent;F6C3 commaarabic;060C commaarmenian;055D commainferior;F6E1 commamonospace;FF0C commareversedabovecmb;0314 commareversedmod;02BD commasmall;FE50 commasuperior;F6E2 commaturnedabovecmb;0312 commaturnedmod;02BB compass;263C congruent;2245 contourintegral;222E control;2303 controlACK;0006 controlBEL;0007 controlBS;0008 controlCAN;0018 controlCR;000D controlDC1;0011 controlDC2;0012 controlDC3;0013 controlDC4;0014 controlDEL;007F controlDLE;0010 controlEM;0019 controlENQ;0005 controlEOT;0004 controlESC;001B controlETB;0017 controlETX;0003 controlFF;000C controlFS;001C controlGS;001D controlHT;0009 controlLF;000A controlNAK;0015 
controlRS;001E controlSI;000F controlSO;000E controlSOT;0002 controlSTX;0001 controlSUB;001A controlSYN;0016 controlUS;001F controlVT;000B copyright;00A9 copyrightsans;F8E9 copyrightserif;F6D9 cornerbracketleft;300C cornerbracketlefthalfwidth;FF62 cornerbracketleftvertical;FE41 cornerbracketright;300D cornerbracketrighthalfwidth;FF63 cornerbracketrightvertical;FE42 corporationsquare;337F cosquare;33C7 coverkgsquare;33C6 cparen;249E cruzeiro;20A2 cstretched;0297 curlyand;22CF curlyor;22CE currency;00A4 cyrBreve;F6D1 cyrFlex;F6D2 cyrbreve;F6D4 cyrflex;F6D5 d;0064 daarmenian;0564 dabengali;09A6 dadarabic;0636 dadeva;0926 dadfinalarabic;FEBE dadinitialarabic;FEBF dadmedialarabic;FEC0 dagesh;05BC dageshhebrew;05BC dagger;2020 daggerdbl;2021 dagujarati;0AA6 dagurmukhi;0A26 dahiragana;3060 dakatakana;30C0 dalarabic;062F dalet;05D3 daletdagesh;FB33 daletdageshhebrew;FB33 dalethatafpatah;05D3 05B2 dalethatafpatahhebrew;05D3 05B2 dalethatafsegol;05D3 05B1 dalethatafsegolhebrew;05D3 05B1 dalethebrew;05D3 dalethiriq;05D3 05B4 dalethiriqhebrew;05D3 05B4 daletholam;05D3 05B9 daletholamhebrew;05D3 05B9 daletpatah;05D3 05B7 daletpatahhebrew;05D3 05B7 daletqamats;05D3 05B8 daletqamatshebrew;05D3 05B8 daletqubuts;05D3 05BB daletqubutshebrew;05D3 05BB daletsegol;05D3 05B6 daletsegolhebrew;05D3 05B6 daletsheva;05D3 05B0 daletshevahebrew;05D3 05B0 dalettsere;05D3 05B5 dalettserehebrew;05D3 05B5 dalfinalarabic;FEAA dammaarabic;064F dammalowarabic;064F dammatanaltonearabic;064C dammatanarabic;064C danda;0964 dargahebrew;05A7 dargalefthebrew;05A7 dasiapneumatacyrilliccmb;0485 dblGrave;F6D3 dblanglebracketleft;300A dblanglebracketleftvertical;FE3D dblanglebracketright;300B dblanglebracketrightvertical;FE3E dblarchinvertedbelowcmb;032B dblarrowleft;21D4 dblarrowright;21D2 dbldanda;0965 dblgrave;F6D6 dblgravecmb;030F dblintegral;222C dbllowline;2017 dbllowlinecmb;0333 dbloverlinecmb;033F dblprimemod;02BA dblverticalbar;2016 dblverticallineabovecmb;030E dbopomofo;3109 dbsquare;33C8 
dcaron;010F dcedilla;1E11 dcircle;24D3 dcircumflexbelow;1E13 dcroat;0111 ddabengali;09A1 ddadeva;0921 ddagujarati;0AA1 ddagurmukhi;0A21 ddalarabic;0688 ddalfinalarabic;FB89 dddhadeva;095C ddhabengali;09A2 ddhadeva;0922 ddhagujarati;0AA2 ddhagurmukhi;0A22 ddotaccent;1E0B ddotbelow;1E0D decimalseparatorarabic;066B decimalseparatorpersian;066B decyrillic;0434 degree;00B0 dehihebrew;05AD dehiragana;3067 deicoptic;03EF dekatakana;30C7 deleteleft;232B deleteright;2326 delta;03B4 deltaturned;018D denominatorminusonenumeratorbengali;09F8 dezh;02A4 dhabengali;09A7 dhadeva;0927 dhagujarati;0AA7 dhagurmukhi;0A27 dhook;0257 dialytikatonos;0385 dialytikatonoscmb;0344 diamond;2666 diamondsuitwhite;2662 dieresis;00A8 dieresisacute;F6D7 dieresisbelowcmb;0324 dieresiscmb;0308 dieresisgrave;F6D8 dieresistonos;0385 dihiragana;3062 dikatakana;30C2 dittomark;3003 divide;00F7 divides;2223 divisionslash;2215 djecyrillic;0452 dkshade;2593 dlinebelow;1E0F dlsquare;3397 dmacron;0111 dmonospace;FF44 dnblock;2584 dochadathai;0E0E dodekthai;0E14 dohiragana;3069 dokatakana;30C9 dollar;0024 dollarinferior;F6E3 dollarmonospace;FF04 dollaroldstyle;F724 dollarsmall;FE69 dollarsuperior;F6E4 dong;20AB dorusquare;3326 dotaccent;02D9 dotaccentcmb;0307 dotbelowcmb;0323 dotbelowcomb;0323 dotkatakana;30FB dotlessi;0131 dotlessj;F6BE dotlessjstrokehook;0284 dotmath;22C5 dottedcircle;25CC doubleyodpatah;FB1F doubleyodpatahhebrew;FB1F downtackbelowcmb;031E downtackmod;02D5 dparen;249F dsuperior;F6EB dtail;0256 dtopbar;018C duhiragana;3065 dukatakana;30C5 dz;01F3 dzaltone;02A3 dzcaron;01C6 dzcurl;02A5 dzeabkhasiancyrillic;04E1 dzecyrillic;0455 dzhecyrillic;045F e;0065 eacute;00E9 earth;2641 ebengali;098F ebopomofo;311C ebreve;0115 ecandradeva;090D ecandragujarati;0A8D ecandravowelsigndeva;0945 ecandravowelsigngujarati;0AC5 ecaron;011B ecedillabreve;1E1D echarmenian;0565 echyiwnarmenian;0587 ecircle;24D4 ecircumflex;00EA ecircumflexacute;1EBF ecircumflexbelow;1E19 ecircumflexdotbelow;1EC7 ecircumflexgrave;1EC1 
ecircumflexhookabove;1EC3 ecircumflextilde;1EC5 ecyrillic;0454 edblgrave;0205 edeva;090F edieresis;00EB edot;0117 edotaccent;0117 edotbelow;1EB9 eegurmukhi;0A0F eematragurmukhi;0A47 efcyrillic;0444 egrave;00E8 egujarati;0A8F eharmenian;0567 ehbopomofo;311D ehiragana;3048 ehookabove;1EBB eibopomofo;311F eight;0038 eightarabic;0668 eightbengali;09EE eightcircle;2467 eightcircleinversesansserif;2791 eightdeva;096E eighteencircle;2471 eighteenparen;2485 eighteenperiod;2499 eightgujarati;0AEE eightgurmukhi;0A6E eighthackarabic;0668 eighthangzhou;3028 eighthnotebeamed;266B eightideographicparen;3227 eightinferior;2088 eightmonospace;FF18 eightoldstyle;F738 eightparen;247B eightperiod;248F eightpersian;06F8 eightroman;2177 eightsuperior;2078 eightthai;0E58 einvertedbreve;0207 eiotifiedcyrillic;0465 ekatakana;30A8 ekatakanahalfwidth;FF74 ekonkargurmukhi;0A74 ekorean;3154 elcyrillic;043B element;2208 elevencircle;246A elevenparen;247E elevenperiod;2492 elevenroman;217A ellipsis;2026 ellipsisvertical;22EE emacron;0113 emacronacute;1E17 emacrongrave;1E15 emcyrillic;043C emdash;2014 emdashvertical;FE31 emonospace;FF45 emphasismarkarmenian;055B emptyset;2205 enbopomofo;3123 encyrillic;043D endash;2013 endashvertical;FE32 endescendercyrillic;04A3 eng;014B engbopomofo;3125 enghecyrillic;04A5 enhookcyrillic;04C8 enspace;2002 eogonek;0119 eokorean;3153 eopen;025B eopenclosed;029A eopenreversed;025C eopenreversedclosed;025E eopenreversedhook;025D eparen;24A0 epsilon;03B5 epsilontonos;03AD equal;003D equalmonospace;FF1D equalsmall;FE66 equalsuperior;207C equivalence;2261 erbopomofo;3126 ercyrillic;0440 ereversed;0258 ereversedcyrillic;044D escyrillic;0441 esdescendercyrillic;04AB esh;0283 eshcurl;0286 eshortdeva;090E eshortvowelsigndeva;0946 eshreversedloop;01AA eshsquatreversed;0285 esmallhiragana;3047 esmallkatakana;30A7 esmallkatakanahalfwidth;FF6A estimated;212E esuperior;F6EC eta;03B7 etarmenian;0568 etatonos;03AE eth;00F0 etilde;1EBD etildebelow;1E1B etnahtafoukhhebrew;0591 
etnahtafoukhlefthebrew;0591 etnahtahebrew;0591 etnahtalefthebrew;0591 eturned;01DD eukorean;3161 euro;20AC evowelsignbengali;09C7 evowelsigndeva;0947 evowelsigngujarati;0AC7 exclam;0021 exclamarmenian;055C exclamdbl;203C exclamdown;00A1 exclamdownsmall;F7A1 exclammonospace;FF01 exclamsmall;F721 existential;2203 ezh;0292 ezhcaron;01EF ezhcurl;0293 ezhreversed;01B9 ezhtail;01BA f;0066 fadeva;095E fagurmukhi;0A5E fahrenheit;2109 fathaarabic;064E fathalowarabic;064E fathatanarabic;064B fbopomofo;3108 fcircle;24D5 fdotaccent;1E1F feharabic;0641 feharmenian;0586 fehfinalarabic;FED2 fehinitialarabic;FED3 fehmedialarabic;FED4 feicoptic;03E5 female;2640 ff;FB00 ffi;FB03 ffl;FB04 fi;FB01 fifteencircle;246E fifteenparen;2482 fifteenperiod;2496 figuredash;2012 filledbox;25A0 filledrect;25AC finalkaf;05DA finalkafdagesh;FB3A finalkafdageshhebrew;FB3A finalkafhebrew;05DA finalkafqamats;05DA 05B8 finalkafqamatshebrew;05DA 05B8 finalkafsheva;05DA 05B0 finalkafshevahebrew;05DA 05B0 finalmem;05DD finalmemhebrew;05DD finalnun;05DF finalnunhebrew;05DF finalpe;05E3 finalpehebrew;05E3 finaltsadi;05E5 finaltsadihebrew;05E5 firsttonechinese;02C9 fisheye;25C9 fitacyrillic;0473 five;0035 fivearabic;0665 fivebengali;09EB fivecircle;2464 fivecircleinversesansserif;278E fivedeva;096B fiveeighths;215D fivegujarati;0AEB fivegurmukhi;0A6B fivehackarabic;0665 fivehangzhou;3025 fiveideographicparen;3224 fiveinferior;2085 fivemonospace;FF15 fiveoldstyle;F735 fiveparen;2478 fiveperiod;248C fivepersian;06F5 fiveroman;2174 fivesuperior;2075 fivethai;0E55 fl;FB02 florin;0192 fmonospace;FF46 fmsquare;3399 fofanthai;0E1F fofathai;0E1D fongmanthai;0E4F forall;2200 four;0034 fourarabic;0664 fourbengali;09EA fourcircle;2463 fourcircleinversesansserif;278D fourdeva;096A fourgujarati;0AEA fourgurmukhi;0A6A fourhackarabic;0664 fourhangzhou;3024 fourideographicparen;3223 fourinferior;2084 fourmonospace;FF14 fournumeratorbengali;09F7 fouroldstyle;F734 fourparen;2477 fourperiod;248B fourpersian;06F4 fourroman;2173 
foursuperior;2074 fourteencircle;246D fourteenparen;2481 fourteenperiod;2495 fourthai;0E54 fourthtonechinese;02CB fparen;24A1 fraction;2044 franc;20A3 g;0067 gabengali;0997 gacute;01F5 gadeva;0917 gafarabic;06AF gaffinalarabic;FB93 gafinitialarabic;FB94 gafmedialarabic;FB95 gagujarati;0A97 gagurmukhi;0A17 gahiragana;304C gakatakana;30AC gamma;03B3 gammalatinsmall;0263 gammasuperior;02E0 gangiacoptic;03EB gbopomofo;310D gbreve;011F gcaron;01E7 gcedilla;0123 gcircle;24D6 gcircumflex;011D gcommaaccent;0123 gdot;0121 gdotaccent;0121 gecyrillic;0433 gehiragana;3052 gekatakana;30B2 geometricallyequal;2251 gereshaccenthebrew;059C gereshhebrew;05F3 gereshmuqdamhebrew;059D germandbls;00DF gershayimaccenthebrew;059E gershayimhebrew;05F4 getamark;3013 ghabengali;0998 ghadarmenian;0572 ghadeva;0918 ghagujarati;0A98 ghagurmukhi;0A18 ghainarabic;063A ghainfinalarabic;FECE ghaininitialarabic;FECF ghainmedialarabic;FED0 ghemiddlehookcyrillic;0495 ghestrokecyrillic;0493 gheupturncyrillic;0491 ghhadeva;095A ghhagurmukhi;0A5A ghook;0260 ghzsquare;3393 gihiragana;304E gikatakana;30AE gimarmenian;0563 gimel;05D2 gimeldagesh;FB32 gimeldageshhebrew;FB32 gimelhebrew;05D2 gjecyrillic;0453 glottalinvertedstroke;01BE glottalstop;0294 glottalstopinverted;0296 glottalstopmod;02C0 glottalstopreversed;0295 glottalstopreversedmod;02C1 glottalstopreversedsuperior;02E4 glottalstopstroke;02A1 glottalstopstrokereversed;02A2 gmacron;1E21 gmonospace;FF47 gohiragana;3054 gokatakana;30B4 gparen;24A2 gpasquare;33AC gradient;2207 grave;0060 gravebelowcmb;0316 gravecmb;0300 gravecomb;0300 gravedeva;0953 gravelowmod;02CE gravemonospace;FF40 gravetonecmb;0340 greater;003E greaterequal;2265 greaterequalorless;22DB greatermonospace;FF1E greaterorequivalent;2273 greaterorless;2277 greateroverequal;2267 greatersmall;FE65 gscript;0261 gstroke;01E5 guhiragana;3050 guillemotleft;00AB guillemotright;00BB guilsinglleft;2039 guilsinglright;203A gukatakana;30B0 guramusquare;3318 gysquare;33C9 h;0068 
haabkhasiancyrillic;04A9 haaltonearabic;06C1 habengali;09B9 hadescendercyrillic;04B3 hadeva;0939 hagujarati;0AB9 hagurmukhi;0A39 haharabic;062D hahfinalarabic;FEA2 hahinitialarabic;FEA3 hahiragana;306F hahmedialarabic;FEA4 haitusquare;332A hakatakana;30CF hakatakanahalfwidth;FF8A halantgurmukhi;0A4D hamzaarabic;0621 hamzadammaarabic;0621 064F hamzadammatanarabic;0621 064C hamzafathaarabic;0621 064E hamzafathatanarabic;0621 064B hamzalowarabic;0621 hamzalowkasraarabic;0621 0650 hamzalowkasratanarabic;0621 064D hamzasukunarabic;0621 0652 hangulfiller;3164 hardsigncyrillic;044A harpoonleftbarbup;21BC harpoonrightbarbup;21C0 hasquare;33CA hatafpatah;05B2 hatafpatah16;05B2 hatafpatah23;05B2 hatafpatah2f;05B2 hatafpatahhebrew;05B2 hatafpatahnarrowhebrew;05B2 hatafpatahquarterhebrew;05B2 hatafpatahwidehebrew;05B2 hatafqamats;05B3 hatafqamats1b;05B3 hatafqamats28;05B3 hatafqamats34;05B3 hatafqamatshebrew;05B3 hatafqamatsnarrowhebrew;05B3 hatafqamatsquarterhebrew;05B3 hatafqamatswidehebrew;05B3 hatafsegol;05B1 hatafsegol17;05B1 hatafsegol24;05B1 hatafsegol30;05B1 hatafsegolhebrew;05B1 hatafsegolnarrowhebrew;05B1 hatafsegolquarterhebrew;05B1 hatafsegolwidehebrew;05B1 hbar;0127 hbopomofo;310F hbrevebelow;1E2B hcedilla;1E29 hcircle;24D7 hcircumflex;0125 hdieresis;1E27 hdotaccent;1E23 hdotbelow;1E25 he;05D4 heart;2665 heartsuitblack;2665 heartsuitwhite;2661 hedagesh;FB34 hedageshhebrew;FB34 hehaltonearabic;06C1 heharabic;0647 hehebrew;05D4 hehfinalaltonearabic;FBA7 hehfinalalttwoarabic;FEEA hehfinalarabic;FEEA hehhamzaabovefinalarabic;FBA5 hehhamzaaboveisolatedarabic;FBA4 hehinitialaltonearabic;FBA8 hehinitialarabic;FEEB hehiragana;3078 hehmedialaltonearabic;FBA9 hehmedialarabic;FEEC heiseierasquare;337B hekatakana;30D8 hekatakanahalfwidth;FF8D hekutaarusquare;3336 henghook;0267 herutusquare;3339 het;05D7 hethebrew;05D7 hhook;0266 hhooksuperior;02B1 hieuhacirclekorean;327B hieuhaparenkorean;321B hieuhcirclekorean;326D hieuhkorean;314E hieuhparenkorean;320D hihiragana;3072 
hikatakana;30D2 hikatakanahalfwidth;FF8B hiriq;05B4 hiriq14;05B4 hiriq21;05B4 hiriq2d;05B4 hiriqhebrew;05B4 hiriqnarrowhebrew;05B4 hiriqquarterhebrew;05B4 hiriqwidehebrew;05B4 hlinebelow;1E96 hmonospace;FF48 hoarmenian;0570 hohipthai;0E2B hohiragana;307B hokatakana;30DB hokatakanahalfwidth;FF8E holam;05B9 holam19;05B9 holam26;05B9 holam32;05B9 holamhebrew;05B9 holamnarrowhebrew;05B9 holamquarterhebrew;05B9 holamwidehebrew;05B9 honokhukthai;0E2E hookabovecomb;0309 hookcmb;0309 hookpalatalizedbelowcmb;0321 hookretroflexbelowcmb;0322 hoonsquare;3342 horicoptic;03E9 horizontalbar;2015 horncmb;031B hotsprings;2668 house;2302 hparen;24A3 hsuperior;02B0 hturned;0265 huhiragana;3075 huiitosquare;3333 hukatakana;30D5 hukatakanahalfwidth;FF8C hungarumlaut;02DD hungarumlautcmb;030B hv;0195 hyphen;002D hypheninferior;F6E5 hyphenmonospace;FF0D hyphensmall;FE63 hyphensuperior;F6E6 hyphentwo;2010 i;0069 iacute;00ED iacyrillic;044F ibengali;0987 ibopomofo;3127 ibreve;012D icaron;01D0 icircle;24D8 icircumflex;00EE icyrillic;0456 idblgrave;0209 ideographearthcircle;328F ideographfirecircle;328B ideographicallianceparen;323F ideographiccallparen;323A ideographiccentrecircle;32A5 ideographicclose;3006 ideographiccomma;3001 ideographiccommaleft;FF64 ideographiccongratulationparen;3237 ideographiccorrectcircle;32A3 ideographicearthparen;322F ideographicenterpriseparen;323D ideographicexcellentcircle;329D ideographicfestivalparen;3240 ideographicfinancialcircle;3296 ideographicfinancialparen;3236 ideographicfireparen;322B ideographichaveparen;3232 ideographichighcircle;32A4 ideographiciterationmark;3005 ideographiclaborcircle;3298 ideographiclaborparen;3238 ideographicleftcircle;32A7 ideographiclowcircle;32A6 ideographicmedicinecircle;32A9 ideographicmetalparen;322E ideographicmoonparen;322A ideographicnameparen;3234 ideographicperiod;3002 ideographicprintcircle;329E ideographicreachparen;3243 ideographicrepresentparen;3239 ideographicresourceparen;323E ideographicrightcircle;32A8 
ideographicsecretcircle;3299 ideographicselfparen;3242 ideographicsocietyparen;3233 ideographicspace;3000 ideographicspecialparen;3235 ideographicstockparen;3231 ideographicstudyparen;323B ideographicsunparen;3230 ideographicsuperviseparen;323C ideographicwaterparen;322C ideographicwoodparen;322D ideographiczero;3007 ideographmetalcircle;328E ideographmooncircle;328A ideographnamecircle;3294 ideographsuncircle;3290 ideographwatercircle;328C ideographwoodcircle;328D ideva;0907 idieresis;00EF idieresisacute;1E2F idieresiscyrillic;04E5 idotbelow;1ECB iebrevecyrillic;04D7 iecyrillic;0435 ieungacirclekorean;3275 ieungaparenkorean;3215 ieungcirclekorean;3267 ieungkorean;3147 ieungparenkorean;3207 igrave;00EC igujarati;0A87 igurmukhi;0A07 ihiragana;3044 ihookabove;1EC9 iibengali;0988 iicyrillic;0438 iideva;0908 iigujarati;0A88 iigurmukhi;0A08 iimatragurmukhi;0A40 iinvertedbreve;020B iishortcyrillic;0439 iivowelsignbengali;09C0 iivowelsigndeva;0940 iivowelsigngujarati;0AC0 ij;0133 ikatakana;30A4 ikatakanahalfwidth;FF72 ikorean;3163 ilde;02DC iluyhebrew;05AC imacron;012B imacroncyrillic;04E3 imageorapproximatelyequal;2253 imatragurmukhi;0A3F imonospace;FF49 increment;2206 infinity;221E iniarmenian;056B integral;222B integralbottom;2321 integralbt;2321 integralex;F8F5 integraltop;2320 integraltp;2320 intersection;2229 intisquare;3305 invbullet;25D8 invcircle;25D9 invsmileface;263B iocyrillic;0451 iogonek;012F iota;03B9 iotadieresis;03CA iotadieresistonos;0390 iotalatin;0269 iotatonos;03AF iparen;24A4 irigurmukhi;0A72 ismallhiragana;3043 ismallkatakana;30A3 ismallkatakanahalfwidth;FF68 issharbengali;09FA istroke;0268 isuperior;F6ED iterationhiragana;309D iterationkatakana;30FD itilde;0129 itildebelow;1E2D iubopomofo;3129 iucyrillic;044E ivowelsignbengali;09BF ivowelsigndeva;093F ivowelsigngujarati;0ABF izhitsacyrillic;0475 izhitsadblgravecyrillic;0477 j;006A jaarmenian;0571 jabengali;099C jadeva;091C jagujarati;0A9C jagurmukhi;0A1C jbopomofo;3110 jcaron;01F0 jcircle;24D9 
jcircumflex;0135 jcrossedtail;029D jdotlessstroke;025F jecyrillic;0458 jeemarabic;062C jeemfinalarabic;FE9E jeeminitialarabic;FE9F jeemmedialarabic;FEA0 jeharabic;0698 jehfinalarabic;FB8B jhabengali;099D jhadeva;091D jhagujarati;0A9D jhagurmukhi;0A1D jheharmenian;057B jis;3004 jmonospace;FF4A jparen;24A5 jsuperior;02B2 k;006B kabashkircyrillic;04A1 kabengali;0995 kacute;1E31 kacyrillic;043A kadescendercyrillic;049B kadeva;0915 kaf;05DB kafarabic;0643 kafdagesh;FB3B kafdageshhebrew;FB3B kaffinalarabic;FEDA kafhebrew;05DB kafinitialarabic;FEDB kafmedialarabic;FEDC kafrafehebrew;FB4D kagujarati;0A95 kagurmukhi;0A15 kahiragana;304B kahookcyrillic;04C4 kakatakana;30AB kakatakanahalfwidth;FF76 kappa;03BA kappasymbolgreek;03F0 kapyeounmieumkorean;3171 kapyeounphieuphkorean;3184 kapyeounpieupkorean;3178 kapyeounssangpieupkorean;3179 karoriisquare;330D kashidaautoarabic;0640 kashidaautonosidebearingarabic;0640 kasmallkatakana;30F5 kasquare;3384 kasraarabic;0650 kasratanarabic;064D kastrokecyrillic;049F katahiraprolongmarkhalfwidth;FF70 kaverticalstrokecyrillic;049D kbopomofo;310E kcalsquare;3389 kcaron;01E9 kcedilla;0137 kcircle;24DA kcommaaccent;0137 kdotbelow;1E33 keharmenian;0584 kehiragana;3051 kekatakana;30B1 kekatakanahalfwidth;FF79 kenarmenian;056F kesmallkatakana;30F6 kgreenlandic;0138 khabengali;0996 khacyrillic;0445 khadeva;0916 khagujarati;0A96 khagurmukhi;0A16 khaharabic;062E khahfinalarabic;FEA6 khahinitialarabic;FEA7 khahmedialarabic;FEA8 kheicoptic;03E7 khhadeva;0959 khhagurmukhi;0A59 khieukhacirclekorean;3278 khieukhaparenkorean;3218 khieukhcirclekorean;326A khieukhkorean;314B khieukhparenkorean;320A khokhaithai;0E02 khokhonthai;0E05 khokhuatthai;0E03 khokhwaithai;0E04 khomutthai;0E5B khook;0199 khorakhangthai;0E06 khzsquare;3391 kihiragana;304D kikatakana;30AD kikatakanahalfwidth;FF77 kiroguramusquare;3315 kiromeetorusquare;3316 kirosquare;3314 kiyeokacirclekorean;326E kiyeokaparenkorean;320E kiyeokcirclekorean;3260 kiyeokkorean;3131 kiyeokparenkorean;3200 
kiyeoksioskorean;3133 kjecyrillic;045C klinebelow;1E35 klsquare;3398 kmcubedsquare;33A6 kmonospace;FF4B kmsquaredsquare;33A2 kohiragana;3053 kohmsquare;33C0 kokaithai;0E01 kokatakana;30B3 kokatakanahalfwidth;FF7A kooposquare;331E koppacyrillic;0481 koreanstandardsymbol;327F koroniscmb;0343 kparen;24A6 kpasquare;33AA ksicyrillic;046F ktsquare;33CF kturned;029E kuhiragana;304F kukatakana;30AF kukatakanahalfwidth;FF78 kvsquare;33B8 kwsquare;33BE l;006C labengali;09B2 lacute;013A ladeva;0932 lagujarati;0AB2 lagurmukhi;0A32 lakkhangyaothai;0E45 lamaleffinalarabic;FEFC lamalefhamzaabovefinalarabic;FEF8 lamalefhamzaaboveisolatedarabic;FEF7 lamalefhamzabelowfinalarabic;FEFA lamalefhamzabelowisolatedarabic;FEF9 lamalefisolatedarabic;FEFB lamalefmaddaabovefinalarabic;FEF6 lamalefmaddaaboveisolatedarabic;FEF5 lamarabic;0644 lambda;03BB lambdastroke;019B lamed;05DC lameddagesh;FB3C lameddageshhebrew;FB3C lamedhebrew;05DC lamedholam;05DC 05B9 lamedholamdagesh;05DC 05B9 05BC lamedholamdageshhebrew;05DC 05B9 05BC lamedholamhebrew;05DC 05B9 lamfinalarabic;FEDE lamhahinitialarabic;FCCA laminitialarabic;FEDF lamjeeminitialarabic;FCC9 lamkhahinitialarabic;FCCB lamlamhehisolatedarabic;FDF2 lammedialarabic;FEE0 lammeemhahinitialarabic;FD88 lammeeminitialarabic;FCCC lammeemjeeminitialarabic;FEDF FEE4 FEA0 lammeemkhahinitialarabic;FEDF FEE4 FEA8 largecircle;25EF lbar;019A lbelt;026C lbopomofo;310C lcaron;013E lcedilla;013C lcircle;24DB lcircumflexbelow;1E3D lcommaaccent;013C ldot;0140 ldotaccent;0140 ldotbelow;1E37 ldotbelowmacron;1E39 leftangleabovecmb;031A lefttackbelowcmb;0318 less;003C lessequal;2264 lessequalorgreater;22DA lessmonospace;FF1C lessorequivalent;2272 lessorgreater;2276 lessoverequal;2266 lesssmall;FE64 lezh;026E lfblock;258C lhookretroflex;026D lira;20A4 liwnarmenian;056C lj;01C9 ljecyrillic;0459 ll;F6C0 lladeva;0933 llagujarati;0AB3 llinebelow;1E3B llladeva;0934 llvocalicbengali;09E1 llvocalicdeva;0961 llvocalicvowelsignbengali;09E3 llvocalicvowelsigndeva;0963 
lmiddletilde;026B lmonospace;FF4C lmsquare;33D0 lochulathai;0E2C logicaland;2227 logicalnot;00AC logicalnotreversed;2310 logicalor;2228 lolingthai;0E25 longs;017F lowlinecenterline;FE4E lowlinecmb;0332 lowlinedashed;FE4D lozenge;25CA lparen;24A7 lslash;0142 lsquare;2113 lsuperior;F6EE ltshade;2591 luthai;0E26 lvocalicbengali;098C lvocalicdeva;090C lvocalicvowelsignbengali;09E2 lvocalicvowelsigndeva;0962 lxsquare;33D3 m;006D mabengali;09AE macron;00AF macronbelowcmb;0331 macroncmb;0304 macronlowmod;02CD macronmonospace;FFE3 macute;1E3F madeva;092E magujarati;0AAE magurmukhi;0A2E mahapakhhebrew;05A4 mahapakhlefthebrew;05A4 mahiragana;307E maichattawalowleftthai;F895 maichattawalowrightthai;F894 maichattawathai;0E4B maichattawaupperleftthai;F893 maieklowleftthai;F88C maieklowrightthai;F88B maiekthai;0E48 maiekupperleftthai;F88A maihanakatleftthai;F884 maihanakatthai;0E31 maitaikhuleftthai;F889 maitaikhuthai;0E47 maitholowleftthai;F88F maitholowrightthai;F88E maithothai;0E49 maithoupperleftthai;F88D maitrilowleftthai;F892 maitrilowrightthai;F891 maitrithai;0E4A maitriupperleftthai;F890 maiyamokthai;0E46 makatakana;30DE makatakanahalfwidth;FF8F male;2642 mansyonsquare;3347 maqafhebrew;05BE mars;2642 masoracirclehebrew;05AF masquare;3383 mbopomofo;3107 mbsquare;33D4 mcircle;24DC mcubedsquare;33A5 mdotaccent;1E41 mdotbelow;1E43 meemarabic;0645 meemfinalarabic;FEE2 meeminitialarabic;FEE3 meemmedialarabic;FEE4 meemmeeminitialarabic;FCD1 meemmeemisolatedarabic;FC48 meetorusquare;334D mehiragana;3081 meizierasquare;337E mekatakana;30E1 mekatakanahalfwidth;FF92 mem;05DE memdagesh;FB3E memdageshhebrew;FB3E memhebrew;05DE menarmenian;0574 merkhahebrew;05A5 merkhakefulahebrew;05A6 merkhakefulalefthebrew;05A6 merkhalefthebrew;05A5 mhook;0271 mhzsquare;3392 middledotkatakanahalfwidth;FF65 middot;00B7 mieumacirclekorean;3272 mieumaparenkorean;3212 mieumcirclekorean;3264 mieumkorean;3141 mieumpansioskorean;3170 mieumparenkorean;3204 mieumpieupkorean;316E mieumsioskorean;316F 
mihiragana;307F mikatakana;30DF mikatakanahalfwidth;FF90 minus;2212 minusbelowcmb;0320 minuscircle;2296 minusmod;02D7 minusplus;2213 minute;2032 miribaarusquare;334A mirisquare;3349 mlonglegturned;0270 mlsquare;3396 mmcubedsquare;33A3 mmonospace;FF4D mmsquaredsquare;339F mohiragana;3082 mohmsquare;33C1 mokatakana;30E2 mokatakanahalfwidth;FF93 molsquare;33D6 momathai;0E21 moverssquare;33A7 moverssquaredsquare;33A8 mparen;24A8 mpasquare;33AB mssquare;33B3 msuperior;F6EF mturned;026F mu;00B5 mu1;00B5 muasquare;3382 muchgreater;226B muchless;226A mufsquare;338C mugreek;03BC mugsquare;338D muhiragana;3080 mukatakana;30E0 mukatakanahalfwidth;FF91 mulsquare;3395 multiply;00D7 mumsquare;339B munahhebrew;05A3 munahlefthebrew;05A3 musicalnote;266A musicalnotedbl;266B musicflatsign;266D musicsharpsign;266F mussquare;33B2 muvsquare;33B6 muwsquare;33BC mvmegasquare;33B9 mvsquare;33B7 mwmegasquare;33BF mwsquare;33BD n;006E nabengali;09A8 nabla;2207 nacute;0144 nadeva;0928 nagujarati;0AA8 nagurmukhi;0A28 nahiragana;306A nakatakana;30CA nakatakanahalfwidth;FF85 napostrophe;0149 nasquare;3381 nbopomofo;310B nbspace;00A0 ncaron;0148 ncedilla;0146 ncircle;24DD ncircumflexbelow;1E4B ncommaaccent;0146 ndotaccent;1E45 ndotbelow;1E47 nehiragana;306D nekatakana;30CD nekatakanahalfwidth;FF88 newsheqelsign;20AA nfsquare;338B ngabengali;0999 ngadeva;0919 ngagujarati;0A99 ngagurmukhi;0A19 ngonguthai;0E07 nhiragana;3093 nhookleft;0272 nhookretroflex;0273 nieunacirclekorean;326F nieunaparenkorean;320F nieuncieuckorean;3135 nieuncirclekorean;3261 nieunhieuhkorean;3136 nieunkorean;3134 nieunpansioskorean;3168 nieunparenkorean;3201 nieunsioskorean;3167 nieuntikeutkorean;3166 nihiragana;306B nikatakana;30CB nikatakanahalfwidth;FF86 nikhahitleftthai;F899 nikhahitthai;0E4D nine;0039 ninearabic;0669 ninebengali;09EF ninecircle;2468 ninecircleinversesansserif;2792 ninedeva;096F ninegujarati;0AEF ninegurmukhi;0A6F ninehackarabic;0669 ninehangzhou;3029 nineideographicparen;3228 nineinferior;2089 
ninemonospace;FF19 nineoldstyle;F739 nineparen;247C nineperiod;2490 ninepersian;06F9 nineroman;2178 ninesuperior;2079 nineteencircle;2472 nineteenparen;2486 nineteenperiod;249A ninethai;0E59 nj;01CC njecyrillic;045A nkatakana;30F3 nkatakanahalfwidth;FF9D nlegrightlong;019E nlinebelow;1E49 nmonospace;FF4E nmsquare;339A nnabengali;09A3 nnadeva;0923 nnagujarati;0AA3 nnagurmukhi;0A23 nnnadeva;0929 nohiragana;306E nokatakana;30CE nokatakanahalfwidth;FF89 nonbreakingspace;00A0 nonenthai;0E13 nonuthai;0E19 noonarabic;0646 noonfinalarabic;FEE6 noonghunnaarabic;06BA noonghunnafinalarabic;FB9F noonhehinitialarabic;FEE7 FEEC nooninitialarabic;FEE7 noonjeeminitialarabic;FCD2 noonjeemisolatedarabic;FC4B noonmedialarabic;FEE8 noonmeeminitialarabic;FCD5 noonmeemisolatedarabic;FC4E noonnoonfinalarabic;FC8D notcontains;220C notelement;2209 notelementof;2209 notequal;2260 notgreater;226F notgreaternorequal;2271 notgreaternorless;2279 notidentical;2262 notless;226E notlessnorequal;2270 notparallel;2226 notprecedes;2280 notsubset;2284 notsucceeds;2281 notsuperset;2285 nowarmenian;0576 nparen;24A9 nssquare;33B1 nsuperior;207F ntilde;00F1 nu;03BD nuhiragana;306C nukatakana;30CC nukatakanahalfwidth;FF87 nuktabengali;09BC nuktadeva;093C nuktagujarati;0ABC nuktagurmukhi;0A3C numbersign;0023 numbersignmonospace;FF03 numbersignsmall;FE5F numeralsigngreek;0374 numeralsignlowergreek;0375 numero;2116 nun;05E0 nundagesh;FB40 nundageshhebrew;FB40 nunhebrew;05E0 nvsquare;33B5 nwsquare;33BB nyabengali;099E nyadeva;091E nyagujarati;0A9E nyagurmukhi;0A1E o;006F oacute;00F3 oangthai;0E2D obarred;0275 obarredcyrillic;04E9 obarreddieresiscyrillic;04EB obengali;0993 obopomofo;311B obreve;014F ocandradeva;0911 ocandragujarati;0A91 ocandravowelsigndeva;0949 ocandravowelsigngujarati;0AC9 ocaron;01D2 ocircle;24DE ocircumflex;00F4 ocircumflexacute;1ED1 ocircumflexdotbelow;1ED9 ocircumflexgrave;1ED3 ocircumflexhookabove;1ED5 ocircumflextilde;1ED7 ocyrillic;043E odblacute;0151 odblgrave;020D odeva;0913 
odieresis;00F6 odieresiscyrillic;04E7 odotbelow;1ECD oe;0153 oekorean;315A ogonek;02DB ogonekcmb;0328 ograve;00F2 ogujarati;0A93 oharmenian;0585 ohiragana;304A ohookabove;1ECF ohorn;01A1 ohornacute;1EDB ohorndotbelow;1EE3 ohorngrave;1EDD ohornhookabove;1EDF ohorntilde;1EE1 ohungarumlaut;0151 oi;01A3 oinvertedbreve;020F okatakana;30AA okatakanahalfwidth;FF75 okorean;3157 olehebrew;05AB omacron;014D omacronacute;1E53 omacrongrave;1E51 omdeva;0950 omega;03C9 omega1;03D6 omegacyrillic;0461 omegalatinclosed;0277 omegaroundcyrillic;047B omegatitlocyrillic;047D omegatonos;03CE omgujarati;0AD0 omicron;03BF omicrontonos;03CC omonospace;FF4F one;0031 onearabic;0661 onebengali;09E7 onecircle;2460 onecircleinversesansserif;278A onedeva;0967 onedotenleader;2024 oneeighth;215B onefitted;F6DC onegujarati;0AE7 onegurmukhi;0A67 onehackarabic;0661 onehalf;00BD onehangzhou;3021 oneideographicparen;3220 oneinferior;2081 onemonospace;FF11 onenumeratorbengali;09F4 oneoldstyle;F731 oneparen;2474 oneperiod;2488 onepersian;06F1 onequarter;00BC oneroman;2170 onesuperior;00B9 onethai;0E51 onethird;2153 oogonek;01EB oogonekmacron;01ED oogurmukhi;0A13 oomatragurmukhi;0A4B oopen;0254 oparen;24AA openbullet;25E6 option;2325 ordfeminine;00AA ordmasculine;00BA orthogonal;221F oshortdeva;0912 oshortvowelsigndeva;094A oslash;00F8 oslashacute;01FF osmallhiragana;3049 osmallkatakana;30A9 osmallkatakanahalfwidth;FF6B ostrokeacute;01FF osuperior;F6F0 otcyrillic;047F otilde;00F5 otildeacute;1E4D otildedieresis;1E4F oubopomofo;3121 overline;203E overlinecenterline;FE4A overlinecmb;0305 overlinedashed;FE49 overlinedblwavy;FE4C overlinewavy;FE4B overscore;00AF ovowelsignbengali;09CB ovowelsigndeva;094B ovowelsigngujarati;0ACB p;0070 paampssquare;3380 paasentosquare;332B pabengali;09AA pacute;1E55 padeva;092A pagedown;21DF pageup;21DE pagujarati;0AAA pagurmukhi;0A2A pahiragana;3071 paiyannoithai;0E2F pakatakana;30D1 palatalizationcyrilliccmb;0484 palochkacyrillic;04C0 pansioskorean;317F paragraph;00B6 
parallel;2225 parenleft;0028 parenleftaltonearabic;FD3E parenleftbt;F8ED parenleftex;F8EC parenleftinferior;208D parenleftmonospace;FF08 parenleftsmall;FE59 parenleftsuperior;207D parenlefttp;F8EB parenleftvertical;FE35 parenright;0029 parenrightaltonearabic;FD3F parenrightbt;F8F8 parenrightex;F8F7 parenrightinferior;208E parenrightmonospace;FF09 parenrightsmall;FE5A parenrightsuperior;207E parenrighttp;F8F6 parenrightvertical;FE36 partialdiff;2202 paseqhebrew;05C0 pashtahebrew;0599 pasquare;33A9 patah;05B7 patah11;05B7 patah1d;05B7 patah2a;05B7 patahhebrew;05B7 patahnarrowhebrew;05B7 patahquarterhebrew;05B7 patahwidehebrew;05B7 pazerhebrew;05A1 pbopomofo;3106 pcircle;24DF pdotaccent;1E57 pe;05E4 pecyrillic;043F pedagesh;FB44 pedageshhebrew;FB44 peezisquare;333B pefinaldageshhebrew;FB43 peharabic;067E peharmenian;057A pehebrew;05E4 pehfinalarabic;FB57 pehinitialarabic;FB58 pehiragana;307A pehmedialarabic;FB59 pekatakana;30DA pemiddlehookcyrillic;04A7 perafehebrew;FB4E percent;0025 percentarabic;066A percentmonospace;FF05 percentsmall;FE6A period;002E periodarmenian;0589 periodcentered;00B7 periodhalfwidth;FF61 periodinferior;F6E7 periodmonospace;FF0E periodsmall;FE52 periodsuperior;F6E8 perispomenigreekcmb;0342 perpendicular;22A5 perthousand;2030 peseta;20A7 pfsquare;338A phabengali;09AB phadeva;092B phagujarati;0AAB phagurmukhi;0A2B phi;03C6 phi1;03D5 phieuphacirclekorean;327A phieuphaparenkorean;321A phieuphcirclekorean;326C phieuphkorean;314D phieuphparenkorean;320C philatin;0278 phinthuthai;0E3A phisymbolgreek;03D5 phook;01A5 phophanthai;0E1E phophungthai;0E1C phosamphaothai;0E20 pi;03C0 pieupacirclekorean;3273 pieupaparenkorean;3213 pieupcieuckorean;3176 pieupcirclekorean;3265 pieupkiyeokkorean;3172 pieupkorean;3142 pieupparenkorean;3205 pieupsioskiyeokkorean;3174 pieupsioskorean;3144 pieupsiostikeutkorean;3175 pieupthieuthkorean;3177 pieuptikeutkorean;3173 pihiragana;3074 pikatakana;30D4 pisymbolgreek;03D6 piwrarmenian;0583 plus;002B plusbelowcmb;031F 
pluscircle;2295 plusminus;00B1 plusmod;02D6 plusmonospace;FF0B plussmall;FE62 plussuperior;207A pmonospace;FF50 pmsquare;33D8 pohiragana;307D pointingindexdownwhite;261F pointingindexleftwhite;261C pointingindexrightwhite;261E pointingindexupwhite;261D pokatakana;30DD poplathai;0E1B postalmark;3012 postalmarkface;3020 pparen;24AB precedes;227A prescription;211E primemod;02B9 primereversed;2035 product;220F projective;2305 prolongedkana;30FC propellor;2318 propersubset;2282 propersuperset;2283 proportion;2237 proportional;221D psi;03C8 psicyrillic;0471 psilipneumatacyrilliccmb;0486 pssquare;33B0 puhiragana;3077 pukatakana;30D7 pvsquare;33B4 pwsquare;33BA q;0071 qadeva;0958 qadmahebrew;05A8 qafarabic;0642 qaffinalarabic;FED6 qafinitialarabic;FED7 qafmedialarabic;FED8 qamats;05B8 qamats10;05B8 qamats1a;05B8 qamats1c;05B8 qamats27;05B8 qamats29;05B8 qamats33;05B8 qamatsde;05B8 qamatshebrew;05B8 qamatsnarrowhebrew;05B8 qamatsqatanhebrew;05B8 qamatsqatannarrowhebrew;05B8 qamatsqatanquarterhebrew;05B8 qamatsqatanwidehebrew;05B8 qamatsquarterhebrew;05B8 qamatswidehebrew;05B8 qarneyparahebrew;059F qbopomofo;3111 qcircle;24E0 qhook;02A0 qmonospace;FF51 qof;05E7 qofdagesh;FB47 qofdageshhebrew;FB47 qofhatafpatah;05E7 05B2 qofhatafpatahhebrew;05E7 05B2 qofhatafsegol;05E7 05B1 qofhatafsegolhebrew;05E7 05B1 qofhebrew;05E7 qofhiriq;05E7 05B4 qofhiriqhebrew;05E7 05B4 qofholam;05E7 05B9 qofholamhebrew;05E7 05B9 qofpatah;05E7 05B7 qofpatahhebrew;05E7 05B7 qofqamats;05E7 05B8 qofqamatshebrew;05E7 05B8 qofqubuts;05E7 05BB qofqubutshebrew;05E7 05BB qofsegol;05E7 05B6 qofsegolhebrew;05E7 05B6 qofsheva;05E7 05B0 qofshevahebrew;05E7 05B0 qoftsere;05E7 05B5 qoftserehebrew;05E7 05B5 qparen;24AC quarternote;2669 qubuts;05BB qubuts18;05BB qubuts25;05BB qubuts31;05BB qubutshebrew;05BB qubutsnarrowhebrew;05BB qubutsquarterhebrew;05BB qubutswidehebrew;05BB question;003F questionarabic;061F questionarmenian;055E questiondown;00BF questiondownsmall;F7BF questiongreek;037E questionmonospace;FF1F 
questionsmall;F73F quotedbl;0022 quotedblbase;201E quotedblleft;201C quotedblmonospace;FF02 quotedblprime;301E quotedblprimereversed;301D quotedblright;201D quoteleft;2018 quoteleftreversed;201B quotereversed;201B quoteright;2019 quoterightn;0149 quotesinglbase;201A quotesingle;0027 quotesinglemonospace;FF07 r;0072 raarmenian;057C rabengali;09B0 racute;0155 radeva;0930 radical;221A radicalex;F8E5 radoverssquare;33AE radoverssquaredsquare;33AF radsquare;33AD rafe;05BF rafehebrew;05BF ragujarati;0AB0 ragurmukhi;0A30 rahiragana;3089 rakatakana;30E9 rakatakanahalfwidth;FF97 ralowerdiagonalbengali;09F1 ramiddlediagonalbengali;09F0 ramshorn;0264 ratio;2236 rbopomofo;3116 rcaron;0159 rcedilla;0157 rcircle;24E1 rcommaaccent;0157 rdblgrave;0211 rdotaccent;1E59 rdotbelow;1E5B rdotbelowmacron;1E5D referencemark;203B reflexsubset;2286 reflexsuperset;2287 registered;00AE registersans;F8E8 registerserif;F6DA reharabic;0631 reharmenian;0580 rehfinalarabic;FEAE rehiragana;308C rehyehaleflamarabic;0631 FEF3 FE8E 0644 rekatakana;30EC rekatakanahalfwidth;FF9A resh;05E8 reshdageshhebrew;FB48 reshhatafpatah;05E8 05B2 reshhatafpatahhebrew;05E8 05B2 reshhatafsegol;05E8 05B1 reshhatafsegolhebrew;05E8 05B1 reshhebrew;05E8 reshhiriq;05E8 05B4 reshhiriqhebrew;05E8 05B4 reshholam;05E8 05B9 reshholamhebrew;05E8 05B9 reshpatah;05E8 05B7 reshpatahhebrew;05E8 05B7 reshqamats;05E8 05B8 reshqamatshebrew;05E8 05B8 reshqubuts;05E8 05BB reshqubutshebrew;05E8 05BB reshsegol;05E8 05B6 reshsegolhebrew;05E8 05B6 reshsheva;05E8 05B0 reshshevahebrew;05E8 05B0 reshtsere;05E8 05B5 reshtserehebrew;05E8 05B5 reversedtilde;223D reviahebrew;0597 reviamugrashhebrew;0597 revlogicalnot;2310 rfishhook;027E rfishhookreversed;027F rhabengali;09DD rhadeva;095D rho;03C1 rhook;027D rhookturned;027B rhookturnedsuperior;02B5 rhosymbolgreek;03F1 rhotichookmod;02DE rieulacirclekorean;3271 rieulaparenkorean;3211 rieulcirclekorean;3263 rieulhieuhkorean;3140 rieulkiyeokkorean;313A rieulkiyeoksioskorean;3169 rieulkorean;3139 
rieulmieumkorean;313B rieulpansioskorean;316C rieulparenkorean;3203 rieulphieuphkorean;313F rieulpieupkorean;313C rieulpieupsioskorean;316B rieulsioskorean;313D rieulthieuthkorean;313E rieultikeutkorean;316A rieulyeorinhieuhkorean;316D rightangle;221F righttackbelowcmb;0319 righttriangle;22BF rihiragana;308A rikatakana;30EA rikatakanahalfwidth;FF98 ring;02DA ringbelowcmb;0325 ringcmb;030A ringhalfleft;02BF ringhalfleftarmenian;0559 ringhalfleftbelowcmb;031C ringhalfleftcentered;02D3 ringhalfright;02BE ringhalfrightbelowcmb;0339 ringhalfrightcentered;02D2 rinvertedbreve;0213 rittorusquare;3351 rlinebelow;1E5F rlongleg;027C rlonglegturned;027A rmonospace;FF52 rohiragana;308D rokatakana;30ED rokatakanahalfwidth;FF9B roruathai;0E23 rparen;24AD rrabengali;09DC rradeva;0931 rragurmukhi;0A5C rreharabic;0691 rrehfinalarabic;FB8D rrvocalicbengali;09E0 rrvocalicdeva;0960 rrvocalicgujarati;0AE0 rrvocalicvowelsignbengali;09C4 rrvocalicvowelsigndeva;0944 rrvocalicvowelsigngujarati;0AC4 rsuperior;F6F1 rtblock;2590 rturned;0279 rturnedsuperior;02B4 ruhiragana;308B rukatakana;30EB rukatakanahalfwidth;FF99 rupeemarkbengali;09F2 rupeesignbengali;09F3 rupiah;F6DD ruthai;0E24 rvocalicbengali;098B rvocalicdeva;090B rvocalicgujarati;0A8B rvocalicvowelsignbengali;09C3 rvocalicvowelsigndeva;0943 rvocalicvowelsigngujarati;0AC3 s;0073 sabengali;09B8 sacute;015B sacutedotaccent;1E65 sadarabic;0635 sadeva;0938 sadfinalarabic;FEBA sadinitialarabic;FEBB sadmedialarabic;FEBC sagujarati;0AB8 sagurmukhi;0A38 sahiragana;3055 sakatakana;30B5 sakatakanahalfwidth;FF7B sallallahoualayhewasallamarabic;FDFA samekh;05E1 samekhdagesh;FB41 samekhdageshhebrew;FB41 samekhhebrew;05E1 saraaathai;0E32 saraaethai;0E41 saraaimaimalaithai;0E44 saraaimaimuanthai;0E43 saraamthai;0E33 saraathai;0E30 saraethai;0E40 saraiileftthai;F886 saraiithai;0E35 saraileftthai;F885 saraithai;0E34 saraothai;0E42 saraueeleftthai;F888 saraueethai;0E37 saraueleftthai;F887 sarauethai;0E36 sarauthai;0E38 sarauuthai;0E39 sbopomofo;3119 
scaron;0161 scarondotaccent;1E67 scedilla;015F schwa;0259 schwacyrillic;04D9 schwadieresiscyrillic;04DB schwahook;025A scircle;24E2 scircumflex;015D scommaaccent;0219 sdotaccent;1E61 sdotbelow;1E63 sdotbelowdotaccent;1E69 seagullbelowcmb;033C second;2033 secondtonechinese;02CA section;00A7 seenarabic;0633 seenfinalarabic;FEB2 seeninitialarabic;FEB3 seenmedialarabic;FEB4 segol;05B6 segol13;05B6 segol1f;05B6 segol2c;05B6 segolhebrew;05B6 segolnarrowhebrew;05B6 segolquarterhebrew;05B6 segoltahebrew;0592 segolwidehebrew;05B6 seharmenian;057D sehiragana;305B sekatakana;30BB sekatakanahalfwidth;FF7E semicolon;003B semicolonarabic;061B semicolonmonospace;FF1B semicolonsmall;FE54 semivoicedmarkkana;309C semivoicedmarkkanahalfwidth;FF9F sentisquare;3322 sentosquare;3323 seven;0037 sevenarabic;0667 sevenbengali;09ED sevencircle;2466 sevencircleinversesansserif;2790 sevendeva;096D seveneighths;215E sevengujarati;0AED sevengurmukhi;0A6D sevenhackarabic;0667 sevenhangzhou;3027 sevenideographicparen;3226 seveninferior;2087 sevenmonospace;FF17 sevenoldstyle;F737 sevenparen;247A sevenperiod;248E sevenpersian;06F7 sevenroman;2176 sevensuperior;2077 seventeencircle;2470 seventeenparen;2484 seventeenperiod;2498 seventhai;0E57 sfthyphen;00AD shaarmenian;0577 shabengali;09B6 shacyrillic;0448 shaddaarabic;0651 shaddadammaarabic;FC61 shaddadammatanarabic;FC5E shaddafathaarabic;FC60 shaddafathatanarabic;0651 064B shaddakasraarabic;FC62 shaddakasratanarabic;FC5F shade;2592 shadedark;2593 shadelight;2591 shademedium;2592 shadeva;0936 shagujarati;0AB6 shagurmukhi;0A36 shalshelethebrew;0593 shbopomofo;3115 shchacyrillic;0449 sheenarabic;0634 sheenfinalarabic;FEB6 sheeninitialarabic;FEB7 sheenmedialarabic;FEB8 sheicoptic;03E3 sheqel;20AA sheqelhebrew;20AA sheva;05B0 sheva115;05B0 sheva15;05B0 sheva22;05B0 sheva2e;05B0 shevahebrew;05B0 shevanarrowhebrew;05B0 shevaquarterhebrew;05B0 shevawidehebrew;05B0 shhacyrillic;04BB shimacoptic;03ED shin;05E9 shindagesh;FB49 shindageshhebrew;FB49 
shindageshshindot;FB2C shindageshshindothebrew;FB2C shindageshsindot;FB2D shindageshsindothebrew;FB2D shindothebrew;05C1 shinhebrew;05E9 shinshindot;FB2A shinshindothebrew;FB2A shinsindot;FB2B shinsindothebrew;FB2B shook;0282 sigma;03C3 sigma1;03C2 sigmafinal;03C2 sigmalunatesymbolgreek;03F2 sihiragana;3057 sikatakana;30B7 sikatakanahalfwidth;FF7C siluqhebrew;05BD siluqlefthebrew;05BD similar;223C sindothebrew;05C2 siosacirclekorean;3274 siosaparenkorean;3214 sioscieuckorean;317E sioscirclekorean;3266 sioskiyeokkorean;317A sioskorean;3145 siosnieunkorean;317B siosparenkorean;3206 siospieupkorean;317D siostikeutkorean;317C six;0036 sixarabic;0666 sixbengali;09EC sixcircle;2465 sixcircleinversesansserif;278F sixdeva;096C sixgujarati;0AEC sixgurmukhi;0A6C sixhackarabic;0666 sixhangzhou;3026 sixideographicparen;3225 sixinferior;2086 sixmonospace;FF16 sixoldstyle;F736 sixparen;2479 sixperiod;248D sixpersian;06F6 sixroman;2175 sixsuperior;2076 sixteencircle;246F sixteencurrencydenominatorbengali;09F9 sixteenparen;2483 sixteenperiod;2497 sixthai;0E56 slash;002F slashmonospace;FF0F slong;017F slongdotaccent;1E9B smileface;263A smonospace;FF53 sofpasuqhebrew;05C3 softhyphen;00AD softsigncyrillic;044C sohiragana;305D sokatakana;30BD sokatakanahalfwidth;FF7F soliduslongoverlaycmb;0338 solidusshortoverlaycmb;0337 sorusithai;0E29 sosalathai;0E28 sosothai;0E0B sosuathai;0E2A space;0020 spacehackarabic;0020 spade;2660 spadesuitblack;2660 spadesuitwhite;2664 sparen;24AE squarebelowcmb;033B squarecc;33C4 squarecm;339D squarediagonalcrosshatchfill;25A9 squarehorizontalfill;25A4 squarekg;338F squarekm;339E squarekmcapital;33CE squareln;33D1 squarelog;33D2 squaremg;338E squaremil;33D5 squaremm;339C squaremsquared;33A1 squareorthogonalcrosshatchfill;25A6 squareupperlefttolowerrightfill;25A7 squareupperrighttolowerleftfill;25A8 squareverticalfill;25A5 squarewhitewithsmallblack;25A3 srsquare;33DB ssabengali;09B7 ssadeva;0937 ssagujarati;0AB7 ssangcieuckorean;3149 ssanghieuhkorean;3185 
ssangieungkorean;3180 ssangkiyeokkorean;3132 ssangnieunkorean;3165 ssangpieupkorean;3143 ssangsioskorean;3146 ssangtikeutkorean;3138 ssuperior;F6F2 sterling;00A3 sterlingmonospace;FFE1 strokelongoverlaycmb;0336 strokeshortoverlaycmb;0335 subset;2282 subsetnotequal;228A subsetorequal;2286 succeeds;227B suchthat;220B suhiragana;3059 sukatakana;30B9 sukatakanahalfwidth;FF7D sukunarabic;0652 summation;2211 sun;263C superset;2283 supersetnotequal;228B supersetorequal;2287 svsquare;33DC syouwaerasquare;337C t;0074 tabengali;09A4 tackdown;22A4 tackleft;22A3 tadeva;0924 tagujarati;0AA4 tagurmukhi;0A24 taharabic;0637 tahfinalarabic;FEC2 tahinitialarabic;FEC3 tahiragana;305F tahmedialarabic;FEC4 taisyouerasquare;337D takatakana;30BF takatakanahalfwidth;FF80 tatweelarabic;0640 tau;03C4 tav;05EA tavdages;FB4A tavdagesh;FB4A tavdageshhebrew;FB4A tavhebrew;05EA tbar;0167 tbopomofo;310A tcaron;0165 tccurl;02A8 tcedilla;0163 tcheharabic;0686 tchehfinalarabic;FB7B tchehinitialarabic;FB7C tchehmedialarabic;FB7D tchehmeeminitialarabic;FB7C FEE4 tcircle;24E3 tcircumflexbelow;1E71 tcommaaccent;0163 tdieresis;1E97 tdotaccent;1E6B tdotbelow;1E6D tecyrillic;0442 tedescendercyrillic;04AD teharabic;062A tehfinalarabic;FE96 tehhahinitialarabic;FCA2 tehhahisolatedarabic;FC0C tehinitialarabic;FE97 tehiragana;3066 tehjeeminitialarabic;FCA1 tehjeemisolatedarabic;FC0B tehmarbutaarabic;0629 tehmarbutafinalarabic;FE94 tehmedialarabic;FE98 tehmeeminitialarabic;FCA4 tehmeemisolatedarabic;FC0E tehnoonfinalarabic;FC73 tekatakana;30C6 tekatakanahalfwidth;FF83 telephone;2121 telephoneblack;260E telishagedolahebrew;05A0 telishaqetanahebrew;05A9 tencircle;2469 tenideographicparen;3229 tenparen;247D tenperiod;2491 tenroman;2179 tesh;02A7 tet;05D8 tetdagesh;FB38 tetdageshhebrew;FB38 tethebrew;05D8 tetsecyrillic;04B5 tevirhebrew;059B tevirlefthebrew;059B thabengali;09A5 thadeva;0925 thagujarati;0AA5 thagurmukhi;0A25 thalarabic;0630 thalfinalarabic;FEAC thanthakhatlowleftthai;F898 thanthakhatlowrightthai;F897 
thanthakhatthai;0E4C thanthakhatupperleftthai;F896 theharabic;062B thehfinalarabic;FE9A thehinitialarabic;FE9B thehmedialarabic;FE9C thereexists;2203 therefore;2234 theta;03B8 theta1;03D1 thetasymbolgreek;03D1 thieuthacirclekorean;3279 thieuthaparenkorean;3219 thieuthcirclekorean;326B thieuthkorean;314C thieuthparenkorean;320B thirteencircle;246C thirteenparen;2480 thirteenperiod;2494 thonangmonthothai;0E11 thook;01AD thophuthaothai;0E12 thorn;00FE thothahanthai;0E17 thothanthai;0E10 thothongthai;0E18 thothungthai;0E16 thousandcyrillic;0482 thousandsseparatorarabic;066C thousandsseparatorpersian;066C three;0033 threearabic;0663 threebengali;09E9 threecircle;2462 threecircleinversesansserif;278C threedeva;0969 threeeighths;215C threegujarati;0AE9 threegurmukhi;0A69 threehackarabic;0663 threehangzhou;3023 threeideographicparen;3222 threeinferior;2083 threemonospace;FF13 threenumeratorbengali;09F6 threeoldstyle;F733 threeparen;2476 threeperiod;248A threepersian;06F3 threequarters;00BE threequartersemdash;F6DE threeroman;2172 threesuperior;00B3 threethai;0E53 thzsquare;3394 tihiragana;3061 tikatakana;30C1 tikatakanahalfwidth;FF81 tikeutacirclekorean;3270 tikeutaparenkorean;3210 tikeutcirclekorean;3262 tikeutkorean;3137 tikeutparenkorean;3202 tilde;02DC tildebelowcmb;0330 tildecmb;0303 tildecomb;0303 tildedoublecmb;0360 tildeoperator;223C tildeoverlaycmb;0334 tildeverticalcmb;033E timescircle;2297 tipehahebrew;0596 tipehalefthebrew;0596 tippigurmukhi;0A70 titlocyrilliccmb;0483 tiwnarmenian;057F tlinebelow;1E6F tmonospace;FF54 toarmenian;0569 tohiragana;3068 tokatakana;30C8 tokatakanahalfwidth;FF84 tonebarextrahighmod;02E5 tonebarextralowmod;02E9 tonebarhighmod;02E6 tonebarlowmod;02E8 tonebarmidmod;02E7 tonefive;01BD tonesix;0185 tonetwo;01A8 tonos;0384 tonsquare;3327 topatakthai;0E0F tortoiseshellbracketleft;3014 tortoiseshellbracketleftsmall;FE5D tortoiseshellbracketleftvertical;FE39 tortoiseshellbracketright;3015 tortoiseshellbracketrightsmall;FE5E 
tortoiseshellbracketrightvertical;FE3A totaothai;0E15 tpalatalhook;01AB tparen;24AF trademark;2122 trademarksans;F8EA trademarkserif;F6DB tretroflexhook;0288 triagdn;25BC triaglf;25C4 triagrt;25BA triagup;25B2 ts;02A6 tsadi;05E6 tsadidagesh;FB46 tsadidageshhebrew;FB46 tsadihebrew;05E6 tsecyrillic;0446 tsere;05B5 tsere12;05B5 tsere1e;05B5 tsere2b;05B5 tserehebrew;05B5 tserenarrowhebrew;05B5 tserequarterhebrew;05B5 tserewidehebrew;05B5 tshecyrillic;045B tsuperior;F6F3 ttabengali;099F ttadeva;091F ttagujarati;0A9F ttagurmukhi;0A1F tteharabic;0679 ttehfinalarabic;FB67 ttehinitialarabic;FB68 ttehmedialarabic;FB69 tthabengali;09A0 tthadeva;0920 tthagujarati;0AA0 tthagurmukhi;0A20 tturned;0287 tuhiragana;3064 tukatakana;30C4 tukatakanahalfwidth;FF82 tusmallhiragana;3063 tusmallkatakana;30C3 tusmallkatakanahalfwidth;FF6F twelvecircle;246B twelveparen;247F twelveperiod;2493 twelveroman;217B twentycircle;2473 twentyhangzhou;5344 twentyparen;2487 twentyperiod;249B two;0032 twoarabic;0662 twobengali;09E8 twocircle;2461 twocircleinversesansserif;278B twodeva;0968 twodotenleader;2025 twodotleader;2025 twodotleadervertical;FE30 twogujarati;0AE8 twogurmukhi;0A68 twohackarabic;0662 twohangzhou;3022 twoideographicparen;3221 twoinferior;2082 twomonospace;FF12 twonumeratorbengali;09F5 twooldstyle;F732 twoparen;2475 twoperiod;2489 twopersian;06F2 tworoman;2171 twostroke;01BB twosuperior;00B2 twothai;0E52 twothirds;2154 u;0075 uacute;00FA ubar;0289 ubengali;0989 ubopomofo;3128 ubreve;016D ucaron;01D4 ucircle;24E4 ucircumflex;00FB ucircumflexbelow;1E77 ucyrillic;0443 udattadeva;0951 udblacute;0171 udblgrave;0215 udeva;0909 udieresis;00FC udieresisacute;01D8 udieresisbelow;1E73 udieresiscaron;01DA udieresiscyrillic;04F1 udieresisgrave;01DC udieresismacron;01D6 udotbelow;1EE5 ugrave;00F9 ugujarati;0A89 ugurmukhi;0A09 uhiragana;3046 uhookabove;1EE7 uhorn;01B0 uhornacute;1EE9 uhorndotbelow;1EF1 uhorngrave;1EEB uhornhookabove;1EED uhorntilde;1EEF uhungarumlaut;0171 uhungarumlautcyrillic;04F3 
uinvertedbreve;0217 ukatakana;30A6 ukatakanahalfwidth;FF73 ukcyrillic;0479 ukorean;315C umacron;016B umacroncyrillic;04EF umacrondieresis;1E7B umatragurmukhi;0A41 umonospace;FF55 underscore;005F underscoredbl;2017 underscoremonospace;FF3F underscorevertical;FE33 underscorewavy;FE4F union;222A universal;2200 uogonek;0173 uparen;24B0 upblock;2580 upperdothebrew;05C4 upsilon;03C5 upsilondieresis;03CB upsilondieresistonos;03B0 upsilonlatin;028A upsilontonos;03CD uptackbelowcmb;031D uptackmod;02D4 uragurmukhi;0A73 uring;016F ushortcyrillic;045E usmallhiragana;3045 usmallkatakana;30A5 usmallkatakanahalfwidth;FF69 ustraightcyrillic;04AF ustraightstrokecyrillic;04B1 utilde;0169 utildeacute;1E79 utildebelow;1E75 uubengali;098A uudeva;090A uugujarati;0A8A uugurmukhi;0A0A uumatragurmukhi;0A42 uuvowelsignbengali;09C2 uuvowelsigndeva;0942 uuvowelsigngujarati;0AC2 uvowelsignbengali;09C1 uvowelsigndeva;0941 uvowelsigngujarati;0AC1 v;0076 vadeva;0935 vagujarati;0AB5 vagurmukhi;0A35 vakatakana;30F7 vav;05D5 vavdagesh;FB35 vavdagesh65;FB35 vavdageshhebrew;FB35 vavhebrew;05D5 vavholam;FB4B vavholamhebrew;FB4B vavvavhebrew;05F0 vavyodhebrew;05F1 vcircle;24E5 vdotbelow;1E7F vecyrillic;0432 veharabic;06A4 vehfinalarabic;FB6B vehinitialarabic;FB6C vehmedialarabic;FB6D vekatakana;30F9 venus;2640 verticalbar;007C verticallineabovecmb;030D verticallinebelowcmb;0329 verticallinelowmod;02CC verticallinemod;02C8 vewarmenian;057E vhook;028B vikatakana;30F8 viramabengali;09CD viramadeva;094D viramagujarati;0ACD visargabengali;0983 visargadeva;0903 visargagujarati;0A83 vmonospace;FF56 voarmenian;0578 voicediterationhiragana;309E voicediterationkatakana;30FE voicedmarkkana;309B voicedmarkkanahalfwidth;FF9E vokatakana;30FA vparen;24B1 vtilde;1E7D vturned;028C vuhiragana;3094 vukatakana;30F4 w;0077 wacute;1E83 waekorean;3159 wahiragana;308F wakatakana;30EF wakatakanahalfwidth;FF9C wakorean;3158 wasmallhiragana;308E wasmallkatakana;30EE wattosquare;3357 wavedash;301C wavyunderscorevertical;FE34 
wawarabic;0648 wawfinalarabic;FEEE wawhamzaabovearabic;0624 wawhamzaabovefinalarabic;FE86 wbsquare;33DD wcircle;24E6 wcircumflex;0175 wdieresis;1E85 wdotaccent;1E87 wdotbelow;1E89 wehiragana;3091 weierstrass;2118 wekatakana;30F1 wekorean;315E weokorean;315D wgrave;1E81 whitebullet;25E6 whitecircle;25CB whitecircleinverse;25D9 whitecornerbracketleft;300E whitecornerbracketleftvertical;FE43 whitecornerbracketright;300F whitecornerbracketrightvertical;FE44 whitediamond;25C7 whitediamondcontainingblacksmalldiamond;25C8 whitedownpointingsmalltriangle;25BF whitedownpointingtriangle;25BD whiteleftpointingsmalltriangle;25C3 whiteleftpointingtriangle;25C1 whitelenticularbracketleft;3016 whitelenticularbracketright;3017 whiterightpointingsmalltriangle;25B9 whiterightpointingtriangle;25B7 whitesmallsquare;25AB whitesmilingface;263A whitesquare;25A1 whitestar;2606 whitetelephone;260F whitetortoiseshellbracketleft;3018 whitetortoiseshellbracketright;3019 whiteuppointingsmalltriangle;25B5 whiteuppointingtriangle;25B3 wihiragana;3090 wikatakana;30F0 wikorean;315F wmonospace;FF57 wohiragana;3092 wokatakana;30F2 wokatakanahalfwidth;FF66 won;20A9 wonmonospace;FFE6 wowaenthai;0E27 wparen;24B2 wring;1E98 wsuperior;02B7 wturned;028D wynn;01BF x;0078 xabovecmb;033D xbopomofo;3112 xcircle;24E7 xdieresis;1E8D xdotaccent;1E8B xeharmenian;056D xi;03BE xmonospace;FF58 xparen;24B3 xsuperior;02E3 y;0079 yaadosquare;334E yabengali;09AF yacute;00FD yadeva;092F yaekorean;3152 yagujarati;0AAF yagurmukhi;0A2F yahiragana;3084 yakatakana;30E4 yakatakanahalfwidth;FF94 yakorean;3151 yamakkanthai;0E4E yasmallhiragana;3083 yasmallkatakana;30E3 yasmallkatakanahalfwidth;FF6C yatcyrillic;0463 ycircle;24E8 ycircumflex;0177 ydieresis;00FF ydotaccent;1E8F ydotbelow;1EF5 yeharabic;064A yehbarreearabic;06D2 yehbarreefinalarabic;FBAF yehfinalarabic;FEF2 yehhamzaabovearabic;0626 yehhamzaabovefinalarabic;FE8A yehhamzaaboveinitialarabic;FE8B yehhamzaabovemedialarabic;FE8C yehinitialarabic;FEF3 yehmedialarabic;FEF4 
yehmeeminitialarabic;FCDD yehmeemisolatedarabic;FC58 yehnoonfinalarabic;FC94 yehthreedotsbelowarabic;06D1 yekorean;3156 yen;00A5 yenmonospace;FFE5 yeokorean;3155 yeorinhieuhkorean;3186 yerahbenyomohebrew;05AA yerahbenyomolefthebrew;05AA yericyrillic;044B yerudieresiscyrillic;04F9 yesieungkorean;3181 yesieungpansioskorean;3183 yesieungsioskorean;3182 yetivhebrew;059A ygrave;1EF3 yhook;01B4 yhookabove;1EF7 yiarmenian;0575 yicyrillic;0457 yikorean;3162 yinyang;262F yiwnarmenian;0582 ymonospace;FF59 yod;05D9 yoddagesh;FB39 yoddageshhebrew;FB39 yodhebrew;05D9 yodyodhebrew;05F2 yodyodpatahhebrew;FB1F yohiragana;3088 yoikorean;3189 yokatakana;30E8 yokatakanahalfwidth;FF96 yokorean;315B yosmallhiragana;3087 yosmallkatakana;30E7 yosmallkatakanahalfwidth;FF6E yotgreek;03F3 yoyaekorean;3188 yoyakorean;3187 yoyakthai;0E22 yoyingthai;0E0D yparen;24B4 ypogegrammeni;037A ypogegrammenigreekcmb;0345 yr;01A6 yring;1E99 ysuperior;02B8 ytilde;1EF9 yturned;028E yuhiragana;3086 yuikorean;318C yukatakana;30E6 yukatakanahalfwidth;FF95 yukorean;3160 yusbigcyrillic;046B yusbigiotifiedcyrillic;046D yuslittlecyrillic;0467 yuslittleiotifiedcyrillic;0469 yusmallhiragana;3085 yusmallkatakana;30E5 yusmallkatakanahalfwidth;FF6D yuyekorean;318B yuyeokorean;318A yyabengali;09DF yyadeva;095F z;007A zaarmenian;0566 zacute;017A zadeva;095B zagurmukhi;0A5B zaharabic;0638 zahfinalarabic;FEC6 zahinitialarabic;FEC7 zahiragana;3056 zahmedialarabic;FEC8 zainarabic;0632 zainfinalarabic;FEB0 zakatakana;30B6 zaqefgadolhebrew;0595 zaqefqatanhebrew;0594 zarqahebrew;0598 zayin;05D6 zayindagesh;FB36 zayindageshhebrew;FB36 zayinhebrew;05D6 zbopomofo;3117 zcaron;017E zcircle;24E9 zcircumflex;1E91 zcurl;0291 zdot;017C zdotaccent;017C zdotbelow;1E93 zecyrillic;0437 zedescendercyrillic;0499 zedieresiscyrillic;04DF zehiragana;305C zekatakana;30BC zero;0030 zeroarabic;0660 zerobengali;09E6 zerodeva;0966 zerogujarati;0AE6 zerogurmukhi;0A66 zerohackarabic;0660 zeroinferior;2080 zeromonospace;FF10 zerooldstyle;F730 
zeropersian;06F0 zerosuperior;2070 zerothai;0E50 zerowidthjoiner;FEFF zerowidthnonjoiner;200C zerowidthspace;200B zeta;03B6 zhbopomofo;3113 zhearmenian;056A zhebrevecyrillic;04C2 zhecyrillic;0436 zhedescendercyrillic;0497 zhedieresiscyrillic;04DD zihiragana;3058 zikatakana;30B8 zinorhebrew;05AE zlinebelow;1E95 zmonospace;FF5A zohiragana;305E zokatakana;30BE zparen;24B5 zretroflexhook;0290 zstroke;01B6 zuhiragana;305A zukatakana;30BA a100;275E a101;2761 a102;2762 a103;2763 a104;2764 a105;2710 a106;2765 a107;2766 a108;2767 a109;2660 a10;2721 a110;2665 a111;2666 a112;2663 a117;2709 a118;2708 a119;2707 a11;261B a120;2460 a121;2461 a122;2462 a123;2463 a124;2464 a125;2465 a126;2466 a127;2467 a128;2468 a129;2469 a12;261E a130;2776 a131;2777 a132;2778 a133;2779 a134;277A a135;277B a136;277C a137;277D a138;277E a139;277F a13;270C a140;2780 a141;2781 a142;2782 a143;2783 a144;2784 a145;2785 a146;2786 a147;2787 a148;2788 a149;2789 a14;270D a150;278A a151;278B a152;278C a153;278D a154;278E a155;278F a156;2790 a157;2791 a158;2792 a159;2793 a15;270E a160;2794 a161;2192 a162;27A3 a163;2194 a164;2195 a165;2799 a166;279B a167;279C a168;279D a169;279E a16;270F a170;279F a171;27A0 a172;27A1 a173;27A2 a174;27A4 a175;27A5 a176;27A6 a177;27A7 a178;27A8 a179;27A9 a17;2711 a180;27AB a181;27AD a182;27AF a183;27B2 a184;27B3 a185;27B5 a186;27B8 a187;27BA a188;27BB a189;27BC a18;2712 a190;27BD a191;27BE a192;279A a193;27AA a194;27B6 a195;27B9 a196;2798 a197;27B4 a198;27B7 a199;27AC a19;2713 a1;2701 a200;27AE a201;27B1 a202;2703 a203;2750 a204;2752 a205;276E a206;2770 a20;2714 a21;2715 a22;2716 a23;2717 a24;2718 a25;2719 a26;271A a27;271B a28;271C a29;2722 a2;2702 a30;2723 a31;2724 a32;2725 a33;2726 a34;2727 a35;2605 a36;2729 a37;272A a38;272B a39;272C a3;2704 a40;272D a41;272E a42;272F a43;2730 a44;2731 a45;2732 a46;2733 a47;2734 a48;2735 a49;2736 a4;260E a50;2737 a51;2738 a52;2739 a53;273A a54;273B a55;273C a56;273D a57;273E a58;273F a59;2740 a5;2706 a60;2741 a61;2742 a62;2743 a63;2744 
a64;2745 a65;2746 a66;2747 a67;2748 a68;2749 a69;274A a6;271D a70;274B a71;25CF a72;274D a73;25A0 a74;274F a75;2751 a76;25B2 a77;25BC a78;25C6 a79;2756 a7;271E a81;25D7 a82;2758 a83;2759 a84;275A a85;276F a86;2771 a87;2772 a88;2773 a89;2768 a8;271F a90;2769 a91;276C a92;276D a93;276A a94;276B a95;2774 a96;2775 a97;275B a98;275C a99;275D a9;2720 """ # string table management # class StringTable: def __init__( self, name_list, master_table_name ): self.names = name_list self.master_table = master_table_name self.indices = {} index = 0 for name in name_list: self.indices[name] = index index += len( name ) + 1 self.total = index def dump( self, file ): write = file.write write( " static const char " + self.master_table + "[" + repr( self.total ) + "] =\n" ) write( " {\n" ) line = "" for name in self.names: line += " '" line += string.join( ( re.findall( ".", name ) ), "','" ) line += "', 0,\n" write( line + " };\n\n\n" ) def dump_sublist( self, file, table_name, macro_name, sublist ): write = file.write write( "#define " + macro_name + " " + repr( len( sublist ) ) + "\n\n" ) write( " /* Values are offsets into the `" + self.master_table + "' table */\n\n" ) write( " static const short " + table_name + "[" + macro_name + "] =\n" ) write( " {\n" ) line = " " comma = "" col = 0 for name in sublist: line += comma line += "%4d" % self.indices[name] col += 1 comma = "," if col == 14: col = 0 comma = ",\n " write( line + "\n };\n\n\n" ) # We now store the Adobe Glyph List in compressed form. The list is put # into a data structure called `trie' (because it has a tree-like # appearance). Consider, for example, that you want to store the # following name mapping: # # A => 1 # Aacute => 6 # Abalon => 2 # Abstract => 4 # # It is possible to store the entries as follows. 
# # A => 1 # | # +-acute => 6 # | # +-b # | # +-alon => 2 # | # +-stract => 4 # # We see that each node in the trie has: # # - one or more `letters' # - an optional value # - zero or more child nodes # # The first step is to call # # root = StringNode( "", 0 ) # for word in map.values(): # root.add( word, map[word] ) # # which creates a large trie where each node has only one children. # # Executing # # root = root.optimize() # # optimizes the trie by merging the letters of successive nodes whenever # possible. # # Each node of the trie is stored as follows. # # - First the node's letter, according to the following scheme. We # use the fact that in the AGL no name contains character codes > 127. # # name bitsize description # ---------------------------------------------------------------- # notlast 1 Set to 1 if this is not the last letter # in the word. # ascii 7 The letter's ASCII value. # # - The letter is followed by a children count and the value of the # current key (if any). Again we can do some optimization because all # AGL entries are from the BMP; this means that 16 bits are sufficient # to store its Unicode values. Additionally, no node has more than # 127 children. # # name bitsize description # ----------------------------------------- # hasvalue 1 Set to 1 if a 16-bit Unicode value follows. # num_children 7 Number of children. Can be 0 only if # `hasvalue' is set to 1. # value 16 Optional Unicode value. # # - A node is finished by a list of 16bit absolute offsets to the # children, which must be sorted in increasing order of their first # letter. # # For simplicity, all 16bit quantities are stored in big-endian order. # # The root node has first letter = 0, and no value. 
# class StringNode: def __init__( self, letter, value ): self.letter = letter self.value = value self.children = {} def __cmp__( self, other ): return ord( self.letter[0] ) - ord( other.letter[0] ) def add( self, word, value ): if len( word ) == 0: self.value = value return letter = word[0] word = word[1:] if self.children.has_key( letter ): child = self.children[letter] else: child = StringNode( letter, 0 ) self.children[letter] = child child.add( word, value ) def optimize( self ): # optimize all children first children = self.children.values() self.children = {} for child in children: self.children[child.letter[0]] = child.optimize() # don't optimize if there's a value, # if we don't have any child or if we # have more than one child if ( self.value != 0 ) or ( not children ) or len( children ) > 1: return self child = children[0] self.letter += child.letter self.value = child.value self.children = child.children return self def dump_debug( self, write, margin ): # this is used during debugging line = margin + "+-" if len( self.letter ) == 0: line += "<NOLETTER>" else: line += self.letter if self.value: line += " => " + repr( self.value ) write( line + "\n" ) if self.children: margin += "| " for child in self.children.values(): child.dump_debug( write, margin ) def locate( self, index ): self.index = index if len( self.letter ) > 0: index += len( self.letter ) + 1 else: index += 2 if self.value != 0: index += 2 children = self.children.values() children.sort() index += 2 * len( children ) for child in children: index = child.locate( index ) return index def store( self, storage ): # write the letters l = len( self.letter ) if l == 0: storage += struct.pack( "B", 0 ) else: for n in range( l ): val = ord( self.letter[n] ) if n < l - 1: val += 128 storage += struct.pack( "B", val ) # write the count children = self.children.values() children.sort() count = len( children ) if self.value != 0: storage += struct.pack( "!BH", count + 128, self.value ) else: storage += 
struct.pack( "B", count ) for child in children: storage += struct.pack( "!H", child.index ) for child in children: storage = child.store( storage ) return storage def adobe_glyph_values(): """return the list of glyph names and their unicode values""" lines = string.split( adobe_glyph_list, '\n' ) glyphs = [] values = [] for line in lines: if line: fields = string.split( line, ';' ) # print fields[1] + ' - ' + fields[0] subfields = string.split( fields[1], ' ' ) if len( subfields ) == 1: glyphs.append( fields[0] ) values.append( fields[1] ) return glyphs, values def filter_glyph_names( alist, filter ): """filter `alist' by taking _out_ all glyph names that are in `filter'""" count = 0 extras = [] for name in alist: try: filtered_index = filter.index( name ) except: extras.append( name ) return extras def dump_encoding( file, encoding_name, encoding_list ): """dump a given encoding""" write = file.write write( " /* the following are indices into the SID name table */\n" ) write( " static const unsigned short " + encoding_name + "[" + repr( len( encoding_list ) ) + "] =\n" ) write( " {\n" ) line = " " comma = "" col = 0 for value in encoding_list: line += comma line += "%3d" % value comma = "," col += 1 if col == 16: col = 0 comma = ",\n " write( line + "\n };\n\n\n" ) def dump_array( the_array, write, array_name ): """dumps a given encoding""" write( " static const unsigned char " + array_name + "[" + repr( len( the_array ) ) + "L] =\n" ) write( " {\n" ) line = "" comma = " " col = 0 for value in the_array: line += comma line += "%3d" % ord( value ) comma = "," col += 1 if col == 16: col = 0 comma = ",\n " if len( line ) > 1024: write( line ) line = "" write( line + "\n };\n\n\n" ) def main(): """main program body""" if len( sys.argv ) != 2: print __doc__ % sys.argv[0] sys.exit( 1 ) file = open( sys.argv[1], "w\n" ) write = file.write count_sid = len( sid_standard_names ) # `mac_extras' contains the list of glyph names in the Macintosh standard # encoding which are 
not in the SID Standard Names. # mac_extras = filter_glyph_names( mac_standard_names, sid_standard_names ) # `base_list' contains the names of our final glyph names table. # It consists of the `mac_extras' glyph names, followed by the SID # standard names. # mac_extras_count = len( mac_extras ) base_list = mac_extras + sid_standard_names write( "/***************************************************************************/\n" ) write( "/* */\n" ) write( "/* %-71s*/\n" % os.path.basename( sys.argv[1] ) ) write( "/* */\n" ) write( "/* PostScript glyph names. */\n" ) write( "/* */\n" ) write( "/* Copyright 2005, 2008, 2011 by */\n" ) write( "/* David Turner, Robert Wilhelm, and Werner Lemberg. */\n" ) write( "/* */\n" ) write( "/* This file is part of the FreeType project, and may only be used, */\n" ) write( "/* modified, and distributed under the terms of the FreeType project */\n" ) write( "/* license, LICENSE.TXT. By continuing to use, modify, or distribute */\n" ) write( "/* this file you indicate that you have read the license and */\n" ) write( "/* understand and accept it fully. */\n" ) write( "/* */\n" ) write( "/***************************************************************************/\n" ) write( "\n" ) write( "\n" ) write( " /* This file has been generated automatically -- do not edit! 
*/\n" ) write( "\n" ) write( "\n" ) # dump final glyph list (mac extras + sid standard names) # st = StringTable( base_list, "ft_standard_glyph_names" ) st.dump( file ) st.dump_sublist( file, "ft_mac_names", "FT_NUM_MAC_NAMES", mac_standard_names ) st.dump_sublist( file, "ft_sid_names", "FT_NUM_SID_NAMES", sid_standard_names ) dump_encoding( file, "t1_standard_encoding", t1_standard_encoding ) dump_encoding( file, "t1_expert_encoding", t1_expert_encoding ) # dump the AGL in its compressed form # agl_glyphs, agl_values = adobe_glyph_values() dict = StringNode( "", 0 ) for g in range( len( agl_glyphs ) ): dict.add( agl_glyphs[g], eval( "0x" + agl_values[g] ) ) dict = dict.optimize() dict_len = dict.locate( 0 ) dict_array = dict.store( "" ) write( """\ /* * This table is a compressed version of the Adobe Glyph List (AGL), * optimized for efficient searching. It has been generated by the * `glnames.py' python script located in the `src/tools' directory. * * The lookup function to get the Unicode value for a given string * is defined below the table. */ #ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST """ ) dump_array( dict_array, write, "ft_adobe_glyph_list" ) # write the lookup routine now # write( """\ /* * This function searches the compressed table efficiently. 
*/ static unsigned long ft_get_adobe_glyph_index( const char* name, const char* limit ) { int c = 0; int count, min, max; const unsigned char* p = ft_adobe_glyph_list; if ( name == 0 || name >= limit ) goto NotFound; c = *name++; count = p[1]; p += 2; min = 0; max = count; while ( min < max ) { int mid = ( min + max ) >> 1; const unsigned char* q = p + mid * 2; int c2; q = ft_adobe_glyph_list + ( ( (int)q[0] << 8 ) | q[1] ); c2 = q[0] & 127; if ( c2 == c ) { p = q; goto Found; } if ( c2 < c ) min = mid + 1; else max = mid; } goto NotFound; Found: for (;;) { /* assert (*p & 127) == c */ if ( name >= limit ) { if ( (p[0] & 128) == 0 && (p[1] & 128) != 0 ) return (unsigned long)( ( (int)p[2] << 8 ) | p[3] ); goto NotFound; } c = *name++; if ( p[0] & 128 ) { p++; if ( c != (p[0] & 127) ) goto NotFound; continue; } p++; count = p[0] & 127; if ( p[0] & 128 ) p += 2; p++; for ( ; count > 0; count--, p += 2 ) { int offset = ( (int)p[0] << 8 ) | p[1]; const unsigned char* q = ft_adobe_glyph_list + offset; if ( c == ( q[0] & 127 ) ) { p = q; goto NextIter; } } goto NotFound; NextIter: ; } NotFound: return 0; } #endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */ """ ) if 0: # generate unit test, or don't # # now write the unit test to check that everything works OK # write( "#ifdef TEST\n\n" ) write( "static const char* const the_names[] = {\n" ) for name in agl_glyphs: write( ' "' + name + '",\n' ) write( " 0\n};\n" ) write( "static const unsigned long the_values[] = {\n" ) for val in agl_values: write( ' 0x' + val + ',\n' ) write( " 0\n};\n" ) write( """ #include <stdlib.h> #include <stdio.h> int main( void ) { int result = 0; const char* const* names = the_names; const unsigned long* values = the_values; for ( ; *names; names++, values++ ) { const char* name = *names; unsigned long reference = *values; unsigned long value; value = ft_get_adobe_glyph_index( name, name + strlen( name ) ); if ( value != reference ) { result = 1; fprintf( stderr, "name '%s' => %04x instead of 
%04x\\n", name, value, reference ); } } return result; } """ ) write( "#endif /* TEST */\n" ) write("\n/* END */\n") # Now run the main routine # main() # END
[ [ 8, 0, 0.0042, 0.0016, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0055, 0.0002, 0, 0.66, 0.0714, 509, 0, 5, 0, 0, 509, 0, 0 ], [ 14, 0, 0.0173, 0.0195, 0, 0.6...
[ "\"\"\"\\\n\nusage: %s <output-file>\n\n This python script generates the glyph names tables defined in the\n `psnames' module.\n\n Its single argument is the name of the header file to be created.", "import sys, string, struct, re, os.path", "mac_standard_names = \\\n[\n # 0\n \".notdef\", \".null\", \"no...
# compute arctangent table for CORDIC computations in fttrigon.c import sys, math #units = 64*65536.0 # don't change !! units = 256 scale = units/math.pi shrink = 1.0 comma = "" def calc_val( x ): global units, shrink angle = math.atan(x) shrink = shrink * math.cos(angle) return angle/math.pi * units def print_val( n, x ): global comma lo = int(x) hi = lo + 1 alo = math.atan(lo) ahi = math.atan(hi) ax = math.atan(2.0**n) errlo = abs( alo - ax ) errhi = abs( ahi - ax ) if ( errlo < errhi ): hi = lo sys.stdout.write( comma + repr( int(hi) ) ) comma = ", " print "" print "table of arctan( 1/2^n ) for PI = " + repr(units/65536.0) + " units" # compute range of "i" r = [-1] r = r + range(32) for n in r: if n >= 0: x = 1.0/(2.0**n) # tangent value else: x = 2.0**(-n) angle = math.atan(x) # arctangent angle2 = angle*scale # arctangent in FT_Angle units # determine which integer value for angle gives the best tangent lo = int(angle2) hi = lo + 1 tlo = math.tan(lo/scale) thi = math.tan(hi/scale) errlo = abs( tlo - x ) errhi = abs( thi - x ) angle2 = hi if errlo < errhi: angle2 = lo if angle2 <= 0: break sys.stdout.write( comma + repr( int(angle2) ) ) comma = ", " shrink = shrink * math.cos( angle2/scale) print print "shrink factor = " + repr( shrink ) print "shrink factor 2 = " + repr( shrink * (2.0**32) ) print "expansion factor = " + repr(1/shrink) print ""
[ [ 1, 0, 0.026, 0.013, 0, 0.66, 0, 509, 0, 2, 0, 0, 509, 0, 0 ], [ 14, 0, 0.0649, 0.013, 0, 0.66, 0.0714, 805, 1, 0, 0, 0, 0, 1, 0 ], [ 14, 0, 0.0779, 0.013, 0, 0.66...
[ "import sys, math", "units = 256", "scale = units/math.pi", "shrink = 1.0", "comma = \"\"", "def calc_val( x ):\n global units, shrink\n angle = math.atan(x)\n shrink = shrink * math.cos(angle)\n return angle/math.pi * units", " angle = math.atan(x)", " shrink = shrink * math.co...
#!/usr/bin/env python # # Check trace components in FreeType 2 source. # Author: suzuki toshiya, 2009 # # This code is explicitly into the public domain. import sys import os import re SRC_FILE_LIST = [] USED_COMPONENT = {} KNOWN_COMPONENT = {} SRC_FILE_DIRS = [ "src" ] TRACE_DEF_FILES = [ "include/freetype/internal/fttrace.h" ] # -------------------------------------------------------------- # Parse command line options # for i in range( 1, len( sys.argv ) ): if sys.argv[i].startswith( "--help" ): print "Usage: %s [option]" % sys.argv[0] print "Search used-but-defined and defined-but-not-used trace_XXX macros" print "" print " --help:" print " Show this help" print "" print " --src-dirs=dir1:dir2:..." print " Specify the directories of C source files to be checked" print " Default is %s" % ":".join( SRC_FILE_DIRS ) print "" print " --def-files=file1:file2:..." print " Specify the header files including FT_TRACE_DEF()" print " Default is %s" % ":".join( TRACE_DEF_FILES ) print "" exit(0) if sys.argv[i].startswith( "--src-dirs=" ): SRC_FILE_DIRS = sys.argv[i].replace( "--src-dirs=", "", 1 ).split( ":" ) elif sys.argv[i].startswith( "--def-files=" ): TRACE_DEF_FILES = sys.argv[i].replace( "--def-files=", "", 1 ).split( ":" ) # -------------------------------------------------------------- # Scan C source and header files using trace macros. 
# c_pathname_pat = re.compile( '^.*\.[ch]$', re.IGNORECASE ) trace_use_pat = re.compile( '^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+trace_' ) for d in SRC_FILE_DIRS: for ( p, dlst, flst ) in os.walk( d ): for f in flst: if c_pathname_pat.match( f ) != None: src_pathname = os.path.join( p, f ) line_num = 0 for src_line in open( src_pathname, 'r' ): line_num = line_num + 1 src_line = src_line.strip() if trace_use_pat.match( src_line ) != None: component_name = trace_use_pat.sub( '', src_line ) if component_name in USED_COMPONENT: USED_COMPONENT[component_name].append( "%s:%d" % ( src_pathname, line_num ) ) else: USED_COMPONENT[component_name] = [ "%s:%d" % ( src_pathname, line_num ) ] # -------------------------------------------------------------- # Scan header file(s) defining trace macros. # trace_def_pat_opn = re.compile( '^.*FT_TRACE_DEF[ \t]*\([ \t]*' ) trace_def_pat_cls = re.compile( '[ \t\)].*$' ) for f in TRACE_DEF_FILES: line_num = 0 for hdr_line in open( f, 'r' ): line_num = line_num + 1 hdr_line = hdr_line.strip() if trace_def_pat_opn.match( hdr_line ) != None: component_name = trace_def_pat_opn.sub( '', hdr_line ) component_name = trace_def_pat_cls.sub( '', component_name ) if component_name in KNOWN_COMPONENT: print "trace component %s is defined twice, see %s and fttrace.h:%d" % \ ( component_name, KNOWN_COMPONENT[component_name], line_num ) else: KNOWN_COMPONENT[component_name] = "%s:%d" % \ ( os.path.basename( f ), line_num ) # -------------------------------------------------------------- # Compare the used and defined trace macros. # print "# Trace component used in the implementations but not defined in fttrace.h." cmpnt = USED_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c not in KNOWN_COMPONENT: print "Trace component %s (used in %s) is not defined." % ( c, ", ".join( USED_COMPONENT[c] ) ) print "# Trace component is defined but not used in the implementations." 
cmpnt = KNOWN_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c not in USED_COMPONENT: if c != "any": print "Trace component %s (defined in %s) is not used." % ( c, KNOWN_COMPONENT[c] )
[ [ 1, 0, 0.0796, 0.0088, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.0885, 0.0088, 0, 0.66, 0.0455, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0973, 0.0088, 0, ...
[ "import sys", "import os", "import re", "SRC_FILE_LIST = []", "USED_COMPONENT = {}", "KNOWN_COMPONENT = {}", "SRC_FILE_DIRS = [ \"src\" ]", "TRACE_DEF_FILES = [ \"include/freetype/internal/fttrace.h\" ]", "for i in range( 1, len( sys.argv ) ):\n if sys.argv[i].startswith( \"--help\" ):\n pr...
#!/usr/bin/env python # # # FreeType 2 glyph name builder # # Copyright 1996-2000, 2003, 2005, 2007, 2008, 2011 by # David Turner, Robert Wilhelm, and Werner Lemberg. # # This file is part of the FreeType project, and may only be used, modified, # and distributed under the terms of the FreeType project license, # LICENSE.TXT. By continuing to use, modify, or distribute this file you # indicate that you have read the license and understand and accept it # fully. """\ usage: %s <output-file> This python script generates the glyph names tables defined in the `psnames' module. Its single argument is the name of the header file to be created. """ import sys, string, struct, re, os.path # This table lists the glyphs according to the Macintosh specification. # It is used by the TrueType Postscript names table. # # See # # http://fonts.apple.com/TTRefMan/RM06/Chap6post.html # # for the official list. # mac_standard_names = \ [ # 0 ".notdef", ".null", "nonmarkingreturn", "space", "exclam", "quotedbl", "numbersign", "dollar", "percent", "ampersand", # 10 "quotesingle", "parenleft", "parenright", "asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", # 20 "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "colon", # 30 "semicolon", "less", "equal", "greater", "question", "at", "A", "B", "C", "D", # 40 "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", # 50 "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", # 60 "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum", "underscore", "grave", "a", "b", # 70 "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", # 80 "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", # 90 "w", "x", "y", "z", "braceleft", "bar", "braceright", "asciitilde", "Adieresis", "Aring", # 100 "Ccedilla", "Eacute", "Ntilde", "Odieresis", "Udieresis", "aacute", "agrave", "acircumflex", "adieresis", "atilde", # 110 "aring", "ccedilla", "eacute", "egrave", "ecircumflex", "edieresis", "iacute", "igrave", "icircumflex", 
"idieresis", # 120 "ntilde", "oacute", "ograve", "ocircumflex", "odieresis", "otilde", "uacute", "ugrave", "ucircumflex", "udieresis", # 130 "dagger", "degree", "cent", "sterling", "section", "bullet", "paragraph", "germandbls", "registered", "copyright", # 140 "trademark", "acute", "dieresis", "notequal", "AE", "Oslash", "infinity", "plusminus", "lessequal", "greaterequal", # 150 "yen", "mu", "partialdiff", "summation", "product", "pi", "integral", "ordfeminine", "ordmasculine", "Omega", # 160 "ae", "oslash", "questiondown", "exclamdown", "logicalnot", "radical", "florin", "approxequal", "Delta", "guillemotleft", # 170 "guillemotright", "ellipsis", "nonbreakingspace", "Agrave", "Atilde", "Otilde", "OE", "oe", "endash", "emdash", # 180 "quotedblleft", "quotedblright", "quoteleft", "quoteright", "divide", "lozenge", "ydieresis", "Ydieresis", "fraction", "currency", # 190 "guilsinglleft", "guilsinglright", "fi", "fl", "daggerdbl", "periodcentered", "quotesinglbase", "quotedblbase", "perthousand", "Acircumflex", # 200 "Ecircumflex", "Aacute", "Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis", "Igrave", "Oacute", "Ocircumflex", # 210 "apple", "Ograve", "Uacute", "Ucircumflex", "Ugrave", "dotlessi", "circumflex", "tilde", "macron", "breve", # 220 "dotaccent", "ring", "cedilla", "hungarumlaut", "ogonek", "caron", "Lslash", "lslash", "Scaron", "scaron", # 230 "Zcaron", "zcaron", "brokenbar", "Eth", "eth", "Yacute", "yacute", "Thorn", "thorn", "minus", # 240 "multiply", "onesuperior", "twosuperior", "threesuperior", "onehalf", "onequarter", "threequarters", "franc", "Gbreve", "gbreve", # 250 "Idotaccent", "Scedilla", "scedilla", "Cacute", "cacute", "Ccaron", "ccaron", "dcroat" ] # The list of standard `SID' glyph names. For the official list, # see Annex A of document at # # http://partners.adobe.com/public/developer/en/font/5176.CFF.pdf . 
# sid_standard_names = \ [ # 0 ".notdef", "space", "exclam", "quotedbl", "numbersign", "dollar", "percent", "ampersand", "quoteright", "parenleft", # 10 "parenright", "asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two", # 20 "three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon", "less", # 30 "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", # 40 "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", # 50 "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", # 60 "bracketleft", "backslash", "bracketright", "asciicircum", "underscore", "quoteleft", "a", "b", "c", "d", # 70 "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", # 80 "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", # 90 "y", "z", "braceleft", "bar", "braceright", "asciitilde", "exclamdown", "cent", "sterling", "fraction", # 100 "yen", "florin", "section", "currency", "quotesingle", "quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi", # 110 "fl", "endash", "dagger", "daggerdbl", "periodcentered", "paragraph", "bullet", "quotesinglbase", "quotedblbase", "quotedblright", # 120 "guillemotright", "ellipsis", "perthousand", "questiondown", "grave", "acute", "circumflex", "tilde", "macron", "breve", # 130 "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut", "ogonek", "caron", "emdash", "AE", "ordfeminine", # 140 "Lslash", "Oslash", "OE", "ordmasculine", "ae", "dotlessi", "lslash", "oslash", "oe", "germandbls", # 150 "onesuperior", "logicalnot", "mu", "trademark", "Eth", "onehalf", "plusminus", "Thorn", "onequarter", "divide", # 160 "brokenbar", "degree", "thorn", "threequarters", "twosuperior", "registered", "minus", "eth", "multiply", "threesuperior", # 170 "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave", "Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex", # 180 "Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis", "Igrave", "Ntilde", "Oacute", "Ocircumflex", "Odieresis", # 190 "Ograve", "Otilde", 
"Scaron", "Uacute", "Ucircumflex", "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron", # 200 "aacute", "acircumflex", "adieresis", "agrave", "aring", "atilde", "ccedilla", "eacute", "ecircumflex", "edieresis", # 210 "egrave", "iacute", "icircumflex", "idieresis", "igrave", "ntilde", "oacute", "ocircumflex", "odieresis", "ograve", # 220 "otilde", "scaron", "uacute", "ucircumflex", "udieresis", "ugrave", "yacute", "ydieresis", "zcaron", "exclamsmall", # 230 "Hungarumlautsmall", "dollaroldstyle", "dollarsuperior", "ampersandsmall", "Acutesmall", "parenleftsuperior", "parenrightsuperior", "twodotenleader", "onedotenleader", "zerooldstyle", # 240 "oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle", "commasuperior", # 250 "threequartersemdash", "periodsuperior", "questionsmall", "asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", "lsuperior", # 260 "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", "tsuperior", "ff", "ffi", "ffl", "parenleftinferior", # 270 "parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall", "Asmall", "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall", # 280 "Gsmall", "Hsmall", "Ismall", "Jsmall", "Ksmall", "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall", # 290 "Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall", "Vsmall", "Wsmall", "Xsmall", "Ysmall", "Zsmall", # 300 "colonmonetary", "onefitted", "rupiah", "Tildesmall", "exclamdownsmall", "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall", "Dieresissmall", # 310 "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", "figuredash", "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall", "questiondownsmall", # 320 "oneeighth", "threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds", "zerosuperior", "foursuperior", "fivesuperior", "sixsuperior", # 330 "sevensuperior", "eightsuperior", "ninesuperior", "zeroinferior", 
"oneinferior", "twoinferior", "threeinferior", "fourinferior", "fiveinferior", "sixinferior", # 340 "seveninferior", "eightinferior", "nineinferior", "centinferior", "dollarinferior", "periodinferior", "commainferior", "Agravesmall", "Aacutesmall", "Acircumflexsmall", # 350 "Atildesmall", "Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall", "Egravesmall", "Eacutesmall", "Ecircumflexsmall", "Edieresissmall", "Igravesmall", # 360 "Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall", "Ntildesmall", "Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall", "Odieresissmall", # 370 "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall", "Ucircumflexsmall", "Udieresissmall", "Yacutesmall", "Thornsmall", "Ydieresissmall", "001.000", # 380 "001.001", "001.002", "001.003", "Black", "Bold", "Book", "Light", "Medium", "Regular", "Roman", # 390 "Semibold" ] # This table maps character codes of the Adobe Standard Type 1 # encoding to glyph indices in the sid_standard_names table. 
# t1_standard_encoding = \ [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 0, 111, 112, 113, 114, 0, 115, 116, 117, 118, 119, 120, 121, 122, 0, 123, 0, 124, 125, 126, 127, 128, 129, 130, 131, 0, 132, 133, 0, 134, 135, 136, 137, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 138, 0, 139, 0, 0, 0, 0, 140, 141, 142, 143, 0, 0, 0, 0, 0, 144, 0, 0, 0, 145, 0, 0, 146, 147, 148, 149, 0, 0, 0, 0 ] # This table maps character codes of the Adobe Expert Type 1 # encoding to glyph indices in the sid_standard_names table. 
# t1_expert_encoding = \ [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 229, 230, 0, 231, 232, 233, 234, 235, 236, 237, 238, 13, 14, 15, 99, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 27, 28, 249, 250, 251, 252, 0, 253, 254, 255, 256, 257, 0, 0, 0, 258, 0, 0, 259, 260, 261, 262, 0, 0, 263, 264, 265, 0, 266, 109, 110, 267, 268, 269, 0, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 304, 305, 306, 0, 0, 307, 308, 309, 310, 311, 0, 312, 0, 0, 313, 0, 0, 314, 315, 0, 0, 316, 317, 318, 0, 0, 0, 158, 155, 163, 319, 320, 321, 322, 323, 324, 325, 0, 0, 326, 150, 164, 169, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378 ] # This data has been taken literally from the files `glyphlist.txt' # and `zapfdingbats.txt' version 2.0, Sept 2002. 
It is available from # # http://sourceforge.net/adobe/aglfn/ # adobe_glyph_list = """\ A;0041 AE;00C6 AEacute;01FC AEmacron;01E2 AEsmall;F7E6 Aacute;00C1 Aacutesmall;F7E1 Abreve;0102 Abreveacute;1EAE Abrevecyrillic;04D0 Abrevedotbelow;1EB6 Abrevegrave;1EB0 Abrevehookabove;1EB2 Abrevetilde;1EB4 Acaron;01CD Acircle;24B6 Acircumflex;00C2 Acircumflexacute;1EA4 Acircumflexdotbelow;1EAC Acircumflexgrave;1EA6 Acircumflexhookabove;1EA8 Acircumflexsmall;F7E2 Acircumflextilde;1EAA Acute;F6C9 Acutesmall;F7B4 Acyrillic;0410 Adblgrave;0200 Adieresis;00C4 Adieresiscyrillic;04D2 Adieresismacron;01DE Adieresissmall;F7E4 Adotbelow;1EA0 Adotmacron;01E0 Agrave;00C0 Agravesmall;F7E0 Ahookabove;1EA2 Aiecyrillic;04D4 Ainvertedbreve;0202 Alpha;0391 Alphatonos;0386 Amacron;0100 Amonospace;FF21 Aogonek;0104 Aring;00C5 Aringacute;01FA Aringbelow;1E00 Aringsmall;F7E5 Asmall;F761 Atilde;00C3 Atildesmall;F7E3 Aybarmenian;0531 B;0042 Bcircle;24B7 Bdotaccent;1E02 Bdotbelow;1E04 Becyrillic;0411 Benarmenian;0532 Beta;0392 Bhook;0181 Blinebelow;1E06 Bmonospace;FF22 Brevesmall;F6F4 Bsmall;F762 Btopbar;0182 C;0043 Caarmenian;053E Cacute;0106 Caron;F6CA Caronsmall;F6F5 Ccaron;010C Ccedilla;00C7 Ccedillaacute;1E08 Ccedillasmall;F7E7 Ccircle;24B8 Ccircumflex;0108 Cdot;010A Cdotaccent;010A Cedillasmall;F7B8 Chaarmenian;0549 Cheabkhasiancyrillic;04BC Checyrillic;0427 Chedescenderabkhasiancyrillic;04BE Chedescendercyrillic;04B6 Chedieresiscyrillic;04F4 Cheharmenian;0543 Chekhakassiancyrillic;04CB Cheverticalstrokecyrillic;04B8 Chi;03A7 Chook;0187 Circumflexsmall;F6F6 Cmonospace;FF23 Coarmenian;0551 Csmall;F763 D;0044 DZ;01F1 DZcaron;01C4 Daarmenian;0534 Dafrican;0189 Dcaron;010E Dcedilla;1E10 Dcircle;24B9 Dcircumflexbelow;1E12 Dcroat;0110 Ddotaccent;1E0A Ddotbelow;1E0C Decyrillic;0414 Deicoptic;03EE Delta;2206 Deltagreek;0394 Dhook;018A Dieresis;F6CB DieresisAcute;F6CC DieresisGrave;F6CD Dieresissmall;F7A8 Digammagreek;03DC Djecyrillic;0402 Dlinebelow;1E0E Dmonospace;FF24 Dotaccentsmall;F6F7 Dslash;0110 
Dsmall;F764 Dtopbar;018B Dz;01F2 Dzcaron;01C5 Dzeabkhasiancyrillic;04E0 Dzecyrillic;0405 Dzhecyrillic;040F E;0045 Eacute;00C9 Eacutesmall;F7E9 Ebreve;0114 Ecaron;011A Ecedillabreve;1E1C Echarmenian;0535 Ecircle;24BA Ecircumflex;00CA Ecircumflexacute;1EBE Ecircumflexbelow;1E18 Ecircumflexdotbelow;1EC6 Ecircumflexgrave;1EC0 Ecircumflexhookabove;1EC2 Ecircumflexsmall;F7EA Ecircumflextilde;1EC4 Ecyrillic;0404 Edblgrave;0204 Edieresis;00CB Edieresissmall;F7EB Edot;0116 Edotaccent;0116 Edotbelow;1EB8 Efcyrillic;0424 Egrave;00C8 Egravesmall;F7E8 Eharmenian;0537 Ehookabove;1EBA Eightroman;2167 Einvertedbreve;0206 Eiotifiedcyrillic;0464 Elcyrillic;041B Elevenroman;216A Emacron;0112 Emacronacute;1E16 Emacrongrave;1E14 Emcyrillic;041C Emonospace;FF25 Encyrillic;041D Endescendercyrillic;04A2 Eng;014A Enghecyrillic;04A4 Enhookcyrillic;04C7 Eogonek;0118 Eopen;0190 Epsilon;0395 Epsilontonos;0388 Ercyrillic;0420 Ereversed;018E Ereversedcyrillic;042D Escyrillic;0421 Esdescendercyrillic;04AA Esh;01A9 Esmall;F765 Eta;0397 Etarmenian;0538 Etatonos;0389 Eth;00D0 Ethsmall;F7F0 Etilde;1EBC Etildebelow;1E1A Euro;20AC Ezh;01B7 Ezhcaron;01EE Ezhreversed;01B8 F;0046 Fcircle;24BB Fdotaccent;1E1E Feharmenian;0556 Feicoptic;03E4 Fhook;0191 Fitacyrillic;0472 Fiveroman;2164 Fmonospace;FF26 Fourroman;2163 Fsmall;F766 G;0047 GBsquare;3387 Gacute;01F4 Gamma;0393 Gammaafrican;0194 Gangiacoptic;03EA Gbreve;011E Gcaron;01E6 Gcedilla;0122 Gcircle;24BC Gcircumflex;011C Gcommaaccent;0122 Gdot;0120 Gdotaccent;0120 Gecyrillic;0413 Ghadarmenian;0542 Ghemiddlehookcyrillic;0494 Ghestrokecyrillic;0492 Gheupturncyrillic;0490 Ghook;0193 Gimarmenian;0533 Gjecyrillic;0403 Gmacron;1E20 Gmonospace;FF27 Grave;F6CE Gravesmall;F760 Gsmall;F767 Gsmallhook;029B Gstroke;01E4 H;0048 H18533;25CF H18543;25AA H18551;25AB H22073;25A1 HPsquare;33CB Haabkhasiancyrillic;04A8 Hadescendercyrillic;04B2 Hardsigncyrillic;042A Hbar;0126 Hbrevebelow;1E2A Hcedilla;1E28 Hcircle;24BD Hcircumflex;0124 Hdieresis;1E26 Hdotaccent;1E22 
Hdotbelow;1E24 Hmonospace;FF28 Hoarmenian;0540 Horicoptic;03E8 Hsmall;F768 Hungarumlaut;F6CF Hungarumlautsmall;F6F8 Hzsquare;3390 I;0049 IAcyrillic;042F IJ;0132 IUcyrillic;042E Iacute;00CD Iacutesmall;F7ED Ibreve;012C Icaron;01CF Icircle;24BE Icircumflex;00CE Icircumflexsmall;F7EE Icyrillic;0406 Idblgrave;0208 Idieresis;00CF Idieresisacute;1E2E Idieresiscyrillic;04E4 Idieresissmall;F7EF Idot;0130 Idotaccent;0130 Idotbelow;1ECA Iebrevecyrillic;04D6 Iecyrillic;0415 Ifraktur;2111 Igrave;00CC Igravesmall;F7EC Ihookabove;1EC8 Iicyrillic;0418 Iinvertedbreve;020A Iishortcyrillic;0419 Imacron;012A Imacroncyrillic;04E2 Imonospace;FF29 Iniarmenian;053B Iocyrillic;0401 Iogonek;012E Iota;0399 Iotaafrican;0196 Iotadieresis;03AA Iotatonos;038A Ismall;F769 Istroke;0197 Itilde;0128 Itildebelow;1E2C Izhitsacyrillic;0474 Izhitsadblgravecyrillic;0476 J;004A Jaarmenian;0541 Jcircle;24BF Jcircumflex;0134 Jecyrillic;0408 Jheharmenian;054B Jmonospace;FF2A Jsmall;F76A K;004B KBsquare;3385 KKsquare;33CD Kabashkircyrillic;04A0 Kacute;1E30 Kacyrillic;041A Kadescendercyrillic;049A Kahookcyrillic;04C3 Kappa;039A Kastrokecyrillic;049E Kaverticalstrokecyrillic;049C Kcaron;01E8 Kcedilla;0136 Kcircle;24C0 Kcommaaccent;0136 Kdotbelow;1E32 Keharmenian;0554 Kenarmenian;053F Khacyrillic;0425 Kheicoptic;03E6 Khook;0198 Kjecyrillic;040C Klinebelow;1E34 Kmonospace;FF2B Koppacyrillic;0480 Koppagreek;03DE Ksicyrillic;046E Ksmall;F76B L;004C LJ;01C7 LL;F6BF Lacute;0139 Lambda;039B Lcaron;013D Lcedilla;013B Lcircle;24C1 Lcircumflexbelow;1E3C Lcommaaccent;013B Ldot;013F Ldotaccent;013F Ldotbelow;1E36 Ldotbelowmacron;1E38 Liwnarmenian;053C Lj;01C8 Ljecyrillic;0409 Llinebelow;1E3A Lmonospace;FF2C Lslash;0141 Lslashsmall;F6F9 Lsmall;F76C M;004D MBsquare;3386 Macron;F6D0 Macronsmall;F7AF Macute;1E3E Mcircle;24C2 Mdotaccent;1E40 Mdotbelow;1E42 Menarmenian;0544 Mmonospace;FF2D Msmall;F76D Mturned;019C Mu;039C N;004E NJ;01CA Nacute;0143 Ncaron;0147 Ncedilla;0145 Ncircle;24C3 Ncircumflexbelow;1E4A Ncommaaccent;0145 
Ndotaccent;1E44 Ndotbelow;1E46 Nhookleft;019D Nineroman;2168 Nj;01CB Njecyrillic;040A Nlinebelow;1E48 Nmonospace;FF2E Nowarmenian;0546 Nsmall;F76E Ntilde;00D1 Ntildesmall;F7F1 Nu;039D O;004F OE;0152 OEsmall;F6FA Oacute;00D3 Oacutesmall;F7F3 Obarredcyrillic;04E8 Obarreddieresiscyrillic;04EA Obreve;014E Ocaron;01D1 Ocenteredtilde;019F Ocircle;24C4 Ocircumflex;00D4 Ocircumflexacute;1ED0 Ocircumflexdotbelow;1ED8 Ocircumflexgrave;1ED2 Ocircumflexhookabove;1ED4 Ocircumflexsmall;F7F4 Ocircumflextilde;1ED6 Ocyrillic;041E Odblacute;0150 Odblgrave;020C Odieresis;00D6 Odieresiscyrillic;04E6 Odieresissmall;F7F6 Odotbelow;1ECC Ogoneksmall;F6FB Ograve;00D2 Ogravesmall;F7F2 Oharmenian;0555 Ohm;2126 Ohookabove;1ECE Ohorn;01A0 Ohornacute;1EDA Ohorndotbelow;1EE2 Ohorngrave;1EDC Ohornhookabove;1EDE Ohorntilde;1EE0 Ohungarumlaut;0150 Oi;01A2 Oinvertedbreve;020E Omacron;014C Omacronacute;1E52 Omacrongrave;1E50 Omega;2126 Omegacyrillic;0460 Omegagreek;03A9 Omegaroundcyrillic;047A Omegatitlocyrillic;047C Omegatonos;038F Omicron;039F Omicrontonos;038C Omonospace;FF2F Oneroman;2160 Oogonek;01EA Oogonekmacron;01EC Oopen;0186 Oslash;00D8 Oslashacute;01FE Oslashsmall;F7F8 Osmall;F76F Ostrokeacute;01FE Otcyrillic;047E Otilde;00D5 Otildeacute;1E4C Otildedieresis;1E4E Otildesmall;F7F5 P;0050 Pacute;1E54 Pcircle;24C5 Pdotaccent;1E56 Pecyrillic;041F Peharmenian;054A Pemiddlehookcyrillic;04A6 Phi;03A6 Phook;01A4 Pi;03A0 Piwrarmenian;0553 Pmonospace;FF30 Psi;03A8 Psicyrillic;0470 Psmall;F770 Q;0051 Qcircle;24C6 Qmonospace;FF31 Qsmall;F771 R;0052 Raarmenian;054C Racute;0154 Rcaron;0158 Rcedilla;0156 Rcircle;24C7 Rcommaaccent;0156 Rdblgrave;0210 Rdotaccent;1E58 Rdotbelow;1E5A Rdotbelowmacron;1E5C Reharmenian;0550 Rfraktur;211C Rho;03A1 Ringsmall;F6FC Rinvertedbreve;0212 Rlinebelow;1E5E Rmonospace;FF32 Rsmall;F772 Rsmallinverted;0281 Rsmallinvertedsuperior;02B6 S;0053 SF010000;250C SF020000;2514 SF030000;2510 SF040000;2518 SF050000;253C SF060000;252C SF070000;2534 SF080000;251C SF090000;2524 
SF100000;2500 SF110000;2502 SF190000;2561 SF200000;2562 SF210000;2556 SF220000;2555 SF230000;2563 SF240000;2551 SF250000;2557 SF260000;255D SF270000;255C SF280000;255B SF360000;255E SF370000;255F SF380000;255A SF390000;2554 SF400000;2569 SF410000;2566 SF420000;2560 SF430000;2550 SF440000;256C SF450000;2567 SF460000;2568 SF470000;2564 SF480000;2565 SF490000;2559 SF500000;2558 SF510000;2552 SF520000;2553 SF530000;256B SF540000;256A Sacute;015A Sacutedotaccent;1E64 Sampigreek;03E0 Scaron;0160 Scarondotaccent;1E66 Scaronsmall;F6FD Scedilla;015E Schwa;018F Schwacyrillic;04D8 Schwadieresiscyrillic;04DA Scircle;24C8 Scircumflex;015C Scommaaccent;0218 Sdotaccent;1E60 Sdotbelow;1E62 Sdotbelowdotaccent;1E68 Seharmenian;054D Sevenroman;2166 Shaarmenian;0547 Shacyrillic;0428 Shchacyrillic;0429 Sheicoptic;03E2 Shhacyrillic;04BA Shimacoptic;03EC Sigma;03A3 Sixroman;2165 Smonospace;FF33 Softsigncyrillic;042C Ssmall;F773 Stigmagreek;03DA T;0054 Tau;03A4 Tbar;0166 Tcaron;0164 Tcedilla;0162 Tcircle;24C9 Tcircumflexbelow;1E70 Tcommaaccent;0162 Tdotaccent;1E6A Tdotbelow;1E6C Tecyrillic;0422 Tedescendercyrillic;04AC Tenroman;2169 Tetsecyrillic;04B4 Theta;0398 Thook;01AC Thorn;00DE Thornsmall;F7FE Threeroman;2162 Tildesmall;F6FE Tiwnarmenian;054F Tlinebelow;1E6E Tmonospace;FF34 Toarmenian;0539 Tonefive;01BC Tonesix;0184 Tonetwo;01A7 Tretroflexhook;01AE Tsecyrillic;0426 Tshecyrillic;040B Tsmall;F774 Twelveroman;216B Tworoman;2161 U;0055 Uacute;00DA Uacutesmall;F7FA Ubreve;016C Ucaron;01D3 Ucircle;24CA Ucircumflex;00DB Ucircumflexbelow;1E76 Ucircumflexsmall;F7FB Ucyrillic;0423 Udblacute;0170 Udblgrave;0214 Udieresis;00DC Udieresisacute;01D7 Udieresisbelow;1E72 Udieresiscaron;01D9 Udieresiscyrillic;04F0 Udieresisgrave;01DB Udieresismacron;01D5 Udieresissmall;F7FC Udotbelow;1EE4 Ugrave;00D9 Ugravesmall;F7F9 Uhookabove;1EE6 Uhorn;01AF Uhornacute;1EE8 Uhorndotbelow;1EF0 Uhorngrave;1EEA Uhornhookabove;1EEC Uhorntilde;1EEE Uhungarumlaut;0170 Uhungarumlautcyrillic;04F2 Uinvertedbreve;0216 
Ukcyrillic;0478 Umacron;016A Umacroncyrillic;04EE Umacrondieresis;1E7A Umonospace;FF35 Uogonek;0172 Upsilon;03A5 Upsilon1;03D2 Upsilonacutehooksymbolgreek;03D3 Upsilonafrican;01B1 Upsilondieresis;03AB Upsilondieresishooksymbolgreek;03D4 Upsilonhooksymbol;03D2 Upsilontonos;038E Uring;016E Ushortcyrillic;040E Usmall;F775 Ustraightcyrillic;04AE Ustraightstrokecyrillic;04B0 Utilde;0168 Utildeacute;1E78 Utildebelow;1E74 V;0056 Vcircle;24CB Vdotbelow;1E7E Vecyrillic;0412 Vewarmenian;054E Vhook;01B2 Vmonospace;FF36 Voarmenian;0548 Vsmall;F776 Vtilde;1E7C W;0057 Wacute;1E82 Wcircle;24CC Wcircumflex;0174 Wdieresis;1E84 Wdotaccent;1E86 Wdotbelow;1E88 Wgrave;1E80 Wmonospace;FF37 Wsmall;F777 X;0058 Xcircle;24CD Xdieresis;1E8C Xdotaccent;1E8A Xeharmenian;053D Xi;039E Xmonospace;FF38 Xsmall;F778 Y;0059 Yacute;00DD Yacutesmall;F7FD Yatcyrillic;0462 Ycircle;24CE Ycircumflex;0176 Ydieresis;0178 Ydieresissmall;F7FF Ydotaccent;1E8E Ydotbelow;1EF4 Yericyrillic;042B Yerudieresiscyrillic;04F8 Ygrave;1EF2 Yhook;01B3 Yhookabove;1EF6 Yiarmenian;0545 Yicyrillic;0407 Yiwnarmenian;0552 Ymonospace;FF39 Ysmall;F779 Ytilde;1EF8 Yusbigcyrillic;046A Yusbigiotifiedcyrillic;046C Yuslittlecyrillic;0466 Yuslittleiotifiedcyrillic;0468 Z;005A Zaarmenian;0536 Zacute;0179 Zcaron;017D Zcaronsmall;F6FF Zcircle;24CF Zcircumflex;1E90 Zdot;017B Zdotaccent;017B Zdotbelow;1E92 Zecyrillic;0417 Zedescendercyrillic;0498 Zedieresiscyrillic;04DE Zeta;0396 Zhearmenian;053A Zhebrevecyrillic;04C1 Zhecyrillic;0416 Zhedescendercyrillic;0496 Zhedieresiscyrillic;04DC Zlinebelow;1E94 Zmonospace;FF3A Zsmall;F77A Zstroke;01B5 a;0061 aabengali;0986 aacute;00E1 aadeva;0906 aagujarati;0A86 aagurmukhi;0A06 aamatragurmukhi;0A3E aarusquare;3303 aavowelsignbengali;09BE aavowelsigndeva;093E aavowelsigngujarati;0ABE abbreviationmarkarmenian;055F abbreviationsigndeva;0970 abengali;0985 abopomofo;311A abreve;0103 abreveacute;1EAF abrevecyrillic;04D1 abrevedotbelow;1EB7 abrevegrave;1EB1 abrevehookabove;1EB3 abrevetilde;1EB5 acaron;01CE 
acircle;24D0 acircumflex;00E2 acircumflexacute;1EA5 acircumflexdotbelow;1EAD acircumflexgrave;1EA7 acircumflexhookabove;1EA9 acircumflextilde;1EAB acute;00B4 acutebelowcmb;0317 acutecmb;0301 acutecomb;0301 acutedeva;0954 acutelowmod;02CF acutetonecmb;0341 acyrillic;0430 adblgrave;0201 addakgurmukhi;0A71 adeva;0905 adieresis;00E4 adieresiscyrillic;04D3 adieresismacron;01DF adotbelow;1EA1 adotmacron;01E1 ae;00E6 aeacute;01FD aekorean;3150 aemacron;01E3 afii00208;2015 afii08941;20A4 afii10017;0410 afii10018;0411 afii10019;0412 afii10020;0413 afii10021;0414 afii10022;0415 afii10023;0401 afii10024;0416 afii10025;0417 afii10026;0418 afii10027;0419 afii10028;041A afii10029;041B afii10030;041C afii10031;041D afii10032;041E afii10033;041F afii10034;0420 afii10035;0421 afii10036;0422 afii10037;0423 afii10038;0424 afii10039;0425 afii10040;0426 afii10041;0427 afii10042;0428 afii10043;0429 afii10044;042A afii10045;042B afii10046;042C afii10047;042D afii10048;042E afii10049;042F afii10050;0490 afii10051;0402 afii10052;0403 afii10053;0404 afii10054;0405 afii10055;0406 afii10056;0407 afii10057;0408 afii10058;0409 afii10059;040A afii10060;040B afii10061;040C afii10062;040E afii10063;F6C4 afii10064;F6C5 afii10065;0430 afii10066;0431 afii10067;0432 afii10068;0433 afii10069;0434 afii10070;0435 afii10071;0451 afii10072;0436 afii10073;0437 afii10074;0438 afii10075;0439 afii10076;043A afii10077;043B afii10078;043C afii10079;043D afii10080;043E afii10081;043F afii10082;0440 afii10083;0441 afii10084;0442 afii10085;0443 afii10086;0444 afii10087;0445 afii10088;0446 afii10089;0447 afii10090;0448 afii10091;0449 afii10092;044A afii10093;044B afii10094;044C afii10095;044D afii10096;044E afii10097;044F afii10098;0491 afii10099;0452 afii10100;0453 afii10101;0454 afii10102;0455 afii10103;0456 afii10104;0457 afii10105;0458 afii10106;0459 afii10107;045A afii10108;045B afii10109;045C afii10110;045E afii10145;040F afii10146;0462 afii10147;0472 afii10148;0474 afii10192;F6C6 afii10193;045F afii10194;0463 
afii10195;0473 afii10196;0475 afii10831;F6C7 afii10832;F6C8 afii10846;04D9 afii299;200E afii300;200F afii301;200D afii57381;066A afii57388;060C afii57392;0660 afii57393;0661 afii57394;0662 afii57395;0663 afii57396;0664 afii57397;0665 afii57398;0666 afii57399;0667 afii57400;0668 afii57401;0669 afii57403;061B afii57407;061F afii57409;0621 afii57410;0622 afii57411;0623 afii57412;0624 afii57413;0625 afii57414;0626 afii57415;0627 afii57416;0628 afii57417;0629 afii57418;062A afii57419;062B afii57420;062C afii57421;062D afii57422;062E afii57423;062F afii57424;0630 afii57425;0631 afii57426;0632 afii57427;0633 afii57428;0634 afii57429;0635 afii57430;0636 afii57431;0637 afii57432;0638 afii57433;0639 afii57434;063A afii57440;0640 afii57441;0641 afii57442;0642 afii57443;0643 afii57444;0644 afii57445;0645 afii57446;0646 afii57448;0648 afii57449;0649 afii57450;064A afii57451;064B afii57452;064C afii57453;064D afii57454;064E afii57455;064F afii57456;0650 afii57457;0651 afii57458;0652 afii57470;0647 afii57505;06A4 afii57506;067E afii57507;0686 afii57508;0698 afii57509;06AF afii57511;0679 afii57512;0688 afii57513;0691 afii57514;06BA afii57519;06D2 afii57534;06D5 afii57636;20AA afii57645;05BE afii57658;05C3 afii57664;05D0 afii57665;05D1 afii57666;05D2 afii57667;05D3 afii57668;05D4 afii57669;05D5 afii57670;05D6 afii57671;05D7 afii57672;05D8 afii57673;05D9 afii57674;05DA afii57675;05DB afii57676;05DC afii57677;05DD afii57678;05DE afii57679;05DF afii57680;05E0 afii57681;05E1 afii57682;05E2 afii57683;05E3 afii57684;05E4 afii57685;05E5 afii57686;05E6 afii57687;05E7 afii57688;05E8 afii57689;05E9 afii57690;05EA afii57694;FB2A afii57695;FB2B afii57700;FB4B afii57705;FB1F afii57716;05F0 afii57717;05F1 afii57718;05F2 afii57723;FB35 afii57793;05B4 afii57794;05B5 afii57795;05B6 afii57796;05BB afii57797;05B8 afii57798;05B7 afii57799;05B0 afii57800;05B2 afii57801;05B1 afii57802;05B3 afii57803;05C2 afii57804;05C1 afii57806;05B9 afii57807;05BC afii57839;05BD afii57841;05BF afii57842;05C0 
afii57929;02BC afii61248;2105 afii61289;2113 afii61352;2116 afii61573;202C afii61574;202D afii61575;202E afii61664;200C afii63167;066D afii64937;02BD agrave;00E0 agujarati;0A85 agurmukhi;0A05 ahiragana;3042 ahookabove;1EA3 aibengali;0990 aibopomofo;311E aideva;0910 aiecyrillic;04D5 aigujarati;0A90 aigurmukhi;0A10 aimatragurmukhi;0A48 ainarabic;0639 ainfinalarabic;FECA aininitialarabic;FECB ainmedialarabic;FECC ainvertedbreve;0203 aivowelsignbengali;09C8 aivowelsigndeva;0948 aivowelsigngujarati;0AC8 akatakana;30A2 akatakanahalfwidth;FF71 akorean;314F alef;05D0 alefarabic;0627 alefdageshhebrew;FB30 aleffinalarabic;FE8E alefhamzaabovearabic;0623 alefhamzaabovefinalarabic;FE84 alefhamzabelowarabic;0625 alefhamzabelowfinalarabic;FE88 alefhebrew;05D0 aleflamedhebrew;FB4F alefmaddaabovearabic;0622 alefmaddaabovefinalarabic;FE82 alefmaksuraarabic;0649 alefmaksurafinalarabic;FEF0 alefmaksurainitialarabic;FEF3 alefmaksuramedialarabic;FEF4 alefpatahhebrew;FB2E alefqamatshebrew;FB2F aleph;2135 allequal;224C alpha;03B1 alphatonos;03AC amacron;0101 amonospace;FF41 ampersand;0026 ampersandmonospace;FF06 ampersandsmall;F726 amsquare;33C2 anbopomofo;3122 angbopomofo;3124 angkhankhuthai;0E5A angle;2220 anglebracketleft;3008 anglebracketleftvertical;FE3F anglebracketright;3009 anglebracketrightvertical;FE40 angleleft;2329 angleright;232A angstrom;212B anoteleia;0387 anudattadeva;0952 anusvarabengali;0982 anusvaradeva;0902 anusvaragujarati;0A82 aogonek;0105 apaatosquare;3300 aparen;249C apostrophearmenian;055A apostrophemod;02BC apple;F8FF approaches;2250 approxequal;2248 approxequalorimage;2252 approximatelyequal;2245 araeaekorean;318E araeakorean;318D arc;2312 arighthalfring;1E9A aring;00E5 aringacute;01FB aringbelow;1E01 arrowboth;2194 arrowdashdown;21E3 arrowdashleft;21E0 arrowdashright;21E2 arrowdashup;21E1 arrowdblboth;21D4 arrowdbldown;21D3 arrowdblleft;21D0 arrowdblright;21D2 arrowdblup;21D1 arrowdown;2193 arrowdownleft;2199 arrowdownright;2198 arrowdownwhite;21E9 
arrowheaddownmod;02C5 arrowheadleftmod;02C2 arrowheadrightmod;02C3 arrowheadupmod;02C4 arrowhorizex;F8E7 arrowleft;2190 arrowleftdbl;21D0 arrowleftdblstroke;21CD arrowleftoverright;21C6 arrowleftwhite;21E6 arrowright;2192 arrowrightdblstroke;21CF arrowrightheavy;279E arrowrightoverleft;21C4 arrowrightwhite;21E8 arrowtableft;21E4 arrowtabright;21E5 arrowup;2191 arrowupdn;2195 arrowupdnbse;21A8 arrowupdownbase;21A8 arrowupleft;2196 arrowupleftofdown;21C5 arrowupright;2197 arrowupwhite;21E7 arrowvertex;F8E6 asciicircum;005E asciicircummonospace;FF3E asciitilde;007E asciitildemonospace;FF5E ascript;0251 ascriptturned;0252 asmallhiragana;3041 asmallkatakana;30A1 asmallkatakanahalfwidth;FF67 asterisk;002A asteriskaltonearabic;066D asteriskarabic;066D asteriskmath;2217 asteriskmonospace;FF0A asterisksmall;FE61 asterism;2042 asuperior;F6E9 asymptoticallyequal;2243 at;0040 atilde;00E3 atmonospace;FF20 atsmall;FE6B aturned;0250 aubengali;0994 aubopomofo;3120 audeva;0914 augujarati;0A94 augurmukhi;0A14 aulengthmarkbengali;09D7 aumatragurmukhi;0A4C auvowelsignbengali;09CC auvowelsigndeva;094C auvowelsigngujarati;0ACC avagrahadeva;093D aybarmenian;0561 ayin;05E2 ayinaltonehebrew;FB20 ayinhebrew;05E2 b;0062 babengali;09AC backslash;005C backslashmonospace;FF3C badeva;092C bagujarati;0AAC bagurmukhi;0A2C bahiragana;3070 bahtthai;0E3F bakatakana;30D0 bar;007C barmonospace;FF5C bbopomofo;3105 bcircle;24D1 bdotaccent;1E03 bdotbelow;1E05 beamedsixteenthnotes;266C because;2235 becyrillic;0431 beharabic;0628 behfinalarabic;FE90 behinitialarabic;FE91 behiragana;3079 behmedialarabic;FE92 behmeeminitialarabic;FC9F behmeemisolatedarabic;FC08 behnoonfinalarabic;FC6D bekatakana;30D9 benarmenian;0562 bet;05D1 beta;03B2 betasymbolgreek;03D0 betdagesh;FB31 betdageshhebrew;FB31 bethebrew;05D1 betrafehebrew;FB4C bhabengali;09AD bhadeva;092D bhagujarati;0AAD bhagurmukhi;0A2D bhook;0253 bihiragana;3073 bikatakana;30D3 bilabialclick;0298 bindigurmukhi;0A02 birusquare;3331 blackcircle;25CF 
blackdiamond;25C6 blackdownpointingtriangle;25BC blackleftpointingpointer;25C4 blackleftpointingtriangle;25C0 blacklenticularbracketleft;3010 blacklenticularbracketleftvertical;FE3B blacklenticularbracketright;3011 blacklenticularbracketrightvertical;FE3C blacklowerlefttriangle;25E3 blacklowerrighttriangle;25E2 blackrectangle;25AC blackrightpointingpointer;25BA blackrightpointingtriangle;25B6 blacksmallsquare;25AA blacksmilingface;263B blacksquare;25A0 blackstar;2605 blackupperlefttriangle;25E4 blackupperrighttriangle;25E5 blackuppointingsmalltriangle;25B4 blackuppointingtriangle;25B2 blank;2423 blinebelow;1E07 block;2588 bmonospace;FF42 bobaimaithai;0E1A bohiragana;307C bokatakana;30DC bparen;249D bqsquare;33C3 braceex;F8F4 braceleft;007B braceleftbt;F8F3 braceleftmid;F8F2 braceleftmonospace;FF5B braceleftsmall;FE5B bracelefttp;F8F1 braceleftvertical;FE37 braceright;007D bracerightbt;F8FE bracerightmid;F8FD bracerightmonospace;FF5D bracerightsmall;FE5C bracerighttp;F8FC bracerightvertical;FE38 bracketleft;005B bracketleftbt;F8F0 bracketleftex;F8EF bracketleftmonospace;FF3B bracketlefttp;F8EE bracketright;005D bracketrightbt;F8FB bracketrightex;F8FA bracketrightmonospace;FF3D bracketrighttp;F8F9 breve;02D8 brevebelowcmb;032E brevecmb;0306 breveinvertedbelowcmb;032F breveinvertedcmb;0311 breveinverteddoublecmb;0361 bridgebelowcmb;032A bridgeinvertedbelowcmb;033A brokenbar;00A6 bstroke;0180 bsuperior;F6EA btopbar;0183 buhiragana;3076 bukatakana;30D6 bullet;2022 bulletinverse;25D8 bulletoperator;2219 bullseye;25CE c;0063 caarmenian;056E cabengali;099A cacute;0107 cadeva;091A cagujarati;0A9A cagurmukhi;0A1A calsquare;3388 candrabindubengali;0981 candrabinducmb;0310 candrabindudeva;0901 candrabindugujarati;0A81 capslock;21EA careof;2105 caron;02C7 caronbelowcmb;032C caroncmb;030C carriagereturn;21B5 cbopomofo;3118 ccaron;010D ccedilla;00E7 ccedillaacute;1E09 ccircle;24D2 ccircumflex;0109 ccurl;0255 cdot;010B cdotaccent;010B cdsquare;33C5 cedilla;00B8 cedillacmb;0327 
cent;00A2 centigrade;2103 centinferior;F6DF centmonospace;FFE0 centoldstyle;F7A2 centsuperior;F6E0 chaarmenian;0579 chabengali;099B chadeva;091B chagujarati;0A9B chagurmukhi;0A1B chbopomofo;3114 cheabkhasiancyrillic;04BD checkmark;2713 checyrillic;0447 chedescenderabkhasiancyrillic;04BF chedescendercyrillic;04B7 chedieresiscyrillic;04F5 cheharmenian;0573 chekhakassiancyrillic;04CC cheverticalstrokecyrillic;04B9 chi;03C7 chieuchacirclekorean;3277 chieuchaparenkorean;3217 chieuchcirclekorean;3269 chieuchkorean;314A chieuchparenkorean;3209 chochangthai;0E0A chochanthai;0E08 chochingthai;0E09 chochoethai;0E0C chook;0188 cieucacirclekorean;3276 cieucaparenkorean;3216 cieuccirclekorean;3268 cieuckorean;3148 cieucparenkorean;3208 cieucuparenkorean;321C circle;25CB circlemultiply;2297 circleot;2299 circleplus;2295 circlepostalmark;3036 circlewithlefthalfblack;25D0 circlewithrighthalfblack;25D1 circumflex;02C6 circumflexbelowcmb;032D circumflexcmb;0302 clear;2327 clickalveolar;01C2 clickdental;01C0 clicklateral;01C1 clickretroflex;01C3 club;2663 clubsuitblack;2663 clubsuitwhite;2667 cmcubedsquare;33A4 cmonospace;FF43 cmsquaredsquare;33A0 coarmenian;0581 colon;003A colonmonetary;20A1 colonmonospace;FF1A colonsign;20A1 colonsmall;FE55 colontriangularhalfmod;02D1 colontriangularmod;02D0 comma;002C commaabovecmb;0313 commaaboverightcmb;0315 commaaccent;F6C3 commaarabic;060C commaarmenian;055D commainferior;F6E1 commamonospace;FF0C commareversedabovecmb;0314 commareversedmod;02BD commasmall;FE50 commasuperior;F6E2 commaturnedabovecmb;0312 commaturnedmod;02BB compass;263C congruent;2245 contourintegral;222E control;2303 controlACK;0006 controlBEL;0007 controlBS;0008 controlCAN;0018 controlCR;000D controlDC1;0011 controlDC2;0012 controlDC3;0013 controlDC4;0014 controlDEL;007F controlDLE;0010 controlEM;0019 controlENQ;0005 controlEOT;0004 controlESC;001B controlETB;0017 controlETX;0003 controlFF;000C controlFS;001C controlGS;001D controlHT;0009 controlLF;000A controlNAK;0015 
controlRS;001E controlSI;000F controlSO;000E controlSOT;0002 controlSTX;0001 controlSUB;001A controlSYN;0016 controlUS;001F controlVT;000B copyright;00A9 copyrightsans;F8E9 copyrightserif;F6D9 cornerbracketleft;300C cornerbracketlefthalfwidth;FF62 cornerbracketleftvertical;FE41 cornerbracketright;300D cornerbracketrighthalfwidth;FF63 cornerbracketrightvertical;FE42 corporationsquare;337F cosquare;33C7 coverkgsquare;33C6 cparen;249E cruzeiro;20A2 cstretched;0297 curlyand;22CF curlyor;22CE currency;00A4 cyrBreve;F6D1 cyrFlex;F6D2 cyrbreve;F6D4 cyrflex;F6D5 d;0064 daarmenian;0564 dabengali;09A6 dadarabic;0636 dadeva;0926 dadfinalarabic;FEBE dadinitialarabic;FEBF dadmedialarabic;FEC0 dagesh;05BC dageshhebrew;05BC dagger;2020 daggerdbl;2021 dagujarati;0AA6 dagurmukhi;0A26 dahiragana;3060 dakatakana;30C0 dalarabic;062F dalet;05D3 daletdagesh;FB33 daletdageshhebrew;FB33 dalethatafpatah;05D3 05B2 dalethatafpatahhebrew;05D3 05B2 dalethatafsegol;05D3 05B1 dalethatafsegolhebrew;05D3 05B1 dalethebrew;05D3 dalethiriq;05D3 05B4 dalethiriqhebrew;05D3 05B4 daletholam;05D3 05B9 daletholamhebrew;05D3 05B9 daletpatah;05D3 05B7 daletpatahhebrew;05D3 05B7 daletqamats;05D3 05B8 daletqamatshebrew;05D3 05B8 daletqubuts;05D3 05BB daletqubutshebrew;05D3 05BB daletsegol;05D3 05B6 daletsegolhebrew;05D3 05B6 daletsheva;05D3 05B0 daletshevahebrew;05D3 05B0 dalettsere;05D3 05B5 dalettserehebrew;05D3 05B5 dalfinalarabic;FEAA dammaarabic;064F dammalowarabic;064F dammatanaltonearabic;064C dammatanarabic;064C danda;0964 dargahebrew;05A7 dargalefthebrew;05A7 dasiapneumatacyrilliccmb;0485 dblGrave;F6D3 dblanglebracketleft;300A dblanglebracketleftvertical;FE3D dblanglebracketright;300B dblanglebracketrightvertical;FE3E dblarchinvertedbelowcmb;032B dblarrowleft;21D4 dblarrowright;21D2 dbldanda;0965 dblgrave;F6D6 dblgravecmb;030F dblintegral;222C dbllowline;2017 dbllowlinecmb;0333 dbloverlinecmb;033F dblprimemod;02BA dblverticalbar;2016 dblverticallineabovecmb;030E dbopomofo;3109 dbsquare;33C8 
dcaron;010F dcedilla;1E11 dcircle;24D3 dcircumflexbelow;1E13 dcroat;0111 ddabengali;09A1 ddadeva;0921 ddagujarati;0AA1 ddagurmukhi;0A21 ddalarabic;0688 ddalfinalarabic;FB89 dddhadeva;095C ddhabengali;09A2 ddhadeva;0922 ddhagujarati;0AA2 ddhagurmukhi;0A22 ddotaccent;1E0B ddotbelow;1E0D decimalseparatorarabic;066B decimalseparatorpersian;066B decyrillic;0434 degree;00B0 dehihebrew;05AD dehiragana;3067 deicoptic;03EF dekatakana;30C7 deleteleft;232B deleteright;2326 delta;03B4 deltaturned;018D denominatorminusonenumeratorbengali;09F8 dezh;02A4 dhabengali;09A7 dhadeva;0927 dhagujarati;0AA7 dhagurmukhi;0A27 dhook;0257 dialytikatonos;0385 dialytikatonoscmb;0344 diamond;2666 diamondsuitwhite;2662 dieresis;00A8 dieresisacute;F6D7 dieresisbelowcmb;0324 dieresiscmb;0308 dieresisgrave;F6D8 dieresistonos;0385 dihiragana;3062 dikatakana;30C2 dittomark;3003 divide;00F7 divides;2223 divisionslash;2215 djecyrillic;0452 dkshade;2593 dlinebelow;1E0F dlsquare;3397 dmacron;0111 dmonospace;FF44 dnblock;2584 dochadathai;0E0E dodekthai;0E14 dohiragana;3069 dokatakana;30C9 dollar;0024 dollarinferior;F6E3 dollarmonospace;FF04 dollaroldstyle;F724 dollarsmall;FE69 dollarsuperior;F6E4 dong;20AB dorusquare;3326 dotaccent;02D9 dotaccentcmb;0307 dotbelowcmb;0323 dotbelowcomb;0323 dotkatakana;30FB dotlessi;0131 dotlessj;F6BE dotlessjstrokehook;0284 dotmath;22C5 dottedcircle;25CC doubleyodpatah;FB1F doubleyodpatahhebrew;FB1F downtackbelowcmb;031E downtackmod;02D5 dparen;249F dsuperior;F6EB dtail;0256 dtopbar;018C duhiragana;3065 dukatakana;30C5 dz;01F3 dzaltone;02A3 dzcaron;01C6 dzcurl;02A5 dzeabkhasiancyrillic;04E1 dzecyrillic;0455 dzhecyrillic;045F e;0065 eacute;00E9 earth;2641 ebengali;098F ebopomofo;311C ebreve;0115 ecandradeva;090D ecandragujarati;0A8D ecandravowelsigndeva;0945 ecandravowelsigngujarati;0AC5 ecaron;011B ecedillabreve;1E1D echarmenian;0565 echyiwnarmenian;0587 ecircle;24D4 ecircumflex;00EA ecircumflexacute;1EBF ecircumflexbelow;1E19 ecircumflexdotbelow;1EC7 ecircumflexgrave;1EC1 
ecircumflexhookabove;1EC3 ecircumflextilde;1EC5 ecyrillic;0454 edblgrave;0205 edeva;090F edieresis;00EB edot;0117 edotaccent;0117 edotbelow;1EB9 eegurmukhi;0A0F eematragurmukhi;0A47 efcyrillic;0444 egrave;00E8 egujarati;0A8F eharmenian;0567 ehbopomofo;311D ehiragana;3048 ehookabove;1EBB eibopomofo;311F eight;0038 eightarabic;0668 eightbengali;09EE eightcircle;2467 eightcircleinversesansserif;2791 eightdeva;096E eighteencircle;2471 eighteenparen;2485 eighteenperiod;2499 eightgujarati;0AEE eightgurmukhi;0A6E eighthackarabic;0668 eighthangzhou;3028 eighthnotebeamed;266B eightideographicparen;3227 eightinferior;2088 eightmonospace;FF18 eightoldstyle;F738 eightparen;247B eightperiod;248F eightpersian;06F8 eightroman;2177 eightsuperior;2078 eightthai;0E58 einvertedbreve;0207 eiotifiedcyrillic;0465 ekatakana;30A8 ekatakanahalfwidth;FF74 ekonkargurmukhi;0A74 ekorean;3154 elcyrillic;043B element;2208 elevencircle;246A elevenparen;247E elevenperiod;2492 elevenroman;217A ellipsis;2026 ellipsisvertical;22EE emacron;0113 emacronacute;1E17 emacrongrave;1E15 emcyrillic;043C emdash;2014 emdashvertical;FE31 emonospace;FF45 emphasismarkarmenian;055B emptyset;2205 enbopomofo;3123 encyrillic;043D endash;2013 endashvertical;FE32 endescendercyrillic;04A3 eng;014B engbopomofo;3125 enghecyrillic;04A5 enhookcyrillic;04C8 enspace;2002 eogonek;0119 eokorean;3153 eopen;025B eopenclosed;029A eopenreversed;025C eopenreversedclosed;025E eopenreversedhook;025D eparen;24A0 epsilon;03B5 epsilontonos;03AD equal;003D equalmonospace;FF1D equalsmall;FE66 equalsuperior;207C equivalence;2261 erbopomofo;3126 ercyrillic;0440 ereversed;0258 ereversedcyrillic;044D escyrillic;0441 esdescendercyrillic;04AB esh;0283 eshcurl;0286 eshortdeva;090E eshortvowelsigndeva;0946 eshreversedloop;01AA eshsquatreversed;0285 esmallhiragana;3047 esmallkatakana;30A7 esmallkatakanahalfwidth;FF6A estimated;212E esuperior;F6EC eta;03B7 etarmenian;0568 etatonos;03AE eth;00F0 etilde;1EBD etildebelow;1E1B etnahtafoukhhebrew;0591 
etnahtafoukhlefthebrew;0591 etnahtahebrew;0591 etnahtalefthebrew;0591 eturned;01DD eukorean;3161 euro;20AC evowelsignbengali;09C7 evowelsigndeva;0947 evowelsigngujarati;0AC7 exclam;0021 exclamarmenian;055C exclamdbl;203C exclamdown;00A1 exclamdownsmall;F7A1 exclammonospace;FF01 exclamsmall;F721 existential;2203 ezh;0292 ezhcaron;01EF ezhcurl;0293 ezhreversed;01B9 ezhtail;01BA f;0066 fadeva;095E fagurmukhi;0A5E fahrenheit;2109 fathaarabic;064E fathalowarabic;064E fathatanarabic;064B fbopomofo;3108 fcircle;24D5 fdotaccent;1E1F feharabic;0641 feharmenian;0586 fehfinalarabic;FED2 fehinitialarabic;FED3 fehmedialarabic;FED4 feicoptic;03E5 female;2640 ff;FB00 ffi;FB03 ffl;FB04 fi;FB01 fifteencircle;246E fifteenparen;2482 fifteenperiod;2496 figuredash;2012 filledbox;25A0 filledrect;25AC finalkaf;05DA finalkafdagesh;FB3A finalkafdageshhebrew;FB3A finalkafhebrew;05DA finalkafqamats;05DA 05B8 finalkafqamatshebrew;05DA 05B8 finalkafsheva;05DA 05B0 finalkafshevahebrew;05DA 05B0 finalmem;05DD finalmemhebrew;05DD finalnun;05DF finalnunhebrew;05DF finalpe;05E3 finalpehebrew;05E3 finaltsadi;05E5 finaltsadihebrew;05E5 firsttonechinese;02C9 fisheye;25C9 fitacyrillic;0473 five;0035 fivearabic;0665 fivebengali;09EB fivecircle;2464 fivecircleinversesansserif;278E fivedeva;096B fiveeighths;215D fivegujarati;0AEB fivegurmukhi;0A6B fivehackarabic;0665 fivehangzhou;3025 fiveideographicparen;3224 fiveinferior;2085 fivemonospace;FF15 fiveoldstyle;F735 fiveparen;2478 fiveperiod;248C fivepersian;06F5 fiveroman;2174 fivesuperior;2075 fivethai;0E55 fl;FB02 florin;0192 fmonospace;FF46 fmsquare;3399 fofanthai;0E1F fofathai;0E1D fongmanthai;0E4F forall;2200 four;0034 fourarabic;0664 fourbengali;09EA fourcircle;2463 fourcircleinversesansserif;278D fourdeva;096A fourgujarati;0AEA fourgurmukhi;0A6A fourhackarabic;0664 fourhangzhou;3024 fourideographicparen;3223 fourinferior;2084 fourmonospace;FF14 fournumeratorbengali;09F7 fouroldstyle;F734 fourparen;2477 fourperiod;248B fourpersian;06F4 fourroman;2173 
foursuperior;2074 fourteencircle;246D fourteenparen;2481 fourteenperiod;2495 fourthai;0E54 fourthtonechinese;02CB fparen;24A1 fraction;2044 franc;20A3 g;0067 gabengali;0997 gacute;01F5 gadeva;0917 gafarabic;06AF gaffinalarabic;FB93 gafinitialarabic;FB94 gafmedialarabic;FB95 gagujarati;0A97 gagurmukhi;0A17 gahiragana;304C gakatakana;30AC gamma;03B3 gammalatinsmall;0263 gammasuperior;02E0 gangiacoptic;03EB gbopomofo;310D gbreve;011F gcaron;01E7 gcedilla;0123 gcircle;24D6 gcircumflex;011D gcommaaccent;0123 gdot;0121 gdotaccent;0121 gecyrillic;0433 gehiragana;3052 gekatakana;30B2 geometricallyequal;2251 gereshaccenthebrew;059C gereshhebrew;05F3 gereshmuqdamhebrew;059D germandbls;00DF gershayimaccenthebrew;059E gershayimhebrew;05F4 getamark;3013 ghabengali;0998 ghadarmenian;0572 ghadeva;0918 ghagujarati;0A98 ghagurmukhi;0A18 ghainarabic;063A ghainfinalarabic;FECE ghaininitialarabic;FECF ghainmedialarabic;FED0 ghemiddlehookcyrillic;0495 ghestrokecyrillic;0493 gheupturncyrillic;0491 ghhadeva;095A ghhagurmukhi;0A5A ghook;0260 ghzsquare;3393 gihiragana;304E gikatakana;30AE gimarmenian;0563 gimel;05D2 gimeldagesh;FB32 gimeldageshhebrew;FB32 gimelhebrew;05D2 gjecyrillic;0453 glottalinvertedstroke;01BE glottalstop;0294 glottalstopinverted;0296 glottalstopmod;02C0 glottalstopreversed;0295 glottalstopreversedmod;02C1 glottalstopreversedsuperior;02E4 glottalstopstroke;02A1 glottalstopstrokereversed;02A2 gmacron;1E21 gmonospace;FF47 gohiragana;3054 gokatakana;30B4 gparen;24A2 gpasquare;33AC gradient;2207 grave;0060 gravebelowcmb;0316 gravecmb;0300 gravecomb;0300 gravedeva;0953 gravelowmod;02CE gravemonospace;FF40 gravetonecmb;0340 greater;003E greaterequal;2265 greaterequalorless;22DB greatermonospace;FF1E greaterorequivalent;2273 greaterorless;2277 greateroverequal;2267 greatersmall;FE65 gscript;0261 gstroke;01E5 guhiragana;3050 guillemotleft;00AB guillemotright;00BB guilsinglleft;2039 guilsinglright;203A gukatakana;30B0 guramusquare;3318 gysquare;33C9 h;0068 
haabkhasiancyrillic;04A9 haaltonearabic;06C1 habengali;09B9 hadescendercyrillic;04B3 hadeva;0939 hagujarati;0AB9 hagurmukhi;0A39 haharabic;062D hahfinalarabic;FEA2 hahinitialarabic;FEA3 hahiragana;306F hahmedialarabic;FEA4 haitusquare;332A hakatakana;30CF hakatakanahalfwidth;FF8A halantgurmukhi;0A4D hamzaarabic;0621 hamzadammaarabic;0621 064F hamzadammatanarabic;0621 064C hamzafathaarabic;0621 064E hamzafathatanarabic;0621 064B hamzalowarabic;0621 hamzalowkasraarabic;0621 0650 hamzalowkasratanarabic;0621 064D hamzasukunarabic;0621 0652 hangulfiller;3164 hardsigncyrillic;044A harpoonleftbarbup;21BC harpoonrightbarbup;21C0 hasquare;33CA hatafpatah;05B2 hatafpatah16;05B2 hatafpatah23;05B2 hatafpatah2f;05B2 hatafpatahhebrew;05B2 hatafpatahnarrowhebrew;05B2 hatafpatahquarterhebrew;05B2 hatafpatahwidehebrew;05B2 hatafqamats;05B3 hatafqamats1b;05B3 hatafqamats28;05B3 hatafqamats34;05B3 hatafqamatshebrew;05B3 hatafqamatsnarrowhebrew;05B3 hatafqamatsquarterhebrew;05B3 hatafqamatswidehebrew;05B3 hatafsegol;05B1 hatafsegol17;05B1 hatafsegol24;05B1 hatafsegol30;05B1 hatafsegolhebrew;05B1 hatafsegolnarrowhebrew;05B1 hatafsegolquarterhebrew;05B1 hatafsegolwidehebrew;05B1 hbar;0127 hbopomofo;310F hbrevebelow;1E2B hcedilla;1E29 hcircle;24D7 hcircumflex;0125 hdieresis;1E27 hdotaccent;1E23 hdotbelow;1E25 he;05D4 heart;2665 heartsuitblack;2665 heartsuitwhite;2661 hedagesh;FB34 hedageshhebrew;FB34 hehaltonearabic;06C1 heharabic;0647 hehebrew;05D4 hehfinalaltonearabic;FBA7 hehfinalalttwoarabic;FEEA hehfinalarabic;FEEA hehhamzaabovefinalarabic;FBA5 hehhamzaaboveisolatedarabic;FBA4 hehinitialaltonearabic;FBA8 hehinitialarabic;FEEB hehiragana;3078 hehmedialaltonearabic;FBA9 hehmedialarabic;FEEC heiseierasquare;337B hekatakana;30D8 hekatakanahalfwidth;FF8D hekutaarusquare;3336 henghook;0267 herutusquare;3339 het;05D7 hethebrew;05D7 hhook;0266 hhooksuperior;02B1 hieuhacirclekorean;327B hieuhaparenkorean;321B hieuhcirclekorean;326D hieuhkorean;314E hieuhparenkorean;320D hihiragana;3072 
hikatakana;30D2 hikatakanahalfwidth;FF8B hiriq;05B4 hiriq14;05B4 hiriq21;05B4 hiriq2d;05B4 hiriqhebrew;05B4 hiriqnarrowhebrew;05B4 hiriqquarterhebrew;05B4 hiriqwidehebrew;05B4 hlinebelow;1E96 hmonospace;FF48 hoarmenian;0570 hohipthai;0E2B hohiragana;307B hokatakana;30DB hokatakanahalfwidth;FF8E holam;05B9 holam19;05B9 holam26;05B9 holam32;05B9 holamhebrew;05B9 holamnarrowhebrew;05B9 holamquarterhebrew;05B9 holamwidehebrew;05B9 honokhukthai;0E2E hookabovecomb;0309 hookcmb;0309 hookpalatalizedbelowcmb;0321 hookretroflexbelowcmb;0322 hoonsquare;3342 horicoptic;03E9 horizontalbar;2015 horncmb;031B hotsprings;2668 house;2302 hparen;24A3 hsuperior;02B0 hturned;0265 huhiragana;3075 huiitosquare;3333 hukatakana;30D5 hukatakanahalfwidth;FF8C hungarumlaut;02DD hungarumlautcmb;030B hv;0195 hyphen;002D hypheninferior;F6E5 hyphenmonospace;FF0D hyphensmall;FE63 hyphensuperior;F6E6 hyphentwo;2010 i;0069 iacute;00ED iacyrillic;044F ibengali;0987 ibopomofo;3127 ibreve;012D icaron;01D0 icircle;24D8 icircumflex;00EE icyrillic;0456 idblgrave;0209 ideographearthcircle;328F ideographfirecircle;328B ideographicallianceparen;323F ideographiccallparen;323A ideographiccentrecircle;32A5 ideographicclose;3006 ideographiccomma;3001 ideographiccommaleft;FF64 ideographiccongratulationparen;3237 ideographiccorrectcircle;32A3 ideographicearthparen;322F ideographicenterpriseparen;323D ideographicexcellentcircle;329D ideographicfestivalparen;3240 ideographicfinancialcircle;3296 ideographicfinancialparen;3236 ideographicfireparen;322B ideographichaveparen;3232 ideographichighcircle;32A4 ideographiciterationmark;3005 ideographiclaborcircle;3298 ideographiclaborparen;3238 ideographicleftcircle;32A7 ideographiclowcircle;32A6 ideographicmedicinecircle;32A9 ideographicmetalparen;322E ideographicmoonparen;322A ideographicnameparen;3234 ideographicperiod;3002 ideographicprintcircle;329E ideographicreachparen;3243 ideographicrepresentparen;3239 ideographicresourceparen;323E ideographicrightcircle;32A8 
ideographicsecretcircle;3299 ideographicselfparen;3242 ideographicsocietyparen;3233 ideographicspace;3000 ideographicspecialparen;3235 ideographicstockparen;3231 ideographicstudyparen;323B ideographicsunparen;3230 ideographicsuperviseparen;323C ideographicwaterparen;322C ideographicwoodparen;322D ideographiczero;3007 ideographmetalcircle;328E ideographmooncircle;328A ideographnamecircle;3294 ideographsuncircle;3290 ideographwatercircle;328C ideographwoodcircle;328D ideva;0907 idieresis;00EF idieresisacute;1E2F idieresiscyrillic;04E5 idotbelow;1ECB iebrevecyrillic;04D7 iecyrillic;0435 ieungacirclekorean;3275 ieungaparenkorean;3215 ieungcirclekorean;3267 ieungkorean;3147 ieungparenkorean;3207 igrave;00EC igujarati;0A87 igurmukhi;0A07 ihiragana;3044 ihookabove;1EC9 iibengali;0988 iicyrillic;0438 iideva;0908 iigujarati;0A88 iigurmukhi;0A08 iimatragurmukhi;0A40 iinvertedbreve;020B iishortcyrillic;0439 iivowelsignbengali;09C0 iivowelsigndeva;0940 iivowelsigngujarati;0AC0 ij;0133 ikatakana;30A4 ikatakanahalfwidth;FF72 ikorean;3163 ilde;02DC iluyhebrew;05AC imacron;012B imacroncyrillic;04E3 imageorapproximatelyequal;2253 imatragurmukhi;0A3F imonospace;FF49 increment;2206 infinity;221E iniarmenian;056B integral;222B integralbottom;2321 integralbt;2321 integralex;F8F5 integraltop;2320 integraltp;2320 intersection;2229 intisquare;3305 invbullet;25D8 invcircle;25D9 invsmileface;263B iocyrillic;0451 iogonek;012F iota;03B9 iotadieresis;03CA iotadieresistonos;0390 iotalatin;0269 iotatonos;03AF iparen;24A4 irigurmukhi;0A72 ismallhiragana;3043 ismallkatakana;30A3 ismallkatakanahalfwidth;FF68 issharbengali;09FA istroke;0268 isuperior;F6ED iterationhiragana;309D iterationkatakana;30FD itilde;0129 itildebelow;1E2D iubopomofo;3129 iucyrillic;044E ivowelsignbengali;09BF ivowelsigndeva;093F ivowelsigngujarati;0ABF izhitsacyrillic;0475 izhitsadblgravecyrillic;0477 j;006A jaarmenian;0571 jabengali;099C jadeva;091C jagujarati;0A9C jagurmukhi;0A1C jbopomofo;3110 jcaron;01F0 jcircle;24D9 
jcircumflex;0135 jcrossedtail;029D jdotlessstroke;025F jecyrillic;0458 jeemarabic;062C jeemfinalarabic;FE9E jeeminitialarabic;FE9F jeemmedialarabic;FEA0 jeharabic;0698 jehfinalarabic;FB8B jhabengali;099D jhadeva;091D jhagujarati;0A9D jhagurmukhi;0A1D jheharmenian;057B jis;3004 jmonospace;FF4A jparen;24A5 jsuperior;02B2 k;006B kabashkircyrillic;04A1 kabengali;0995 kacute;1E31 kacyrillic;043A kadescendercyrillic;049B kadeva;0915 kaf;05DB kafarabic;0643 kafdagesh;FB3B kafdageshhebrew;FB3B kaffinalarabic;FEDA kafhebrew;05DB kafinitialarabic;FEDB kafmedialarabic;FEDC kafrafehebrew;FB4D kagujarati;0A95 kagurmukhi;0A15 kahiragana;304B kahookcyrillic;04C4 kakatakana;30AB kakatakanahalfwidth;FF76 kappa;03BA kappasymbolgreek;03F0 kapyeounmieumkorean;3171 kapyeounphieuphkorean;3184 kapyeounpieupkorean;3178 kapyeounssangpieupkorean;3179 karoriisquare;330D kashidaautoarabic;0640 kashidaautonosidebearingarabic;0640 kasmallkatakana;30F5 kasquare;3384 kasraarabic;0650 kasratanarabic;064D kastrokecyrillic;049F katahiraprolongmarkhalfwidth;FF70 kaverticalstrokecyrillic;049D kbopomofo;310E kcalsquare;3389 kcaron;01E9 kcedilla;0137 kcircle;24DA kcommaaccent;0137 kdotbelow;1E33 keharmenian;0584 kehiragana;3051 kekatakana;30B1 kekatakanahalfwidth;FF79 kenarmenian;056F kesmallkatakana;30F6 kgreenlandic;0138 khabengali;0996 khacyrillic;0445 khadeva;0916 khagujarati;0A96 khagurmukhi;0A16 khaharabic;062E khahfinalarabic;FEA6 khahinitialarabic;FEA7 khahmedialarabic;FEA8 kheicoptic;03E7 khhadeva;0959 khhagurmukhi;0A59 khieukhacirclekorean;3278 khieukhaparenkorean;3218 khieukhcirclekorean;326A khieukhkorean;314B khieukhparenkorean;320A khokhaithai;0E02 khokhonthai;0E05 khokhuatthai;0E03 khokhwaithai;0E04 khomutthai;0E5B khook;0199 khorakhangthai;0E06 khzsquare;3391 kihiragana;304D kikatakana;30AD kikatakanahalfwidth;FF77 kiroguramusquare;3315 kiromeetorusquare;3316 kirosquare;3314 kiyeokacirclekorean;326E kiyeokaparenkorean;320E kiyeokcirclekorean;3260 kiyeokkorean;3131 kiyeokparenkorean;3200 
kiyeoksioskorean;3133 kjecyrillic;045C klinebelow;1E35 klsquare;3398 kmcubedsquare;33A6 kmonospace;FF4B kmsquaredsquare;33A2 kohiragana;3053 kohmsquare;33C0 kokaithai;0E01 kokatakana;30B3 kokatakanahalfwidth;FF7A kooposquare;331E koppacyrillic;0481 koreanstandardsymbol;327F koroniscmb;0343 kparen;24A6 kpasquare;33AA ksicyrillic;046F ktsquare;33CF kturned;029E kuhiragana;304F kukatakana;30AF kukatakanahalfwidth;FF78 kvsquare;33B8 kwsquare;33BE l;006C labengali;09B2 lacute;013A ladeva;0932 lagujarati;0AB2 lagurmukhi;0A32 lakkhangyaothai;0E45 lamaleffinalarabic;FEFC lamalefhamzaabovefinalarabic;FEF8 lamalefhamzaaboveisolatedarabic;FEF7 lamalefhamzabelowfinalarabic;FEFA lamalefhamzabelowisolatedarabic;FEF9 lamalefisolatedarabic;FEFB lamalefmaddaabovefinalarabic;FEF6 lamalefmaddaaboveisolatedarabic;FEF5 lamarabic;0644 lambda;03BB lambdastroke;019B lamed;05DC lameddagesh;FB3C lameddageshhebrew;FB3C lamedhebrew;05DC lamedholam;05DC 05B9 lamedholamdagesh;05DC 05B9 05BC lamedholamdageshhebrew;05DC 05B9 05BC lamedholamhebrew;05DC 05B9 lamfinalarabic;FEDE lamhahinitialarabic;FCCA laminitialarabic;FEDF lamjeeminitialarabic;FCC9 lamkhahinitialarabic;FCCB lamlamhehisolatedarabic;FDF2 lammedialarabic;FEE0 lammeemhahinitialarabic;FD88 lammeeminitialarabic;FCCC lammeemjeeminitialarabic;FEDF FEE4 FEA0 lammeemkhahinitialarabic;FEDF FEE4 FEA8 largecircle;25EF lbar;019A lbelt;026C lbopomofo;310C lcaron;013E lcedilla;013C lcircle;24DB lcircumflexbelow;1E3D lcommaaccent;013C ldot;0140 ldotaccent;0140 ldotbelow;1E37 ldotbelowmacron;1E39 leftangleabovecmb;031A lefttackbelowcmb;0318 less;003C lessequal;2264 lessequalorgreater;22DA lessmonospace;FF1C lessorequivalent;2272 lessorgreater;2276 lessoverequal;2266 lesssmall;FE64 lezh;026E lfblock;258C lhookretroflex;026D lira;20A4 liwnarmenian;056C lj;01C9 ljecyrillic;0459 ll;F6C0 lladeva;0933 llagujarati;0AB3 llinebelow;1E3B llladeva;0934 llvocalicbengali;09E1 llvocalicdeva;0961 llvocalicvowelsignbengali;09E3 llvocalicvowelsigndeva;0963 
lmiddletilde;026B lmonospace;FF4C lmsquare;33D0 lochulathai;0E2C logicaland;2227 logicalnot;00AC logicalnotreversed;2310 logicalor;2228 lolingthai;0E25 longs;017F lowlinecenterline;FE4E lowlinecmb;0332 lowlinedashed;FE4D lozenge;25CA lparen;24A7 lslash;0142 lsquare;2113 lsuperior;F6EE ltshade;2591 luthai;0E26 lvocalicbengali;098C lvocalicdeva;090C lvocalicvowelsignbengali;09E2 lvocalicvowelsigndeva;0962 lxsquare;33D3 m;006D mabengali;09AE macron;00AF macronbelowcmb;0331 macroncmb;0304 macronlowmod;02CD macronmonospace;FFE3 macute;1E3F madeva;092E magujarati;0AAE magurmukhi;0A2E mahapakhhebrew;05A4 mahapakhlefthebrew;05A4 mahiragana;307E maichattawalowleftthai;F895 maichattawalowrightthai;F894 maichattawathai;0E4B maichattawaupperleftthai;F893 maieklowleftthai;F88C maieklowrightthai;F88B maiekthai;0E48 maiekupperleftthai;F88A maihanakatleftthai;F884 maihanakatthai;0E31 maitaikhuleftthai;F889 maitaikhuthai;0E47 maitholowleftthai;F88F maitholowrightthai;F88E maithothai;0E49 maithoupperleftthai;F88D maitrilowleftthai;F892 maitrilowrightthai;F891 maitrithai;0E4A maitriupperleftthai;F890 maiyamokthai;0E46 makatakana;30DE makatakanahalfwidth;FF8F male;2642 mansyonsquare;3347 maqafhebrew;05BE mars;2642 masoracirclehebrew;05AF masquare;3383 mbopomofo;3107 mbsquare;33D4 mcircle;24DC mcubedsquare;33A5 mdotaccent;1E41 mdotbelow;1E43 meemarabic;0645 meemfinalarabic;FEE2 meeminitialarabic;FEE3 meemmedialarabic;FEE4 meemmeeminitialarabic;FCD1 meemmeemisolatedarabic;FC48 meetorusquare;334D mehiragana;3081 meizierasquare;337E mekatakana;30E1 mekatakanahalfwidth;FF92 mem;05DE memdagesh;FB3E memdageshhebrew;FB3E memhebrew;05DE menarmenian;0574 merkhahebrew;05A5 merkhakefulahebrew;05A6 merkhakefulalefthebrew;05A6 merkhalefthebrew;05A5 mhook;0271 mhzsquare;3392 middledotkatakanahalfwidth;FF65 middot;00B7 mieumacirclekorean;3272 mieumaparenkorean;3212 mieumcirclekorean;3264 mieumkorean;3141 mieumpansioskorean;3170 mieumparenkorean;3204 mieumpieupkorean;316E mieumsioskorean;316F 
mihiragana;307F mikatakana;30DF mikatakanahalfwidth;FF90 minus;2212 minusbelowcmb;0320 minuscircle;2296 minusmod;02D7 minusplus;2213 minute;2032 miribaarusquare;334A mirisquare;3349 mlonglegturned;0270 mlsquare;3396 mmcubedsquare;33A3 mmonospace;FF4D mmsquaredsquare;339F mohiragana;3082 mohmsquare;33C1 mokatakana;30E2 mokatakanahalfwidth;FF93 molsquare;33D6 momathai;0E21 moverssquare;33A7 moverssquaredsquare;33A8 mparen;24A8 mpasquare;33AB mssquare;33B3 msuperior;F6EF mturned;026F mu;00B5 mu1;00B5 muasquare;3382 muchgreater;226B muchless;226A mufsquare;338C mugreek;03BC mugsquare;338D muhiragana;3080 mukatakana;30E0 mukatakanahalfwidth;FF91 mulsquare;3395 multiply;00D7 mumsquare;339B munahhebrew;05A3 munahlefthebrew;05A3 musicalnote;266A musicalnotedbl;266B musicflatsign;266D musicsharpsign;266F mussquare;33B2 muvsquare;33B6 muwsquare;33BC mvmegasquare;33B9 mvsquare;33B7 mwmegasquare;33BF mwsquare;33BD n;006E nabengali;09A8 nabla;2207 nacute;0144 nadeva;0928 nagujarati;0AA8 nagurmukhi;0A28 nahiragana;306A nakatakana;30CA nakatakanahalfwidth;FF85 napostrophe;0149 nasquare;3381 nbopomofo;310B nbspace;00A0 ncaron;0148 ncedilla;0146 ncircle;24DD ncircumflexbelow;1E4B ncommaaccent;0146 ndotaccent;1E45 ndotbelow;1E47 nehiragana;306D nekatakana;30CD nekatakanahalfwidth;FF88 newsheqelsign;20AA nfsquare;338B ngabengali;0999 ngadeva;0919 ngagujarati;0A99 ngagurmukhi;0A19 ngonguthai;0E07 nhiragana;3093 nhookleft;0272 nhookretroflex;0273 nieunacirclekorean;326F nieunaparenkorean;320F nieuncieuckorean;3135 nieuncirclekorean;3261 nieunhieuhkorean;3136 nieunkorean;3134 nieunpansioskorean;3168 nieunparenkorean;3201 nieunsioskorean;3167 nieuntikeutkorean;3166 nihiragana;306B nikatakana;30CB nikatakanahalfwidth;FF86 nikhahitleftthai;F899 nikhahitthai;0E4D nine;0039 ninearabic;0669 ninebengali;09EF ninecircle;2468 ninecircleinversesansserif;2792 ninedeva;096F ninegujarati;0AEF ninegurmukhi;0A6F ninehackarabic;0669 ninehangzhou;3029 nineideographicparen;3228 nineinferior;2089 
ninemonospace;FF19 nineoldstyle;F739 nineparen;247C nineperiod;2490 ninepersian;06F9 nineroman;2178 ninesuperior;2079 nineteencircle;2472 nineteenparen;2486 nineteenperiod;249A ninethai;0E59 nj;01CC njecyrillic;045A nkatakana;30F3 nkatakanahalfwidth;FF9D nlegrightlong;019E nlinebelow;1E49 nmonospace;FF4E nmsquare;339A nnabengali;09A3 nnadeva;0923 nnagujarati;0AA3 nnagurmukhi;0A23 nnnadeva;0929 nohiragana;306E nokatakana;30CE nokatakanahalfwidth;FF89 nonbreakingspace;00A0 nonenthai;0E13 nonuthai;0E19 noonarabic;0646 noonfinalarabic;FEE6 noonghunnaarabic;06BA noonghunnafinalarabic;FB9F noonhehinitialarabic;FEE7 FEEC nooninitialarabic;FEE7 noonjeeminitialarabic;FCD2 noonjeemisolatedarabic;FC4B noonmedialarabic;FEE8 noonmeeminitialarabic;FCD5 noonmeemisolatedarabic;FC4E noonnoonfinalarabic;FC8D notcontains;220C notelement;2209 notelementof;2209 notequal;2260 notgreater;226F notgreaternorequal;2271 notgreaternorless;2279 notidentical;2262 notless;226E notlessnorequal;2270 notparallel;2226 notprecedes;2280 notsubset;2284 notsucceeds;2281 notsuperset;2285 nowarmenian;0576 nparen;24A9 nssquare;33B1 nsuperior;207F ntilde;00F1 nu;03BD nuhiragana;306C nukatakana;30CC nukatakanahalfwidth;FF87 nuktabengali;09BC nuktadeva;093C nuktagujarati;0ABC nuktagurmukhi;0A3C numbersign;0023 numbersignmonospace;FF03 numbersignsmall;FE5F numeralsigngreek;0374 numeralsignlowergreek;0375 numero;2116 nun;05E0 nundagesh;FB40 nundageshhebrew;FB40 nunhebrew;05E0 nvsquare;33B5 nwsquare;33BB nyabengali;099E nyadeva;091E nyagujarati;0A9E nyagurmukhi;0A1E o;006F oacute;00F3 oangthai;0E2D obarred;0275 obarredcyrillic;04E9 obarreddieresiscyrillic;04EB obengali;0993 obopomofo;311B obreve;014F ocandradeva;0911 ocandragujarati;0A91 ocandravowelsigndeva;0949 ocandravowelsigngujarati;0AC9 ocaron;01D2 ocircle;24DE ocircumflex;00F4 ocircumflexacute;1ED1 ocircumflexdotbelow;1ED9 ocircumflexgrave;1ED3 ocircumflexhookabove;1ED5 ocircumflextilde;1ED7 ocyrillic;043E odblacute;0151 odblgrave;020D odeva;0913 
odieresis;00F6 odieresiscyrillic;04E7 odotbelow;1ECD oe;0153 oekorean;315A ogonek;02DB ogonekcmb;0328 ograve;00F2 ogujarati;0A93 oharmenian;0585 ohiragana;304A ohookabove;1ECF ohorn;01A1 ohornacute;1EDB ohorndotbelow;1EE3 ohorngrave;1EDD ohornhookabove;1EDF ohorntilde;1EE1 ohungarumlaut;0151 oi;01A3 oinvertedbreve;020F okatakana;30AA okatakanahalfwidth;FF75 okorean;3157 olehebrew;05AB omacron;014D omacronacute;1E53 omacrongrave;1E51 omdeva;0950 omega;03C9 omega1;03D6 omegacyrillic;0461 omegalatinclosed;0277 omegaroundcyrillic;047B omegatitlocyrillic;047D omegatonos;03CE omgujarati;0AD0 omicron;03BF omicrontonos;03CC omonospace;FF4F one;0031 onearabic;0661 onebengali;09E7 onecircle;2460 onecircleinversesansserif;278A onedeva;0967 onedotenleader;2024 oneeighth;215B onefitted;F6DC onegujarati;0AE7 onegurmukhi;0A67 onehackarabic;0661 onehalf;00BD onehangzhou;3021 oneideographicparen;3220 oneinferior;2081 onemonospace;FF11 onenumeratorbengali;09F4 oneoldstyle;F731 oneparen;2474 oneperiod;2488 onepersian;06F1 onequarter;00BC oneroman;2170 onesuperior;00B9 onethai;0E51 onethird;2153 oogonek;01EB oogonekmacron;01ED oogurmukhi;0A13 oomatragurmukhi;0A4B oopen;0254 oparen;24AA openbullet;25E6 option;2325 ordfeminine;00AA ordmasculine;00BA orthogonal;221F oshortdeva;0912 oshortvowelsigndeva;094A oslash;00F8 oslashacute;01FF osmallhiragana;3049 osmallkatakana;30A9 osmallkatakanahalfwidth;FF6B ostrokeacute;01FF osuperior;F6F0 otcyrillic;047F otilde;00F5 otildeacute;1E4D otildedieresis;1E4F oubopomofo;3121 overline;203E overlinecenterline;FE4A overlinecmb;0305 overlinedashed;FE49 overlinedblwavy;FE4C overlinewavy;FE4B overscore;00AF ovowelsignbengali;09CB ovowelsigndeva;094B ovowelsigngujarati;0ACB p;0070 paampssquare;3380 paasentosquare;332B pabengali;09AA pacute;1E55 padeva;092A pagedown;21DF pageup;21DE pagujarati;0AAA pagurmukhi;0A2A pahiragana;3071 paiyannoithai;0E2F pakatakana;30D1 palatalizationcyrilliccmb;0484 palochkacyrillic;04C0 pansioskorean;317F paragraph;00B6 
parallel;2225 parenleft;0028 parenleftaltonearabic;FD3E parenleftbt;F8ED parenleftex;F8EC parenleftinferior;208D parenleftmonospace;FF08 parenleftsmall;FE59 parenleftsuperior;207D parenlefttp;F8EB parenleftvertical;FE35 parenright;0029 parenrightaltonearabic;FD3F parenrightbt;F8F8 parenrightex;F8F7 parenrightinferior;208E parenrightmonospace;FF09 parenrightsmall;FE5A parenrightsuperior;207E parenrighttp;F8F6 parenrightvertical;FE36 partialdiff;2202 paseqhebrew;05C0 pashtahebrew;0599 pasquare;33A9 patah;05B7 patah11;05B7 patah1d;05B7 patah2a;05B7 patahhebrew;05B7 patahnarrowhebrew;05B7 patahquarterhebrew;05B7 patahwidehebrew;05B7 pazerhebrew;05A1 pbopomofo;3106 pcircle;24DF pdotaccent;1E57 pe;05E4 pecyrillic;043F pedagesh;FB44 pedageshhebrew;FB44 peezisquare;333B pefinaldageshhebrew;FB43 peharabic;067E peharmenian;057A pehebrew;05E4 pehfinalarabic;FB57 pehinitialarabic;FB58 pehiragana;307A pehmedialarabic;FB59 pekatakana;30DA pemiddlehookcyrillic;04A7 perafehebrew;FB4E percent;0025 percentarabic;066A percentmonospace;FF05 percentsmall;FE6A period;002E periodarmenian;0589 periodcentered;00B7 periodhalfwidth;FF61 periodinferior;F6E7 periodmonospace;FF0E periodsmall;FE52 periodsuperior;F6E8 perispomenigreekcmb;0342 perpendicular;22A5 perthousand;2030 peseta;20A7 pfsquare;338A phabengali;09AB phadeva;092B phagujarati;0AAB phagurmukhi;0A2B phi;03C6 phi1;03D5 phieuphacirclekorean;327A phieuphaparenkorean;321A phieuphcirclekorean;326C phieuphkorean;314D phieuphparenkorean;320C philatin;0278 phinthuthai;0E3A phisymbolgreek;03D5 phook;01A5 phophanthai;0E1E phophungthai;0E1C phosamphaothai;0E20 pi;03C0 pieupacirclekorean;3273 pieupaparenkorean;3213 pieupcieuckorean;3176 pieupcirclekorean;3265 pieupkiyeokkorean;3172 pieupkorean;3142 pieupparenkorean;3205 pieupsioskiyeokkorean;3174 pieupsioskorean;3144 pieupsiostikeutkorean;3175 pieupthieuthkorean;3177 pieuptikeutkorean;3173 pihiragana;3074 pikatakana;30D4 pisymbolgreek;03D6 piwrarmenian;0583 plus;002B plusbelowcmb;031F 
pluscircle;2295 plusminus;00B1 plusmod;02D6 plusmonospace;FF0B plussmall;FE62 plussuperior;207A pmonospace;FF50 pmsquare;33D8 pohiragana;307D pointingindexdownwhite;261F pointingindexleftwhite;261C pointingindexrightwhite;261E pointingindexupwhite;261D pokatakana;30DD poplathai;0E1B postalmark;3012 postalmarkface;3020 pparen;24AB precedes;227A prescription;211E primemod;02B9 primereversed;2035 product;220F projective;2305 prolongedkana;30FC propellor;2318 propersubset;2282 propersuperset;2283 proportion;2237 proportional;221D psi;03C8 psicyrillic;0471 psilipneumatacyrilliccmb;0486 pssquare;33B0 puhiragana;3077 pukatakana;30D7 pvsquare;33B4 pwsquare;33BA q;0071 qadeva;0958 qadmahebrew;05A8 qafarabic;0642 qaffinalarabic;FED6 qafinitialarabic;FED7 qafmedialarabic;FED8 qamats;05B8 qamats10;05B8 qamats1a;05B8 qamats1c;05B8 qamats27;05B8 qamats29;05B8 qamats33;05B8 qamatsde;05B8 qamatshebrew;05B8 qamatsnarrowhebrew;05B8 qamatsqatanhebrew;05B8 qamatsqatannarrowhebrew;05B8 qamatsqatanquarterhebrew;05B8 qamatsqatanwidehebrew;05B8 qamatsquarterhebrew;05B8 qamatswidehebrew;05B8 qarneyparahebrew;059F qbopomofo;3111 qcircle;24E0 qhook;02A0 qmonospace;FF51 qof;05E7 qofdagesh;FB47 qofdageshhebrew;FB47 qofhatafpatah;05E7 05B2 qofhatafpatahhebrew;05E7 05B2 qofhatafsegol;05E7 05B1 qofhatafsegolhebrew;05E7 05B1 qofhebrew;05E7 qofhiriq;05E7 05B4 qofhiriqhebrew;05E7 05B4 qofholam;05E7 05B9 qofholamhebrew;05E7 05B9 qofpatah;05E7 05B7 qofpatahhebrew;05E7 05B7 qofqamats;05E7 05B8 qofqamatshebrew;05E7 05B8 qofqubuts;05E7 05BB qofqubutshebrew;05E7 05BB qofsegol;05E7 05B6 qofsegolhebrew;05E7 05B6 qofsheva;05E7 05B0 qofshevahebrew;05E7 05B0 qoftsere;05E7 05B5 qoftserehebrew;05E7 05B5 qparen;24AC quarternote;2669 qubuts;05BB qubuts18;05BB qubuts25;05BB qubuts31;05BB qubutshebrew;05BB qubutsnarrowhebrew;05BB qubutsquarterhebrew;05BB qubutswidehebrew;05BB question;003F questionarabic;061F questionarmenian;055E questiondown;00BF questiondownsmall;F7BF questiongreek;037E questionmonospace;FF1F 
questionsmall;F73F quotedbl;0022 quotedblbase;201E quotedblleft;201C quotedblmonospace;FF02 quotedblprime;301E quotedblprimereversed;301D quotedblright;201D quoteleft;2018 quoteleftreversed;201B quotereversed;201B quoteright;2019 quoterightn;0149 quotesinglbase;201A quotesingle;0027 quotesinglemonospace;FF07 r;0072 raarmenian;057C rabengali;09B0 racute;0155 radeva;0930 radical;221A radicalex;F8E5 radoverssquare;33AE radoverssquaredsquare;33AF radsquare;33AD rafe;05BF rafehebrew;05BF ragujarati;0AB0 ragurmukhi;0A30 rahiragana;3089 rakatakana;30E9 rakatakanahalfwidth;FF97 ralowerdiagonalbengali;09F1 ramiddlediagonalbengali;09F0 ramshorn;0264 ratio;2236 rbopomofo;3116 rcaron;0159 rcedilla;0157 rcircle;24E1 rcommaaccent;0157 rdblgrave;0211 rdotaccent;1E59 rdotbelow;1E5B rdotbelowmacron;1E5D referencemark;203B reflexsubset;2286 reflexsuperset;2287 registered;00AE registersans;F8E8 registerserif;F6DA reharabic;0631 reharmenian;0580 rehfinalarabic;FEAE rehiragana;308C rehyehaleflamarabic;0631 FEF3 FE8E 0644 rekatakana;30EC rekatakanahalfwidth;FF9A resh;05E8 reshdageshhebrew;FB48 reshhatafpatah;05E8 05B2 reshhatafpatahhebrew;05E8 05B2 reshhatafsegol;05E8 05B1 reshhatafsegolhebrew;05E8 05B1 reshhebrew;05E8 reshhiriq;05E8 05B4 reshhiriqhebrew;05E8 05B4 reshholam;05E8 05B9 reshholamhebrew;05E8 05B9 reshpatah;05E8 05B7 reshpatahhebrew;05E8 05B7 reshqamats;05E8 05B8 reshqamatshebrew;05E8 05B8 reshqubuts;05E8 05BB reshqubutshebrew;05E8 05BB reshsegol;05E8 05B6 reshsegolhebrew;05E8 05B6 reshsheva;05E8 05B0 reshshevahebrew;05E8 05B0 reshtsere;05E8 05B5 reshtserehebrew;05E8 05B5 reversedtilde;223D reviahebrew;0597 reviamugrashhebrew;0597 revlogicalnot;2310 rfishhook;027E rfishhookreversed;027F rhabengali;09DD rhadeva;095D rho;03C1 rhook;027D rhookturned;027B rhookturnedsuperior;02B5 rhosymbolgreek;03F1 rhotichookmod;02DE rieulacirclekorean;3271 rieulaparenkorean;3211 rieulcirclekorean;3263 rieulhieuhkorean;3140 rieulkiyeokkorean;313A rieulkiyeoksioskorean;3169 rieulkorean;3139 
rieulmieumkorean;313B rieulpansioskorean;316C rieulparenkorean;3203 rieulphieuphkorean;313F rieulpieupkorean;313C rieulpieupsioskorean;316B rieulsioskorean;313D rieulthieuthkorean;313E rieultikeutkorean;316A rieulyeorinhieuhkorean;316D rightangle;221F righttackbelowcmb;0319 righttriangle;22BF rihiragana;308A rikatakana;30EA rikatakanahalfwidth;FF98 ring;02DA ringbelowcmb;0325 ringcmb;030A ringhalfleft;02BF ringhalfleftarmenian;0559 ringhalfleftbelowcmb;031C ringhalfleftcentered;02D3 ringhalfright;02BE ringhalfrightbelowcmb;0339 ringhalfrightcentered;02D2 rinvertedbreve;0213 rittorusquare;3351 rlinebelow;1E5F rlongleg;027C rlonglegturned;027A rmonospace;FF52 rohiragana;308D rokatakana;30ED rokatakanahalfwidth;FF9B roruathai;0E23 rparen;24AD rrabengali;09DC rradeva;0931 rragurmukhi;0A5C rreharabic;0691 rrehfinalarabic;FB8D rrvocalicbengali;09E0 rrvocalicdeva;0960 rrvocalicgujarati;0AE0 rrvocalicvowelsignbengali;09C4 rrvocalicvowelsigndeva;0944 rrvocalicvowelsigngujarati;0AC4 rsuperior;F6F1 rtblock;2590 rturned;0279 rturnedsuperior;02B4 ruhiragana;308B rukatakana;30EB rukatakanahalfwidth;FF99 rupeemarkbengali;09F2 rupeesignbengali;09F3 rupiah;F6DD ruthai;0E24 rvocalicbengali;098B rvocalicdeva;090B rvocalicgujarati;0A8B rvocalicvowelsignbengali;09C3 rvocalicvowelsigndeva;0943 rvocalicvowelsigngujarati;0AC3 s;0073 sabengali;09B8 sacute;015B sacutedotaccent;1E65 sadarabic;0635 sadeva;0938 sadfinalarabic;FEBA sadinitialarabic;FEBB sadmedialarabic;FEBC sagujarati;0AB8 sagurmukhi;0A38 sahiragana;3055 sakatakana;30B5 sakatakanahalfwidth;FF7B sallallahoualayhewasallamarabic;FDFA samekh;05E1 samekhdagesh;FB41 samekhdageshhebrew;FB41 samekhhebrew;05E1 saraaathai;0E32 saraaethai;0E41 saraaimaimalaithai;0E44 saraaimaimuanthai;0E43 saraamthai;0E33 saraathai;0E30 saraethai;0E40 saraiileftthai;F886 saraiithai;0E35 saraileftthai;F885 saraithai;0E34 saraothai;0E42 saraueeleftthai;F888 saraueethai;0E37 saraueleftthai;F887 sarauethai;0E36 sarauthai;0E38 sarauuthai;0E39 sbopomofo;3119 
scaron;0161 scarondotaccent;1E67 scedilla;015F schwa;0259 schwacyrillic;04D9 schwadieresiscyrillic;04DB schwahook;025A scircle;24E2 scircumflex;015D scommaaccent;0219 sdotaccent;1E61 sdotbelow;1E63 sdotbelowdotaccent;1E69 seagullbelowcmb;033C second;2033 secondtonechinese;02CA section;00A7 seenarabic;0633 seenfinalarabic;FEB2 seeninitialarabic;FEB3 seenmedialarabic;FEB4 segol;05B6 segol13;05B6 segol1f;05B6 segol2c;05B6 segolhebrew;05B6 segolnarrowhebrew;05B6 segolquarterhebrew;05B6 segoltahebrew;0592 segolwidehebrew;05B6 seharmenian;057D sehiragana;305B sekatakana;30BB sekatakanahalfwidth;FF7E semicolon;003B semicolonarabic;061B semicolonmonospace;FF1B semicolonsmall;FE54 semivoicedmarkkana;309C semivoicedmarkkanahalfwidth;FF9F sentisquare;3322 sentosquare;3323 seven;0037 sevenarabic;0667 sevenbengali;09ED sevencircle;2466 sevencircleinversesansserif;2790 sevendeva;096D seveneighths;215E sevengujarati;0AED sevengurmukhi;0A6D sevenhackarabic;0667 sevenhangzhou;3027 sevenideographicparen;3226 seveninferior;2087 sevenmonospace;FF17 sevenoldstyle;F737 sevenparen;247A sevenperiod;248E sevenpersian;06F7 sevenroman;2176 sevensuperior;2077 seventeencircle;2470 seventeenparen;2484 seventeenperiod;2498 seventhai;0E57 sfthyphen;00AD shaarmenian;0577 shabengali;09B6 shacyrillic;0448 shaddaarabic;0651 shaddadammaarabic;FC61 shaddadammatanarabic;FC5E shaddafathaarabic;FC60 shaddafathatanarabic;0651 064B shaddakasraarabic;FC62 shaddakasratanarabic;FC5F shade;2592 shadedark;2593 shadelight;2591 shademedium;2592 shadeva;0936 shagujarati;0AB6 shagurmukhi;0A36 shalshelethebrew;0593 shbopomofo;3115 shchacyrillic;0449 sheenarabic;0634 sheenfinalarabic;FEB6 sheeninitialarabic;FEB7 sheenmedialarabic;FEB8 sheicoptic;03E3 sheqel;20AA sheqelhebrew;20AA sheva;05B0 sheva115;05B0 sheva15;05B0 sheva22;05B0 sheva2e;05B0 shevahebrew;05B0 shevanarrowhebrew;05B0 shevaquarterhebrew;05B0 shevawidehebrew;05B0 shhacyrillic;04BB shimacoptic;03ED shin;05E9 shindagesh;FB49 shindageshhebrew;FB49 
shindageshshindot;FB2C shindageshshindothebrew;FB2C shindageshsindot;FB2D shindageshsindothebrew;FB2D shindothebrew;05C1 shinhebrew;05E9 shinshindot;FB2A shinshindothebrew;FB2A shinsindot;FB2B shinsindothebrew;FB2B shook;0282 sigma;03C3 sigma1;03C2 sigmafinal;03C2 sigmalunatesymbolgreek;03F2 sihiragana;3057 sikatakana;30B7 sikatakanahalfwidth;FF7C siluqhebrew;05BD siluqlefthebrew;05BD similar;223C sindothebrew;05C2 siosacirclekorean;3274 siosaparenkorean;3214 sioscieuckorean;317E sioscirclekorean;3266 sioskiyeokkorean;317A sioskorean;3145 siosnieunkorean;317B siosparenkorean;3206 siospieupkorean;317D siostikeutkorean;317C six;0036 sixarabic;0666 sixbengali;09EC sixcircle;2465 sixcircleinversesansserif;278F sixdeva;096C sixgujarati;0AEC sixgurmukhi;0A6C sixhackarabic;0666 sixhangzhou;3026 sixideographicparen;3225 sixinferior;2086 sixmonospace;FF16 sixoldstyle;F736 sixparen;2479 sixperiod;248D sixpersian;06F6 sixroman;2175 sixsuperior;2076 sixteencircle;246F sixteencurrencydenominatorbengali;09F9 sixteenparen;2483 sixteenperiod;2497 sixthai;0E56 slash;002F slashmonospace;FF0F slong;017F slongdotaccent;1E9B smileface;263A smonospace;FF53 sofpasuqhebrew;05C3 softhyphen;00AD softsigncyrillic;044C sohiragana;305D sokatakana;30BD sokatakanahalfwidth;FF7F soliduslongoverlaycmb;0338 solidusshortoverlaycmb;0337 sorusithai;0E29 sosalathai;0E28 sosothai;0E0B sosuathai;0E2A space;0020 spacehackarabic;0020 spade;2660 spadesuitblack;2660 spadesuitwhite;2664 sparen;24AE squarebelowcmb;033B squarecc;33C4 squarecm;339D squarediagonalcrosshatchfill;25A9 squarehorizontalfill;25A4 squarekg;338F squarekm;339E squarekmcapital;33CE squareln;33D1 squarelog;33D2 squaremg;338E squaremil;33D5 squaremm;339C squaremsquared;33A1 squareorthogonalcrosshatchfill;25A6 squareupperlefttolowerrightfill;25A7 squareupperrighttolowerleftfill;25A8 squareverticalfill;25A5 squarewhitewithsmallblack;25A3 srsquare;33DB ssabengali;09B7 ssadeva;0937 ssagujarati;0AB7 ssangcieuckorean;3149 ssanghieuhkorean;3185 
ssangieungkorean;3180 ssangkiyeokkorean;3132 ssangnieunkorean;3165 ssangpieupkorean;3143 ssangsioskorean;3146 ssangtikeutkorean;3138 ssuperior;F6F2 sterling;00A3 sterlingmonospace;FFE1 strokelongoverlaycmb;0336 strokeshortoverlaycmb;0335 subset;2282 subsetnotequal;228A subsetorequal;2286 succeeds;227B suchthat;220B suhiragana;3059 sukatakana;30B9 sukatakanahalfwidth;FF7D sukunarabic;0652 summation;2211 sun;263C superset;2283 supersetnotequal;228B supersetorequal;2287 svsquare;33DC syouwaerasquare;337C t;0074 tabengali;09A4 tackdown;22A4 tackleft;22A3 tadeva;0924 tagujarati;0AA4 tagurmukhi;0A24 taharabic;0637 tahfinalarabic;FEC2 tahinitialarabic;FEC3 tahiragana;305F tahmedialarabic;FEC4 taisyouerasquare;337D takatakana;30BF takatakanahalfwidth;FF80 tatweelarabic;0640 tau;03C4 tav;05EA tavdages;FB4A tavdagesh;FB4A tavdageshhebrew;FB4A tavhebrew;05EA tbar;0167 tbopomofo;310A tcaron;0165 tccurl;02A8 tcedilla;0163 tcheharabic;0686 tchehfinalarabic;FB7B tchehinitialarabic;FB7C tchehmedialarabic;FB7D tchehmeeminitialarabic;FB7C FEE4 tcircle;24E3 tcircumflexbelow;1E71 tcommaaccent;0163 tdieresis;1E97 tdotaccent;1E6B tdotbelow;1E6D tecyrillic;0442 tedescendercyrillic;04AD teharabic;062A tehfinalarabic;FE96 tehhahinitialarabic;FCA2 tehhahisolatedarabic;FC0C tehinitialarabic;FE97 tehiragana;3066 tehjeeminitialarabic;FCA1 tehjeemisolatedarabic;FC0B tehmarbutaarabic;0629 tehmarbutafinalarabic;FE94 tehmedialarabic;FE98 tehmeeminitialarabic;FCA4 tehmeemisolatedarabic;FC0E tehnoonfinalarabic;FC73 tekatakana;30C6 tekatakanahalfwidth;FF83 telephone;2121 telephoneblack;260E telishagedolahebrew;05A0 telishaqetanahebrew;05A9 tencircle;2469 tenideographicparen;3229 tenparen;247D tenperiod;2491 tenroman;2179 tesh;02A7 tet;05D8 tetdagesh;FB38 tetdageshhebrew;FB38 tethebrew;05D8 tetsecyrillic;04B5 tevirhebrew;059B tevirlefthebrew;059B thabengali;09A5 thadeva;0925 thagujarati;0AA5 thagurmukhi;0A25 thalarabic;0630 thalfinalarabic;FEAC thanthakhatlowleftthai;F898 thanthakhatlowrightthai;F897 
thanthakhatthai;0E4C thanthakhatupperleftthai;F896 theharabic;062B thehfinalarabic;FE9A thehinitialarabic;FE9B thehmedialarabic;FE9C thereexists;2203 therefore;2234 theta;03B8 theta1;03D1 thetasymbolgreek;03D1 thieuthacirclekorean;3279 thieuthaparenkorean;3219 thieuthcirclekorean;326B thieuthkorean;314C thieuthparenkorean;320B thirteencircle;246C thirteenparen;2480 thirteenperiod;2494 thonangmonthothai;0E11 thook;01AD thophuthaothai;0E12 thorn;00FE thothahanthai;0E17 thothanthai;0E10 thothongthai;0E18 thothungthai;0E16 thousandcyrillic;0482 thousandsseparatorarabic;066C thousandsseparatorpersian;066C three;0033 threearabic;0663 threebengali;09E9 threecircle;2462 threecircleinversesansserif;278C threedeva;0969 threeeighths;215C threegujarati;0AE9 threegurmukhi;0A69 threehackarabic;0663 threehangzhou;3023 threeideographicparen;3222 threeinferior;2083 threemonospace;FF13 threenumeratorbengali;09F6 threeoldstyle;F733 threeparen;2476 threeperiod;248A threepersian;06F3 threequarters;00BE threequartersemdash;F6DE threeroman;2172 threesuperior;00B3 threethai;0E53 thzsquare;3394 tihiragana;3061 tikatakana;30C1 tikatakanahalfwidth;FF81 tikeutacirclekorean;3270 tikeutaparenkorean;3210 tikeutcirclekorean;3262 tikeutkorean;3137 tikeutparenkorean;3202 tilde;02DC tildebelowcmb;0330 tildecmb;0303 tildecomb;0303 tildedoublecmb;0360 tildeoperator;223C tildeoverlaycmb;0334 tildeverticalcmb;033E timescircle;2297 tipehahebrew;0596 tipehalefthebrew;0596 tippigurmukhi;0A70 titlocyrilliccmb;0483 tiwnarmenian;057F tlinebelow;1E6F tmonospace;FF54 toarmenian;0569 tohiragana;3068 tokatakana;30C8 tokatakanahalfwidth;FF84 tonebarextrahighmod;02E5 tonebarextralowmod;02E9 tonebarhighmod;02E6 tonebarlowmod;02E8 tonebarmidmod;02E7 tonefive;01BD tonesix;0185 tonetwo;01A8 tonos;0384 tonsquare;3327 topatakthai;0E0F tortoiseshellbracketleft;3014 tortoiseshellbracketleftsmall;FE5D tortoiseshellbracketleftvertical;FE39 tortoiseshellbracketright;3015 tortoiseshellbracketrightsmall;FE5E 
tortoiseshellbracketrightvertical;FE3A totaothai;0E15 tpalatalhook;01AB tparen;24AF trademark;2122 trademarksans;F8EA trademarkserif;F6DB tretroflexhook;0288 triagdn;25BC triaglf;25C4 triagrt;25BA triagup;25B2 ts;02A6 tsadi;05E6 tsadidagesh;FB46 tsadidageshhebrew;FB46 tsadihebrew;05E6 tsecyrillic;0446 tsere;05B5 tsere12;05B5 tsere1e;05B5 tsere2b;05B5 tserehebrew;05B5 tserenarrowhebrew;05B5 tserequarterhebrew;05B5 tserewidehebrew;05B5 tshecyrillic;045B tsuperior;F6F3 ttabengali;099F ttadeva;091F ttagujarati;0A9F ttagurmukhi;0A1F tteharabic;0679 ttehfinalarabic;FB67 ttehinitialarabic;FB68 ttehmedialarabic;FB69 tthabengali;09A0 tthadeva;0920 tthagujarati;0AA0 tthagurmukhi;0A20 tturned;0287 tuhiragana;3064 tukatakana;30C4 tukatakanahalfwidth;FF82 tusmallhiragana;3063 tusmallkatakana;30C3 tusmallkatakanahalfwidth;FF6F twelvecircle;246B twelveparen;247F twelveperiod;2493 twelveroman;217B twentycircle;2473 twentyhangzhou;5344 twentyparen;2487 twentyperiod;249B two;0032 twoarabic;0662 twobengali;09E8 twocircle;2461 twocircleinversesansserif;278B twodeva;0968 twodotenleader;2025 twodotleader;2025 twodotleadervertical;FE30 twogujarati;0AE8 twogurmukhi;0A68 twohackarabic;0662 twohangzhou;3022 twoideographicparen;3221 twoinferior;2082 twomonospace;FF12 twonumeratorbengali;09F5 twooldstyle;F732 twoparen;2475 twoperiod;2489 twopersian;06F2 tworoman;2171 twostroke;01BB twosuperior;00B2 twothai;0E52 twothirds;2154 u;0075 uacute;00FA ubar;0289 ubengali;0989 ubopomofo;3128 ubreve;016D ucaron;01D4 ucircle;24E4 ucircumflex;00FB ucircumflexbelow;1E77 ucyrillic;0443 udattadeva;0951 udblacute;0171 udblgrave;0215 udeva;0909 udieresis;00FC udieresisacute;01D8 udieresisbelow;1E73 udieresiscaron;01DA udieresiscyrillic;04F1 udieresisgrave;01DC udieresismacron;01D6 udotbelow;1EE5 ugrave;00F9 ugujarati;0A89 ugurmukhi;0A09 uhiragana;3046 uhookabove;1EE7 uhorn;01B0 uhornacute;1EE9 uhorndotbelow;1EF1 uhorngrave;1EEB uhornhookabove;1EED uhorntilde;1EEF uhungarumlaut;0171 uhungarumlautcyrillic;04F3 
uinvertedbreve;0217 ukatakana;30A6 ukatakanahalfwidth;FF73 ukcyrillic;0479 ukorean;315C umacron;016B umacroncyrillic;04EF umacrondieresis;1E7B umatragurmukhi;0A41 umonospace;FF55 underscore;005F underscoredbl;2017 underscoremonospace;FF3F underscorevertical;FE33 underscorewavy;FE4F union;222A universal;2200 uogonek;0173 uparen;24B0 upblock;2580 upperdothebrew;05C4 upsilon;03C5 upsilondieresis;03CB upsilondieresistonos;03B0 upsilonlatin;028A upsilontonos;03CD uptackbelowcmb;031D uptackmod;02D4 uragurmukhi;0A73 uring;016F ushortcyrillic;045E usmallhiragana;3045 usmallkatakana;30A5 usmallkatakanahalfwidth;FF69 ustraightcyrillic;04AF ustraightstrokecyrillic;04B1 utilde;0169 utildeacute;1E79 utildebelow;1E75 uubengali;098A uudeva;090A uugujarati;0A8A uugurmukhi;0A0A uumatragurmukhi;0A42 uuvowelsignbengali;09C2 uuvowelsigndeva;0942 uuvowelsigngujarati;0AC2 uvowelsignbengali;09C1 uvowelsigndeva;0941 uvowelsigngujarati;0AC1 v;0076 vadeva;0935 vagujarati;0AB5 vagurmukhi;0A35 vakatakana;30F7 vav;05D5 vavdagesh;FB35 vavdagesh65;FB35 vavdageshhebrew;FB35 vavhebrew;05D5 vavholam;FB4B vavholamhebrew;FB4B vavvavhebrew;05F0 vavyodhebrew;05F1 vcircle;24E5 vdotbelow;1E7F vecyrillic;0432 veharabic;06A4 vehfinalarabic;FB6B vehinitialarabic;FB6C vehmedialarabic;FB6D vekatakana;30F9 venus;2640 verticalbar;007C verticallineabovecmb;030D verticallinebelowcmb;0329 verticallinelowmod;02CC verticallinemod;02C8 vewarmenian;057E vhook;028B vikatakana;30F8 viramabengali;09CD viramadeva;094D viramagujarati;0ACD visargabengali;0983 visargadeva;0903 visargagujarati;0A83 vmonospace;FF56 voarmenian;0578 voicediterationhiragana;309E voicediterationkatakana;30FE voicedmarkkana;309B voicedmarkkanahalfwidth;FF9E vokatakana;30FA vparen;24B1 vtilde;1E7D vturned;028C vuhiragana;3094 vukatakana;30F4 w;0077 wacute;1E83 waekorean;3159 wahiragana;308F wakatakana;30EF wakatakanahalfwidth;FF9C wakorean;3158 wasmallhiragana;308E wasmallkatakana;30EE wattosquare;3357 wavedash;301C wavyunderscorevertical;FE34 
wawarabic;0648 wawfinalarabic;FEEE wawhamzaabovearabic;0624 wawhamzaabovefinalarabic;FE86 wbsquare;33DD wcircle;24E6 wcircumflex;0175 wdieresis;1E85 wdotaccent;1E87 wdotbelow;1E89 wehiragana;3091 weierstrass;2118 wekatakana;30F1 wekorean;315E weokorean;315D wgrave;1E81 whitebullet;25E6 whitecircle;25CB whitecircleinverse;25D9 whitecornerbracketleft;300E whitecornerbracketleftvertical;FE43 whitecornerbracketright;300F whitecornerbracketrightvertical;FE44 whitediamond;25C7 whitediamondcontainingblacksmalldiamond;25C8 whitedownpointingsmalltriangle;25BF whitedownpointingtriangle;25BD whiteleftpointingsmalltriangle;25C3 whiteleftpointingtriangle;25C1 whitelenticularbracketleft;3016 whitelenticularbracketright;3017 whiterightpointingsmalltriangle;25B9 whiterightpointingtriangle;25B7 whitesmallsquare;25AB whitesmilingface;263A whitesquare;25A1 whitestar;2606 whitetelephone;260F whitetortoiseshellbracketleft;3018 whitetortoiseshellbracketright;3019 whiteuppointingsmalltriangle;25B5 whiteuppointingtriangle;25B3 wihiragana;3090 wikatakana;30F0 wikorean;315F wmonospace;FF57 wohiragana;3092 wokatakana;30F2 wokatakanahalfwidth;FF66 won;20A9 wonmonospace;FFE6 wowaenthai;0E27 wparen;24B2 wring;1E98 wsuperior;02B7 wturned;028D wynn;01BF x;0078 xabovecmb;033D xbopomofo;3112 xcircle;24E7 xdieresis;1E8D xdotaccent;1E8B xeharmenian;056D xi;03BE xmonospace;FF58 xparen;24B3 xsuperior;02E3 y;0079 yaadosquare;334E yabengali;09AF yacute;00FD yadeva;092F yaekorean;3152 yagujarati;0AAF yagurmukhi;0A2F yahiragana;3084 yakatakana;30E4 yakatakanahalfwidth;FF94 yakorean;3151 yamakkanthai;0E4E yasmallhiragana;3083 yasmallkatakana;30E3 yasmallkatakanahalfwidth;FF6C yatcyrillic;0463 ycircle;24E8 ycircumflex;0177 ydieresis;00FF ydotaccent;1E8F ydotbelow;1EF5 yeharabic;064A yehbarreearabic;06D2 yehbarreefinalarabic;FBAF yehfinalarabic;FEF2 yehhamzaabovearabic;0626 yehhamzaabovefinalarabic;FE8A yehhamzaaboveinitialarabic;FE8B yehhamzaabovemedialarabic;FE8C yehinitialarabic;FEF3 yehmedialarabic;FEF4 
yehmeeminitialarabic;FCDD yehmeemisolatedarabic;FC58 yehnoonfinalarabic;FC94 yehthreedotsbelowarabic;06D1 yekorean;3156 yen;00A5 yenmonospace;FFE5 yeokorean;3155 yeorinhieuhkorean;3186 yerahbenyomohebrew;05AA yerahbenyomolefthebrew;05AA yericyrillic;044B yerudieresiscyrillic;04F9 yesieungkorean;3181 yesieungpansioskorean;3183 yesieungsioskorean;3182 yetivhebrew;059A ygrave;1EF3 yhook;01B4 yhookabove;1EF7 yiarmenian;0575 yicyrillic;0457 yikorean;3162 yinyang;262F yiwnarmenian;0582 ymonospace;FF59 yod;05D9 yoddagesh;FB39 yoddageshhebrew;FB39 yodhebrew;05D9 yodyodhebrew;05F2 yodyodpatahhebrew;FB1F yohiragana;3088 yoikorean;3189 yokatakana;30E8 yokatakanahalfwidth;FF96 yokorean;315B yosmallhiragana;3087 yosmallkatakana;30E7 yosmallkatakanahalfwidth;FF6E yotgreek;03F3 yoyaekorean;3188 yoyakorean;3187 yoyakthai;0E22 yoyingthai;0E0D yparen;24B4 ypogegrammeni;037A ypogegrammenigreekcmb;0345 yr;01A6 yring;1E99 ysuperior;02B8 ytilde;1EF9 yturned;028E yuhiragana;3086 yuikorean;318C yukatakana;30E6 yukatakanahalfwidth;FF95 yukorean;3160 yusbigcyrillic;046B yusbigiotifiedcyrillic;046D yuslittlecyrillic;0467 yuslittleiotifiedcyrillic;0469 yusmallhiragana;3085 yusmallkatakana;30E5 yusmallkatakanahalfwidth;FF6D yuyekorean;318B yuyeokorean;318A yyabengali;09DF yyadeva;095F z;007A zaarmenian;0566 zacute;017A zadeva;095B zagurmukhi;0A5B zaharabic;0638 zahfinalarabic;FEC6 zahinitialarabic;FEC7 zahiragana;3056 zahmedialarabic;FEC8 zainarabic;0632 zainfinalarabic;FEB0 zakatakana;30B6 zaqefgadolhebrew;0595 zaqefqatanhebrew;0594 zarqahebrew;0598 zayin;05D6 zayindagesh;FB36 zayindageshhebrew;FB36 zayinhebrew;05D6 zbopomofo;3117 zcaron;017E zcircle;24E9 zcircumflex;1E91 zcurl;0291 zdot;017C zdotaccent;017C zdotbelow;1E93 zecyrillic;0437 zedescendercyrillic;0499 zedieresiscyrillic;04DF zehiragana;305C zekatakana;30BC zero;0030 zeroarabic;0660 zerobengali;09E6 zerodeva;0966 zerogujarati;0AE6 zerogurmukhi;0A66 zerohackarabic;0660 zeroinferior;2080 zeromonospace;FF10 zerooldstyle;F730 
zeropersian;06F0 zerosuperior;2070 zerothai;0E50 zerowidthjoiner;FEFF zerowidthnonjoiner;200C zerowidthspace;200B zeta;03B6 zhbopomofo;3113 zhearmenian;056A zhebrevecyrillic;04C2 zhecyrillic;0436 zhedescendercyrillic;0497 zhedieresiscyrillic;04DD zihiragana;3058 zikatakana;30B8 zinorhebrew;05AE zlinebelow;1E95 zmonospace;FF5A zohiragana;305E zokatakana;30BE zparen;24B5 zretroflexhook;0290 zstroke;01B6 zuhiragana;305A zukatakana;30BA a100;275E a101;2761 a102;2762 a103;2763 a104;2764 a105;2710 a106;2765 a107;2766 a108;2767 a109;2660 a10;2721 a110;2665 a111;2666 a112;2663 a117;2709 a118;2708 a119;2707 a11;261B a120;2460 a121;2461 a122;2462 a123;2463 a124;2464 a125;2465 a126;2466 a127;2467 a128;2468 a129;2469 a12;261E a130;2776 a131;2777 a132;2778 a133;2779 a134;277A a135;277B a136;277C a137;277D a138;277E a139;277F a13;270C a140;2780 a141;2781 a142;2782 a143;2783 a144;2784 a145;2785 a146;2786 a147;2787 a148;2788 a149;2789 a14;270D a150;278A a151;278B a152;278C a153;278D a154;278E a155;278F a156;2790 a157;2791 a158;2792 a159;2793 a15;270E a160;2794 a161;2192 a162;27A3 a163;2194 a164;2195 a165;2799 a166;279B a167;279C a168;279D a169;279E a16;270F a170;279F a171;27A0 a172;27A1 a173;27A2 a174;27A4 a175;27A5 a176;27A6 a177;27A7 a178;27A8 a179;27A9 a17;2711 a180;27AB a181;27AD a182;27AF a183;27B2 a184;27B3 a185;27B5 a186;27B8 a187;27BA a188;27BB a189;27BC a18;2712 a190;27BD a191;27BE a192;279A a193;27AA a194;27B6 a195;27B9 a196;2798 a197;27B4 a198;27B7 a199;27AC a19;2713 a1;2701 a200;27AE a201;27B1 a202;2703 a203;2750 a204;2752 a205;276E a206;2770 a20;2714 a21;2715 a22;2716 a23;2717 a24;2718 a25;2719 a26;271A a27;271B a28;271C a29;2722 a2;2702 a30;2723 a31;2724 a32;2725 a33;2726 a34;2727 a35;2605 a36;2729 a37;272A a38;272B a39;272C a3;2704 a40;272D a41;272E a42;272F a43;2730 a44;2731 a45;2732 a46;2733 a47;2734 a48;2735 a49;2736 a4;260E a50;2737 a51;2738 a52;2739 a53;273A a54;273B a55;273C a56;273D a57;273E a58;273F a59;2740 a5;2706 a60;2741 a61;2742 a62;2743 a63;2744 
a64;2745 a65;2746 a66;2747 a67;2748 a68;2749 a69;274A a6;271D a70;274B a71;25CF a72;274D a73;25A0 a74;274F a75;2751 a76;25B2 a77;25BC a78;25C6 a79;2756 a7;271E a81;25D7 a82;2758 a83;2759 a84;275A a85;276F a86;2771 a87;2772 a88;2773 a89;2768 a8;271F a90;2769 a91;276C a92;276D a93;276A a94;276B a95;2774 a96;2775 a97;275B a98;275C a99;275D a9;2720 """ # string table management # class StringTable: def __init__( self, name_list, master_table_name ): self.names = name_list self.master_table = master_table_name self.indices = {} index = 0 for name in name_list: self.indices[name] = index index += len( name ) + 1 self.total = index def dump( self, file ): write = file.write write( " static const char " + self.master_table + "[" + repr( self.total ) + "] =\n" ) write( " {\n" ) line = "" for name in self.names: line += " '" line += string.join( ( re.findall( ".", name ) ), "','" ) line += "', 0,\n" write( line + " };\n\n\n" ) def dump_sublist( self, file, table_name, macro_name, sublist ): write = file.write write( "#define " + macro_name + " " + repr( len( sublist ) ) + "\n\n" ) write( " /* Values are offsets into the `" + self.master_table + "' table */\n\n" ) write( " static const short " + table_name + "[" + macro_name + "] =\n" ) write( " {\n" ) line = " " comma = "" col = 0 for name in sublist: line += comma line += "%4d" % self.indices[name] col += 1 comma = "," if col == 14: col = 0 comma = ",\n " write( line + "\n };\n\n\n" ) # We now store the Adobe Glyph List in compressed form. The list is put # into a data structure called `trie' (because it has a tree-like # appearance). Consider, for example, that you want to store the # following name mapping: # # A => 1 # Aacute => 6 # Abalon => 2 # Abstract => 4 # # It is possible to store the entries as follows. 
#
#   A  => 1
#   |
#   +-acute  => 6
#   |
#   +-b
#     |
#     +-alon    => 2
#     |
#     +-stract  => 4
#
# We see that each node in the trie has:
#
# - one or more `letters'
# - an optional value
# - zero or more child nodes
#
# The first step is to call
#
#   root = StringNode( "", 0 )
#   for word in map.keys():
#     root.add( word, map[word] )
#
# which creates a large trie where each node has only one child.
#
# Executing
#
#   root = root.optimize()
#
# optimizes the trie by merging the letters of successive nodes whenever
# possible.
#
# Each node of the trie is stored as follows.
#
# - First the node's letter, according to the following scheme.  We
#   use the fact that in the AGL no name contains character codes > 127.
#
#     name          bitsize  description
#     ----------------------------------------------------------------
#     notlast       1        Set to 1 if this is not the last letter
#                            in the word.
#     ascii         7        The letter's ASCII value.
#
# - The letter is followed by a children count and the value of the
#   current key (if any).  Again we can do some optimization because all
#   AGL entries are from the BMP; this means that 16 bits are sufficient
#   to store its Unicode values.  Additionally, no node has more than
#   127 children.
#
#     name          bitsize  description
#     -----------------------------------------
#     hasvalue      1        Set to 1 if a 16-bit Unicode value follows.
#     num_children  7        Number of children.  Can be 0 only if
#                            `hasvalue' is set to 1.
#     value         16       Optional Unicode value.
#
# - A node is finished by a list of 16-bit absolute offsets to the
#   children, which must be sorted in increasing order of their first
#   letter.
#
# For simplicity, all 16-bit quantities are stored in big-endian order.
#
# The root node has first letter = 0, and no value.
#
class StringNode:
    """One node of the trie used to compress the Adobe Glyph List.

    A node carries one or more `letters', an optional 16-bit value
    (0 meaning `no value'), and child nodes keyed by first letter.
    """

    def __init__( self, letter, value ):
        self.letter   = letter
        self.value    = value
        self.children = {}

    def __cmp__( self, other ):
        # Python 2 ordering hook, kept for backward compatibility;
        # the sorting below uses an explicit key and does not rely on it
        return ord( self.letter[0] ) - ord( other.letter[0] )

    def add( self, word, value ):
        """Insert `word' with `value' into the sub-trie rooted here."""
        if len( word ) == 0:
            self.value = value
            return

        letter = word[0]
        word   = word[1:]

        # `has_key' is gone in Python 3; use the `in' operator
        if letter in self.children:
            child = self.children[letter]
        else:
            child = StringNode( letter, 0 )
            self.children[letter] = child

        child.add( word, value )

    def optimize( self ):
        """Merge chains of single-child nodes into multi-letter nodes."""
        # optimize all children first; snapshot the values into a list
        # because `self.children' is rebuilt while we iterate (a bare
        # Python 3 dict view would be emptied by the reassignment)
        children      = list( self.children.values() )
        self.children = {}

        for child in children:
            self.children[child.letter[0]] = child.optimize()

        # don't merge if there's a value, if we don't have any child,
        # or if we have more than one child
        if ( self.value != 0 ) or ( not children ) or len( children ) > 1:
            return self

        child = children[0]

        self.letter  += child.letter
        self.value    = child.value
        self.children = child.children

        return self

    def dump_debug( self, write, margin ):
        """Pretty-print the sub-trie; this is used during debugging only."""
        line = margin + "+-"
        if len( self.letter ) == 0:
            line += "<NOLETTER>"
        else:
            line += self.letter

        if self.value:
            line += " => " + repr( self.value )

        write( line + "\n" )

        if self.children:
            margin += "| "
            for child in self.children.values():
                child.dump_debug( write, margin )

    def locate( self, index ):
        """Assign this sub-trie's storage offsets starting at `index'.

        Returns the first free offset after the sub-trie.
        """
        self.index = index

        # letters + one count byte; an empty letter still costs one byte
        if len( self.letter ) > 0:
            index += len( self.letter ) + 1
        else:
            index += 2

        if self.value != 0:
            index += 2

        # children must be sorted by their first letter; sort with an
        # explicit key (list.sort() on nodes relied on Python 2 __cmp__)
        children = sorted( self.children.values(),
                           key = lambda node: node.letter[0] )

        index += 2 * len( children )
        for child in children:
            index = child.locate( index )

        return index

    def store( self, storage ):
        """Append this sub-trie's binary encoding to `storage' (bytes)."""
        # write the letters; all but the last carry the `notlast' bit
        l = len( self.letter )
        if l == 0:
            storage += struct.pack( "B", 0 )
        else:
            for n in range( l ):
                val = ord( self.letter[n] )
                if n < l - 1:
                    val += 128
                storage += struct.pack( "B", val )

        # write the children count and, if any, the 16-bit value
        # (high bit of the count byte is the `hasvalue' flag)
        children = sorted( self.children.values(),
                           key = lambda node: node.letter[0] )

        count = len( children )
        if self.value != 0:
            storage += struct.pack( "!BH", count + 128, self.value )
        else:
            storage += struct.pack( "B", count )

        # write the children's 16-bit big-endian absolute offsets
        for child in children:
            storage += struct.pack( "!H", child.index )

        for child in children:
            storage = child.store( storage )

        return storage


def adobe_glyph_values():
    """Return the list of glyph names and their unicode values.

    Entries whose value field holds more than one code point are
    skipped, matching the original behavior.
    """
    glyphs = []
    values = []

    for line in adobe_glyph_list.split( '\n' ):
        if line:
            fields = line.split( ';' )
            # print fields[1] + ' - ' + fields[0]
            subfields = fields[1].split( ' ' )
            if len( subfields ) == 1:
                glyphs.append( fields[0] )
                values.append( fields[1] )

    return glyphs, values


def filter_glyph_names( alist, filter ):
    """Filter `alist' by taking _out_ all glyph names that are in `filter'."""
    # NOTE: the parameter name `filter' shadows the built-in but is kept
    # for interface compatibility.  A membership test replaces the old
    # `filter.index + bare except' pattern: list.index only ever raises
    # ValueError, so the semantics are identical.
    return [name for name in alist if name not in filter]


def dump_encoding( file, encoding_name, encoding_list ):
    """Dump a given encoding as a C `unsigned short' array."""
    write = file.write
    write( " /* the following are indices into the SID name table */\n" )
    write( " static const unsigned short " + encoding_name + "[" + repr( len( encoding_list ) ) + "] =\n" )
    write( " {\n" )

    line  = " "
    comma = ""
    col   = 0

    for value in encoding_list:
        line += comma
        line += "%3d" % value
        comma = ","
        col  += 1
        if col == 16:
            col   = 0
            comma = ",\n "

    write( line + "\n };\n\n\n" )


def dump_array( the_array, write, array_name ):
    """Dump a binary blob as a C `unsigned char' array."""
    write( " static const unsigned char " + array_name + "[" + repr( len( the_array ) ) + "L] =\n" )
    write( " {\n" )

    line  = ""
    comma = " "
    col   = 0

    # iterate via bytearray so each element is an int under both
    # Python 2 (str storage) and Python 3 (bytes storage); the old
    # `ord( value )' broke on Python 3
    for value in bytearray( the_array ):
        line += comma
        line += "%3d" % value
        comma = ","
        col  += 1
        if col == 16:
            col   = 0
            comma = ",\n "
        # flush periodically to avoid building one enormous string
        if len( line ) > 1024:
            write( line )
            line = ""

    write( line + "\n };\n\n\n" )


def main():
    """Main program body: write the glyph name tables to sys.argv[1]."""
    if len( sys.argv ) != 2:
        print( __doc__ % sys.argv[0] )
        sys.exit( 1 )

    # mode was the invalid string "w\n"; plain text write mode intended
    out   = open( sys.argv[1], "w" )
    write = out.write

    # `mac_extras' contains the list of glyph names in the Macintosh standard
    # encoding which are not in the SID Standard Names.
    #
    mac_extras = filter_glyph_names( mac_standard_names, sid_standard_names )

    # `base_list' contains the names of our final glyph names table.
    # It consists of the `mac_extras' glyph names, followed by the SID
    # standard names.
    #
    base_list = mac_extras + sid_standard_names

    write( "/***************************************************************************/\n" )
    write( "/* */\n" )
    write( "/* %-71s*/\n" % os.path.basename( sys.argv[1] ) )
    write( "/* */\n" )
    write( "/* PostScript glyph names. */\n" )
    write( "/* */\n" )
    write( "/* Copyright 2005, 2008, 2011 by */\n" )
    write( "/* David Turner, Robert Wilhelm, and Werner Lemberg. */\n" )
    write( "/* */\n" )
    write( "/* This file is part of the FreeType project, and may only be used, */\n" )
    write( "/* modified, and distributed under the terms of the FreeType project */\n" )
    write( "/* license, LICENSE.TXT. By continuing to use, modify, or distribute */\n" )
    write( "/* this file you indicate that you have read the license and */\n" )
    write( "/* understand and accept it fully. */\n" )
    write( "/* */\n" )
    write( "/***************************************************************************/\n" )
    write( "\n" )
    write( "\n" )
    write( " /* This file has been generated automatically -- do not edit! */\n" )
    write( "\n" )
    write( "\n" )

    # dump final glyph list (mac extras + sid standard names)
    #
    st = StringTable( base_list, "ft_standard_glyph_names" )

    st.dump( out )
    st.dump_sublist( out, "ft_mac_names", "FT_NUM_MAC_NAMES", mac_standard_names )
    st.dump_sublist( out, "ft_sid_names", "FT_NUM_SID_NAMES", sid_standard_names )

    dump_encoding( out, "t1_standard_encoding", t1_standard_encoding )
    dump_encoding( out, "t1_expert_encoding", t1_expert_encoding )

    # dump the AGL in its compressed form
    #
    agl_glyphs, agl_values = adobe_glyph_values()
    trie = StringNode( "", 0 )

    for g in range( len( agl_glyphs ) ):
        # int( value, 16 ) replaces eval( "0x" + value ): same result,
        # no code execution on the data
        trie.add( agl_glyphs[g], int( agl_values[g], 16 ) )

    trie = trie.optimize()
    trie.locate( 0 )   # assigns node offsets; the returned total is unused
    trie_array = trie.store( b"" )

    write( """\
  /*
   * This table is a compressed version of the Adobe Glyph List (AGL),
   * optimized for efficient searching.  It has been generated by the
   * `glnames.py' python script located in the `src/tools' directory.
   *
   * The lookup function to get the Unicode value for a given string
   * is defined below the table.
   */

#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST

""" )
    dump_array( trie_array, write, "ft_adobe_glyph_list" )

    # write the lookup routine now
    #
    write( """\
  /*
   * This function searches the compressed table efficiently.
   */
  static unsigned long
  ft_get_adobe_glyph_index( const char*  name,
                            const char*  limit )
  {
    int                   c = 0;
    int                   count, min, max;
    const unsigned char*  p = ft_adobe_glyph_list;


    if ( name == 0 || name >= limit )
      goto NotFound;

    c     = *name++;
    count = p[1];
    p    += 2;

    min = 0;
    max = count;

    while ( min < max )
    {
      int                   mid = ( min + max ) >> 1;
      const unsigned char*  q   = p + mid * 2;
      int                   c2;


      q = ft_adobe_glyph_list + ( ( (int)q[0] << 8 ) | q[1] );

      c2 = q[0] & 127;
      if ( c2 == c )
      {
        p = q;
        goto Found;
      }

      if ( c2 < c )
        min = mid + 1;
      else
        max = mid;
    }
    goto NotFound;

  Found:
    for (;;)
    {
      /* assert (*p & 127) == c */

      if ( name >= limit )
      {
        if ( (p[0] & 128) == 0 && (p[1] & 128) != 0 )
          return (unsigned long)( ( (int)p[2] << 8 ) | p[3] );

        goto NotFound;
      }

      c = *name++;

      if ( p[0] & 128 )
      {
        p++;
        if ( c != (p[0] & 127) )
          goto NotFound;

        continue;
      }

      p++;
      count = p[0] & 127;
      if ( p[0] & 128 )
        p += 2;

      p++;

      for ( ; count > 0; count--, p += 2 )
      {
        int                   offset = ( (int)p[0] << 8 ) | p[1];
        const unsigned char*  q      = ft_adobe_glyph_list + offset;


        if ( c == ( q[0] & 127 ) )
        {
          p = q;
          goto NextIter;
        }
      }
      goto NotFound;

    NextIter:
      ;
    }

  NotFound:
    return 0;
  }

#endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */
""" )

    if 0:  # generate unit test, or don't
        #
        # now write the unit test to check that everything works OK
        #
        write( "#ifdef TEST\n\n" )

        write( "static const char* const the_names[] = {\n" )
        for name in agl_glyphs:
            write( ' "' + name + '",\n' )
        write( " 0\n};\n" )

        write( "static const unsigned long the_values[] = {\n" )
        for val in agl_values:
            write( ' 0x' + val + ',\n' )
        write( " 0\n};\n" )

        write( """
#include <stdlib.h>
#include <stdio.h>

  int
  main( void )
  {
    int                   result = 0;
    const char* const*    names  = the_names;
    const unsigned long*  values = the_values;


    for ( ; *names; names++, values++ )
    {
      const char*    name      = *names;
      unsigned long  reference = *values;
      unsigned long  value;


      value = ft_get_adobe_glyph_index( name, name + strlen( name ) );
      if ( value != reference )
      {
        result = 1;
        fprintf( stderr, "name '%s' => %04x instead of %04x\\n",
                 name, value, reference );
      }
    }

    return result;
  }
""" )
        write( "#endif /* TEST */\n" )

    write("\n/* END */\n")
    out.close()


# Now run the main routine
#
if __name__ == "__main__":
    main()

# END
[ [ 8, 0, 0.0042, 0.0016, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0055, 0.0002, 0, 0.66, 0.0714, 509, 0, 5, 0, 0, 509, 0, 0 ], [ 14, 0, 0.0173, 0.0195, 0, 0.6...
[ "\"\"\"\\\n\nusage: %s <output-file>\n\n This python script generates the glyph names tables defined in the\n `psnames' module.\n\n Its single argument is the name of the header file to be created.", "import sys, string, struct, re, os.path", "mac_standard_names = \\\n[\n # 0\n \".notdef\", \".null\", \"no...
#!/usr/bin/env python # # Check trace components in FreeType 2 source. # Author: suzuki toshiya, 2009 # # This code is explicitly into the public domain. import sys import os import re SRC_FILE_LIST = [] USED_COMPONENT = {} KNOWN_COMPONENT = {} SRC_FILE_DIRS = [ "src" ] TRACE_DEF_FILES = [ "include/freetype/internal/fttrace.h" ] # -------------------------------------------------------------- # Parse command line options # for i in range( 1, len( sys.argv ) ): if sys.argv[i].startswith( "--help" ): print "Usage: %s [option]" % sys.argv[0] print "Search used-but-defined and defined-but-not-used trace_XXX macros" print "" print " --help:" print " Show this help" print "" print " --src-dirs=dir1:dir2:..." print " Specify the directories of C source files to be checked" print " Default is %s" % ":".join( SRC_FILE_DIRS ) print "" print " --def-files=file1:file2:..." print " Specify the header files including FT_TRACE_DEF()" print " Default is %s" % ":".join( TRACE_DEF_FILES ) print "" exit(0) if sys.argv[i].startswith( "--src-dirs=" ): SRC_FILE_DIRS = sys.argv[i].replace( "--src-dirs=", "", 1 ).split( ":" ) elif sys.argv[i].startswith( "--def-files=" ): TRACE_DEF_FILES = sys.argv[i].replace( "--def-files=", "", 1 ).split( ":" ) # -------------------------------------------------------------- # Scan C source and header files using trace macros. 
# c_pathname_pat = re.compile( '^.*\.[ch]$', re.IGNORECASE ) trace_use_pat = re.compile( '^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+trace_' ) for d in SRC_FILE_DIRS: for ( p, dlst, flst ) in os.walk( d ): for f in flst: if c_pathname_pat.match( f ) != None: src_pathname = os.path.join( p, f ) line_num = 0 for src_line in open( src_pathname, 'r' ): line_num = line_num + 1 src_line = src_line.strip() if trace_use_pat.match( src_line ) != None: component_name = trace_use_pat.sub( '', src_line ) if component_name in USED_COMPONENT: USED_COMPONENT[component_name].append( "%s:%d" % ( src_pathname, line_num ) ) else: USED_COMPONENT[component_name] = [ "%s:%d" % ( src_pathname, line_num ) ] # -------------------------------------------------------------- # Scan header file(s) defining trace macros. # trace_def_pat_opn = re.compile( '^.*FT_TRACE_DEF[ \t]*\([ \t]*' ) trace_def_pat_cls = re.compile( '[ \t\)].*$' ) for f in TRACE_DEF_FILES: line_num = 0 for hdr_line in open( f, 'r' ): line_num = line_num + 1 hdr_line = hdr_line.strip() if trace_def_pat_opn.match( hdr_line ) != None: component_name = trace_def_pat_opn.sub( '', hdr_line ) component_name = trace_def_pat_cls.sub( '', component_name ) if component_name in KNOWN_COMPONENT: print "trace component %s is defined twice, see %s and fttrace.h:%d" % \ ( component_name, KNOWN_COMPONENT[component_name], line_num ) else: KNOWN_COMPONENT[component_name] = "%s:%d" % \ ( os.path.basename( f ), line_num ) # -------------------------------------------------------------- # Compare the used and defined trace macros. # print "# Trace component used in the implementations but not defined in fttrace.h." cmpnt = USED_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c not in KNOWN_COMPONENT: print "Trace component %s (used in %s) is not defined." % ( c, ", ".join( USED_COMPONENT[c] ) ) print "# Trace component is defined but not used in the implementations." 
cmpnt = KNOWN_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c not in USED_COMPONENT: if c != "any": print "Trace component %s (defined in %s) is not used." % ( c, KNOWN_COMPONENT[c] )
[ [ 1, 0, 0.0796, 0.0088, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.0885, 0.0088, 0, 0.66, 0.0455, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0973, 0.0088, 0, ...
[ "import sys", "import os", "import re", "SRC_FILE_LIST = []", "USED_COMPONENT = {}", "KNOWN_COMPONENT = {}", "SRC_FILE_DIRS = [ \"src\" ]", "TRACE_DEF_FILES = [ \"include/freetype/internal/fttrace.h\" ]", "for i in range( 1, len( sys.argv ) ):\n if sys.argv[i].startswith( \"--help\" ):\n pr...
#!/usr/bin/env python # # DocMaker (c) 2002, 2004, 2008 David Turner <david@freetype.org> # # This program is a re-write of the original DocMaker took used # to generate the API Reference of the FreeType font engine # by converting in-source comments into structured HTML. # # This new version is capable of outputting XML data, as well # as accepts more liberal formatting options. # # It also uses regular expression matching and substitution # to speed things significantly. # from sources import * from content import * from utils import * from formatter import * from tohtml import * import utils import sys, os, time, string, glob, getopt def usage(): print "\nDocMaker Usage information\n" print " docmaker [options] file1 [file2 ...]\n" print "using the following options:\n" print " -h : print this page" print " -t : set project title, as in '-t \"My Project\"'" print " -o : set output directory, as in '-o mydir'" print " -p : set documentation prefix, as in '-p ft2'" print "" print " --title : same as -t, as in '--title=\"My Project\"'" print " --output : same as -o, as in '--output=mydir'" print " --prefix : same as -p, as in '--prefix=ft2'" def main( argv ): """main program loop""" global output_dir try: opts, args = getopt.getopt( sys.argv[1:], \ "ht:o:p:", \ ["help", "title=", "output=", "prefix="] ) except getopt.GetoptError: usage() sys.exit( 2 ) if args == []: usage() sys.exit( 1 ) # process options # project_title = "Project" project_prefix = None output_dir = None for opt in opts: if opt[0] in ( "-h", "--help" ): usage() sys.exit( 0 ) if opt[0] in ( "-t", "--title" ): project_title = opt[1] if opt[0] in ( "-o", "--output" ): utils.output_dir = opt[1] if opt[0] in ( "-p", "--prefix" ): project_prefix = opt[1] check_output() # create context and processor source_processor = SourceProcessor() content_processor = ContentProcessor() # retrieve the list of files to process file_list = make_file_list( args ) for filename in file_list: source_processor.parse_file( 
filename ) content_processor.parse_sources( source_processor ) # process sections content_processor.finish() formatter = HtmlFormatter( content_processor, project_title, project_prefix ) formatter.toc_dump() formatter.index_dump() formatter.section_dump_all() # if called from the command line # if __name__ == '__main__': main( sys.argv ) # eof
[ [ 1, 0, 0.1509, 0.0094, 0, 0.66, 0, 648, 0, 1, 0, 0, 648, 0, 0 ], [ 1, 0, 0.1604, 0.0094, 0, 0.66, 0.1111, 273, 0, 1, 0, 0, 273, 0, 0 ], [ 1, 0, 0.1698, 0.0094, 0, ...
[ "from sources import *", "from content import *", "from utils import *", "from formatter import *", "from tohtml import *", "import utils", "import sys, os, time, string, glob, getopt", "def usage():\n print(\"\\nDocMaker Usage information\\n\")\n print(\" docmaker [options] file1 [...
#!/usr/bin/env python # # DocBeauty (c) 2003, 2004, 2008 David Turner <david@freetype.org> # # This program is used to beautify the documentation comments used # in the FreeType 2 public headers. # from sources import * from content import * from utils import * import utils import sys, os, time, string, getopt content_processor = ContentProcessor() def beautify_block( block ): if block.content: content_processor.reset() markups = content_processor.process_content( block.content ) text = [] first = 1 for markup in markups: text.extend( markup.beautify( first ) ) first = 0 # now beautify the documentation "borders" themselves lines = [" /*************************************************************************"] for l in text: lines.append( " *" + l ) lines.append( " */" ) block.lines = lines def usage(): print "\nDocBeauty 0.1 Usage information\n" print " docbeauty [options] file1 [file2 ...]\n" print "using the following options:\n" print " -h : print this page" print " -b : backup original files with the 'orig' extension" print "" print " --backup : same as -b" def main( argv ): """main program loop""" global output_dir try: opts, args = getopt.getopt( sys.argv[1:], \ "hb", \ ["help", "backup"] ) except getopt.GetoptError: usage() sys.exit( 2 ) if args == []: usage() sys.exit( 1 ) # process options # output_dir = None do_backup = None for opt in opts: if opt[0] in ( "-h", "--help" ): usage() sys.exit( 0 ) if opt[0] in ( "-b", "--backup" ): do_backup = 1 # create context and processor source_processor = SourceProcessor() # retrieve the list of files to process file_list = make_file_list( args ) for filename in file_list: source_processor.parse_file( filename ) for block in source_processor.blocks: beautify_block( block ) new_name = filename + ".new" ok = None try: file = open( new_name, "wt" ) for block in source_processor.blocks: for line in block.lines: file.write( line ) file.write( "\n" ) file.close() except: ok = 0 # if called from the command line # if 
__name__ == '__main__': main( sys.argv ) # eof
[ [ 1, 0, 0.0796, 0.0088, 0, 0.66, 0, 648, 0, 1, 0, 0, 648, 0, 0 ], [ 1, 0, 0.0885, 0.0088, 0, 0.66, 0.1111, 273, 0, 1, 0, 0, 273, 0, 0 ], [ 1, 0, 0.0973, 0.0088, 0, ...
[ "from sources import *", "from content import *", "from utils import *", "import utils", "import sys, os, time, string, getopt", "content_processor = ContentProcessor()", "def beautify_block( block ):\n if block.content:\n content_processor.reset()\n\n markups = content_processor.pro...
# Sources (c) 2002, 2003, 2004, 2006, 2007, 2008, 2009 # David Turner <david@freetype.org> # # # this file contains definitions of classes needed to decompose # C sources files into a series of multi-line "blocks". There are # two kinds of blocks: # # - normal blocks, which contain source code or ordinary comments # # - documentation blocks, which have restricted formatting, and # whose text always start with a documentation markup tag like # "<Function>", "<Type>", etc.. # # the routines used to process the content of documentation blocks # are not contained here, but in "content.py" # # the classes and methods found here only deal with text parsing # and basic documentation block extraction # import fileinput, re, sys, os, string ################################################################ ## ## BLOCK FORMAT PATTERN ## ## A simple class containing compiled regular expressions used ## to detect potential documentation format block comments within ## C source code ## ## note that the 'column' pattern must contain a group that will ## be used to "unbox" the content of documentation comment blocks ## class SourceBlockFormat: def __init__( self, id, start, column, end ): """create a block pattern, used to recognize special documentation blocks""" self.id = id self.start = re.compile( start, re.VERBOSE ) self.column = re.compile( column, re.VERBOSE ) self.end = re.compile( end, re.VERBOSE ) # # format 1 documentation comment blocks look like the following: # # /************************************/ # /* */ # /* */ # /* */ # /************************************/ # # we define a few regular expressions here to detect them # start = r''' \s* # any number of whitespace /\*{2,}/ # followed by '/' and at least two asterisks then '/' \s*$ # probably followed by whitespace ''' column = r''' \s* # any number of whitespace /\*{1} # followed by '/' and precisely one asterisk ([^*].*) # followed by anything (group 1) \*{1}/ # followed by one asterisk and a '/' \s*$ # probably 
followed by whitespace ''' re_source_block_format1 = SourceBlockFormat( 1, start, column, start ) # # format 2 documentation comment blocks look like the following: # # /************************************ (at least 2 asterisks) # * # * # * # * # **/ (1 or more asterisks at the end) # # we define a few regular expressions here to detect them # start = r''' \s* # any number of whitespace /\*{2,} # followed by '/' and at least two asterisks \s*$ # probably followed by whitespace ''' column = r''' \s* # any number of whitespace \*{1}(?!/) # followed by precisely one asterisk not followed by `/' (.*) # then anything (group1) ''' end = r''' \s* # any number of whitespace \*+/ # followed by at least one asterisk, then '/' ''' re_source_block_format2 = SourceBlockFormat( 2, start, column, end ) # # the list of supported documentation block formats, we could add new ones # relatively easily # re_source_block_formats = [re_source_block_format1, re_source_block_format2] # # the following regular expressions corresponds to markup tags # within the documentation comment blocks. 
they're equivalent # despite their different syntax # # notice how each markup tag _must_ begin a new line # re_markup_tag1 = re.compile( r'''\s*<(\w*)>''' ) # <xxxx> format re_markup_tag2 = re.compile( r'''\s*@(\w*):''' ) # @xxxx: format # # the list of supported markup tags, we could add new ones relatively # easily # re_markup_tags = [re_markup_tag1, re_markup_tag2] # # used to detect a cross-reference, after markup tags have been stripped # re_crossref = re.compile( r'@(\w*)(.*)' ) # # used to detect italic and bold styles in paragraph text # re_italic = re.compile( r"_(\w(\w|')*)_(.*)" ) # _italic_ re_bold = re.compile( r"\*(\w(\w|')*)\*(.*)" ) # *bold* # # used to detect the end of commented source lines # re_source_sep = re.compile( r'\s*/\*\s*\*/' ) # # used to perform cross-reference within source output # re_source_crossref = re.compile( r'(\W*)(\w*)' ) # # a list of reserved source keywords # re_source_keywords = re.compile( '''\\b ( typedef | struct | enum | union | const | char | int | short | long | void | signed | unsigned | \#include | \#define | \#undef | \#if | \#ifdef | \#ifndef | \#else | \#endif ) \\b''', re.VERBOSE ) ################################################################ ## ## SOURCE BLOCK CLASS ## ## A SourceProcessor is in charge of reading a C source file ## and decomposing it into a series of different "SourceBlocks". ## each one of these blocks can be made of the following data: ## ## - A documentation comment block that starts with "/**" and ## whose exact format will be discussed later ## ## - normal sources lines, including comments ## ## the important fields in a text block are the following ones: ## ## self.lines : a list of text lines for the corresponding block ## ## self.content : for documentation comment blocks only, this is the ## block content that has been "unboxed" from its ## decoration. This is None for all other blocks ## (i.e. 
sources or ordinary comments with no starting ## markup tag) ## class SourceBlock: def __init__( self, processor, filename, lineno, lines ): self.processor = processor self.filename = filename self.lineno = lineno self.lines = lines[:] self.format = processor.format self.content = [] if self.format == None: return words = [] # extract comment lines lines = [] for line0 in self.lines: m = self.format.column.match( line0 ) if m: lines.append( m.group( 1 ) ) # now, look for a markup tag for l in lines: l = string.strip( l ) if len( l ) > 0: for tag in re_markup_tags: if tag.match( l ): self.content = lines return def location( self ): return "(" + self.filename + ":" + repr( self.lineno ) + ")" # debugging only - not used in normal operations def dump( self ): if self.content: print "{{{content start---" for l in self.content: print l print "---content end}}}" return fmt = "" if self.format: fmt = repr( self.format.id ) + " " for line in self.lines: print line ################################################################ ## ## SOURCE PROCESSOR CLASS ## ## The SourceProcessor is in charge of reading a C source file ## and decomposing it into a series of different "SourceBlock" ## objects. 
## ## each one of these blocks can be made of the following data: ## ## - A documentation comment block that starts with "/**" and ## whose exact format will be discussed later ## ## - normal sources lines, include comments ## ## class SourceProcessor: def __init__( self ): """initialize a source processor""" self.blocks = [] self.filename = None self.format = None self.lines = [] def reset( self ): """reset a block processor, clean all its blocks""" self.blocks = [] self.format = None def parse_file( self, filename ): """parse a C source file, and add its blocks to the processor's list""" self.reset() self.filename = filename fileinput.close() self.format = None self.lineno = 0 self.lines = [] for line in fileinput.input( filename ): # strip trailing newlines, important on Windows machines! if line[-1] == '\012': line = line[0:-1] if self.format == None: self.process_normal_line( line ) else: if self.format.end.match( line ): # that's a normal block end, add it to 'lines' and # create a new block self.lines.append( line ) self.add_block_lines() elif self.format.column.match( line ): # that's a normal column line, add it to 'lines' self.lines.append( line ) else: # humm.. 
this is an unexpected block end, # create a new block, but don't process the line self.add_block_lines() # we need to process the line again self.process_normal_line( line ) # record the last lines self.add_block_lines() def process_normal_line( self, line ): """process a normal line and check whether it is the start of a new block""" for f in re_source_block_formats: if f.start.match( line ): self.add_block_lines() self.format = f self.lineno = fileinput.filelineno() self.lines.append( line ) def add_block_lines( self ): """add the current accumulated lines and create a new block""" if self.lines != []: block = SourceBlock( self, self.filename, self.lineno, self.lines ) self.blocks.append( block ) self.format = None self.lines = [] # debugging only, not used in normal operations def dump( self ): """print all blocks in a processor""" for b in self.blocks: b.dump() # eof
[ [ 1, 0, 0.0634, 0.0029, 0, 0.66, 0, 286, 0, 5, 0, 0, 286, 0, 0 ], [ 3, 0, 0.1167, 0.0231, 0, 0.66, 0.05, 659, 0, 1, 0, 0, 0, 0, 3 ], [ 2, 1, 0.1196, 0.0173, 1, 0.43...
[ "import fileinput, re, sys, os, string", "class SourceBlockFormat:\n\n def __init__( self, id, start, column, end ):\n \"\"\"create a block pattern, used to recognize special documentation blocks\"\"\"\n self.id = id\n self.start = re.compile( start, re.VERBOSE )\n self.colum...
# Formatter (c) 2002, 2004, 2007, 2008 David Turner <david@freetype.org> # from sources import * from content import * from utils import * # This is the base Formatter class. Its purpose is to convert # a content processor's data into specific documents (i.e., table of # contents, global index, and individual API reference indices). # # You need to sub-class it to output anything sensible. For example, # the file tohtml.py contains the definition of the HtmlFormatter sub-class # used to output -- you guessed it -- HTML. # class Formatter: def __init__( self, processor ): self.processor = processor self.identifiers = {} self.chapters = processor.chapters self.sections = processor.sections.values() self.block_index = [] # store all blocks in a dictionary self.blocks = [] for section in self.sections: for block in section.blocks.values(): self.add_identifier( block.name, block ) # add enumeration values to the index, since this is useful for markup in block.markups: if markup.tag == 'values': for field in markup.fields: self.add_identifier( field.name, block ) self.block_index = self.identifiers.keys() self.block_index.sort( index_sort ) def add_identifier( self, name, block ): if self.identifiers.has_key( name ): # duplicate name! 
sys.stderr.write( \ "WARNING: duplicate definition for '" + name + "' in " + \ block.location() + ", previous definition in " + \ self.identifiers[name].location() + "\n" ) else: self.identifiers[name] = block # # Formatting the table of contents # def toc_enter( self ): pass def toc_chapter_enter( self, chapter ): pass def toc_section_enter( self, section ): pass def toc_section_exit( self, section ): pass def toc_chapter_exit( self, chapter ): pass def toc_index( self, index_filename ): pass def toc_exit( self ): pass def toc_dump( self, toc_filename = None, index_filename = None ): output = None if toc_filename: output = open_output( toc_filename ) self.toc_enter() for chap in self.processor.chapters: self.toc_chapter_enter( chap ) for section in chap.sections: self.toc_section_enter( section ) self.toc_section_exit( section ) self.toc_chapter_exit( chap ) self.toc_index( index_filename ) self.toc_exit() if output: close_output( output ) # # Formatting the index # def index_enter( self ): pass def index_name_enter( self, name ): pass def index_name_exit( self, name ): pass def index_exit( self ): pass def index_dump( self, index_filename = None ): output = None if index_filename: output = open_output( index_filename ) self.index_enter() for name in self.block_index: self.index_name_enter( name ) self.index_name_exit( name ) self.index_exit() if output: close_output( output ) # # Formatting a section # def section_enter( self, section ): pass def block_enter( self, block ): pass def markup_enter( self, markup, block = None ): pass def field_enter( self, field, markup = None, block = None ): pass def field_exit( self, field, markup = None, block = None ): pass def markup_exit( self, markup, block = None ): pass def block_exit( self, block ): pass def section_exit( self, section ): pass def section_dump( self, section, section_filename = None ): output = None if section_filename: output = open_output( section_filename ) self.section_enter( section ) for name in 
section.block_names: block = self.identifiers[name] self.block_enter( block ) for markup in block.markups[1:]: # always ignore first markup! self.markup_enter( markup, block ) for field in markup.fields: self.field_enter( field, markup, block ) self.field_exit( field, markup, block ) self.markup_exit( markup, block ) self.block_exit( block ) self.section_exit( section ) if output: close_output( output ) def section_dump_all( self ): for section in self.sections: self.section_dump( section ) # eof
[ [ 1, 0, 0.0213, 0.0053, 0, 0.66, 0, 648, 0, 1, 0, 0, 648, 0, 0 ], [ 1, 0, 0.0266, 0.0053, 0, 0.66, 0.3333, 273, 0, 1, 0, 0, 273, 0, 0 ], [ 1, 0, 0.0319, 0.0053, 0, ...
[ "from sources import *", "from content import *", "from utils import *", "class Formatter:\n\n def __init__( self, processor ):\n self.processor = processor\n self.identifiers = {}\n self.chapters = processor.chapters\n self.sections = processor.sections.values()\n ...
#!/usr/bin/env python # # DocMaker (c) 2002, 2004, 2008 David Turner <david@freetype.org> # # This program is a re-write of the original DocMaker took used # to generate the API Reference of the FreeType font engine # by converting in-source comments into structured HTML. # # This new version is capable of outputting XML data, as well # as accepts more liberal formatting options. # # It also uses regular expression matching and substitution # to speed things significantly. # from sources import * from content import * from utils import * from formatter import * from tohtml import * import utils import sys, os, time, string, glob, getopt def usage(): print "\nDocMaker Usage information\n" print " docmaker [options] file1 [file2 ...]\n" print "using the following options:\n" print " -h : print this page" print " -t : set project title, as in '-t \"My Project\"'" print " -o : set output directory, as in '-o mydir'" print " -p : set documentation prefix, as in '-p ft2'" print "" print " --title : same as -t, as in '--title=\"My Project\"'" print " --output : same as -o, as in '--output=mydir'" print " --prefix : same as -p, as in '--prefix=ft2'" def main( argv ): """main program loop""" global output_dir try: opts, args = getopt.getopt( sys.argv[1:], \ "ht:o:p:", \ ["help", "title=", "output=", "prefix="] ) except getopt.GetoptError: usage() sys.exit( 2 ) if args == []: usage() sys.exit( 1 ) # process options # project_title = "Project" project_prefix = None output_dir = None for opt in opts: if opt[0] in ( "-h", "--help" ): usage() sys.exit( 0 ) if opt[0] in ( "-t", "--title" ): project_title = opt[1] if opt[0] in ( "-o", "--output" ): utils.output_dir = opt[1] if opt[0] in ( "-p", "--prefix" ): project_prefix = opt[1] check_output() # create context and processor source_processor = SourceProcessor() content_processor = ContentProcessor() # retrieve the list of files to process file_list = make_file_list( args ) for filename in file_list: source_processor.parse_file( 
filename ) content_processor.parse_sources( source_processor ) # process sections content_processor.finish() formatter = HtmlFormatter( content_processor, project_title, project_prefix ) formatter.toc_dump() formatter.index_dump() formatter.section_dump_all() # if called from the command line # if __name__ == '__main__': main( sys.argv ) # eof
[ [ 1, 0, 0.1509, 0.0094, 0, 0.66, 0, 648, 0, 1, 0, 0, 648, 0, 0 ], [ 1, 0, 0.1604, 0.0094, 0, 0.66, 0.1111, 273, 0, 1, 0, 0, 273, 0, 0 ], [ 1, 0, 0.1698, 0.0094, 0, ...
[ "from sources import *", "from content import *", "from utils import *", "from formatter import *", "from tohtml import *", "import utils", "import sys, os, time, string, glob, getopt", "def usage():\n print(\"\\nDocMaker Usage information\\n\")\n print(\" docmaker [options] file1 [...
# Utils (c) 2002, 2004, 2007, 2008 David Turner <david@freetype.org> # import string, sys, os, glob # current output directory # output_dir = None # This function is used to sort the index. It is a simple lexicographical # sort, except that it places capital letters before lowercase ones. # def index_sort( s1, s2 ): if not s1: return -1 if not s2: return 1 l1 = len( s1 ) l2 = len( s2 ) m1 = string.lower( s1 ) m2 = string.lower( s2 ) for i in range( l1 ): if i >= l2 or m1[i] > m2[i]: return 1 if m1[i] < m2[i]: return -1 if s1[i] < s2[i]: return -1 if s1[i] > s2[i]: return 1 if l2 > l1: return -1 return 0 # Sort input_list, placing the elements of order_list in front. # def sort_order_list( input_list, order_list ): new_list = order_list[:] for id in input_list: if not id in order_list: new_list.append( id ) return new_list # Open the standard output to a given project documentation file. Use # "output_dir" to determine the filename location if necessary and save the # old stdout in a tuple that is returned by this function. # def open_output( filename ): global output_dir if output_dir and output_dir != "": filename = output_dir + os.sep + filename old_stdout = sys.stdout new_file = open( filename, "w" ) sys.stdout = new_file return ( new_file, old_stdout ) # Close the output that was returned by "close_output". # def close_output( output ): output[0].close() sys.stdout = output[1] # Check output directory. 
# def check_output(): global output_dir if output_dir: if output_dir != "": if not os.path.isdir( output_dir ): sys.stderr.write( "argument" + " '" + output_dir + "' " + \ "is not a valid directory" ) sys.exit( 2 ) else: output_dir = None def file_exists( pathname ): """checks that a given file exists""" result = 1 try: file = open( pathname, "r" ) file.close() except: result = None sys.stderr.write( pathname + " couldn't be accessed\n" ) return result def make_file_list( args = None ): """builds a list of input files from command-line arguments""" file_list = [] # sys.stderr.write( repr( sys.argv[1 :] ) + '\n' ) if not args: args = sys.argv[1 :] for pathname in args: if string.find( pathname, '*' ) >= 0: newpath = glob.glob( pathname ) newpath.sort() # sort files -- this is important because # of the order of files else: newpath = [pathname] file_list.extend( newpath ) if len( file_list ) == 0: file_list = None else: # now filter the file list to remove non-existing ones file_list = filter( file_exists, file_list ) return file_list # eof
[ [ 1, 0, 0.0303, 0.0076, 0, 0.66, 0, 890, 0, 4, 0, 0, 890, 0, 0 ], [ 14, 0, 0.0606, 0.0076, 0, 0.66, 0.125, 577, 1, 0, 0, 0, 0, 9, 0 ], [ 2, 0, 0.2121, 0.2197, 0, 0....
[ "import string, sys, os, glob", "output_dir = None", "def index_sort( s1, s2 ):\n if not s1:\n return -1\n\n if not s2:\n return 1\n\n l1 = len( s1 )", " if not s1:\n return -1", " return -1", " if not s2:\n return 1", " return 1", " l1 = le...
# Content (c) 2002, 2004, 2006, 2007, 2008, 2009 # David Turner <david@freetype.org> # # This file contains routines used to parse the content of documentation # comment blocks and build more structured objects out of them. # from sources import * from utils import * import string, re # this regular expression is used to detect code sequences. these # are simply code fragments embedded in '{' and '}' like in: # # { # x = y + z; # if ( zookoo == 2 ) # { # foobar(); # } # } # # note that indentation of the starting and ending accolades must be # exactly the same. the code sequence can contain accolades at greater # indentation # re_code_start = re.compile( r"(\s*){\s*$" ) re_code_end = re.compile( r"(\s*)}\s*$" ) # this regular expression is used to isolate identifiers from # other text # re_identifier = re.compile( r'(\w*)' ) # we collect macros ending in `_H'; while outputting the object data, we use # this info together with the object's file location to emit the appropriate # header file macro and name before the object itself # re_header_macro = re.compile( r'^#define\s{1,}(\w{1,}_H)\s{1,}<(.*)>' ) ############################################################################# # # The DocCode class is used to store source code lines. # # 'self.lines' contains a set of source code lines that will be dumped as # HTML in a <PRE> tag. # # The object is filled line by line by the parser; it strips the leading # "margin" space from each input line before storing it in 'self.lines'. 
#
class DocCode:
    """Container for the source lines of an embedded code sequence.

    The collected lines are later dumped as HTML inside a <PRE> tag.
    """

    def __init__( self, margin, lines ):
        self.lines = []
        self.words = None   # code blocks carry no word list

        # drop the leading `margin' columns of each line when they
        # consist of whitespace only
        for line in lines:
            if line[:margin].strip() == "":
                line = line[margin:]
            self.lines.append( line )

    def dump( self, prefix = "", width = 60 ):
        # print every stored line with `prefix' prepended; `width' is
        # accepted for symmetry with DocPara.dump but code lines are
        # never re-wrapped
        for line in self.dump_lines( 0, width ):
            print( prefix + line )

    def dump_lines( self, margin = 0, width = 60 ):
        # return the stored lines, each shifted right by `margin' spaces
        pad = " " * margin
        return [pad + line for line in self.lines]


#############################################################################
#
# The DocPara class is used to store a "normal" text paragraph.
#
# 'self.words' contains the list of words that make up the paragraph.
#
class DocPara:
    """A plain text paragraph, stored as its list of words."""

    def __init__( self, lines ):
        self.lines = None   # paragraphs carry no raw line list
        self.words = []
        for line in lines:
            self.words.extend( line.strip().split() )

    def dump( self, prefix = "", width = 60 ):
        for line in self.dump_lines( 0, width ):
            print( prefix + line )

    def dump_lines( self, margin = 0, width = 60 ):
        """Word-wrap the paragraph to `width' columns, left-padded by `margin'."""
        pad     = " " * margin
        result  = []
        current = ""   # the line being assembled
        used    = 0    # its current width in columns
        for word in self.words:
            cost = len( word )
            if used > 0:
                cost = cost + 1          # plus one for the joining space
            if used + cost > width:
                # the word does not fit: flush and start a fresh line
                result.append( pad + current )
                current = word
                used    = len( word )
            else:
                if used > 0:
                    current = current + " "
                current = current + word
                used    = used + cost
        if used > 0:
            result.append( pad + current )
        return result


#############################################################################
#
# The DocField class is used to store a list containing either DocPara or
# DocCode objects.
# Each DocField also has an optional "name" which is used when the object
# corresponds to a field or value definition.
#
class DocField:

    def __init__( self, name, lines ):
        self.name  = name  # can be None for normal paragraphs/sources
        self.items = []    # list of DocPara / DocCode items

        mode_none = 0      # start parsing mode
        mode_code = 1      # parsing code sequences
        mode_para = 3      # parsing normal paragraph
                           # NOTE(review): mode_para is defined but never used

        margin    = -1     # current code sequence indentation
        cur_lines = []

        # now analyze the markup lines to see if they contain paragraphs,
        # code sequences or fields definitions
        #
        start = 0          # NOTE(review): `start` is never read afterwards
        mode  = mode_none

        for l in lines:
            # are we parsing a code sequence ?
            if mode == mode_code:
                m = re_code_end.match( l )
                if m and len( m.group( 1 ) ) <= margin:
                    # that's it, we finished the code sequence
                    code = DocCode( 0, cur_lines )
                    self.items.append( code )
                    margin    = -1
                    cur_lines = []
                    mode      = mode_none
                else:
                    # nope, continue the code sequence
                    cur_lines.append( l[margin:] )
            else:
                # start of code sequence ?
                m = re_code_start.match( l )
                if m:
                    # save current lines
                    if cur_lines:
                        para = DocPara( cur_lines )
                        self.items.append( para )
                        cur_lines = []

                    # switch to code extraction mode
                    margin = len( m.group( 1 ) )
                    mode   = mode_code
                else:
                    if not string.split( l ) and cur_lines:
                        # if the line is empty, we end the current paragraph,
                        # if any
                        para = DocPara( cur_lines )
                        self.items.append( para )
                        cur_lines = []
                    else:
                        # otherwise, simply add the line to the current
                        # paragraph
                        cur_lines.append( l )

        if mode == mode_code:
            # unexpected end of code sequence
            code = DocCode( margin, cur_lines )
            self.items.append( code )
        elif cur_lines:
            para = DocPara( cur_lines )
            self.items.append( para )

    def dump( self, prefix = "" ):
        # NOTE(review): `self.field` is never assigned anywhere in this
        # class (the constructor sets `self.name`); a truthy `self.field`
        # here would raise AttributeError -- presumably this should read
        # `self.name`.  Confirm before relying on dump() for named fields.
        if self.field:
            print prefix + self.field + " ::"
            prefix = prefix + "----"

        first = 1
        for p in self.items:
            if not first:
                print ""
            p.dump( prefix )
            first = 0

    def dump_lines( self, margin = 0, width = 60 ):
        # concatenate the dumps of all items, separated by blank lines
        result = []
        nl     = None
        for p in self.items:
            if nl:
                result.append( "" )

            result.extend( p.dump_lines( margin, width ) )
            nl = 1

        return result


# this regular expression is used to detect field definitions
#
re_field = re.compile( r"\s*(\w*|\w(\w|\.)*\w)\s*::" )


class DocMarkup:
    # A DocMarkup stores one markup section as a lowercased tag name plus
    # a list of DocField objects.

    def __init__( self, tag, lines ):
        self.tag    = string.lower( tag )
        self.fields = []

        cur_lines = []
        field     = None
        mode      = 0   # NOTE(review): `mode` is never read afterwards

        for l in lines:
            m = re_field.match( l )
            if m:
                # we detected the start of a new field definition

                # first, save the current one
                if cur_lines:
                    f = DocField( field, cur_lines )
                    self.fields.append( f )
                    cur_lines = []
                    field     = None

                field = m.group( 1 )        # record field name
                ln    = len( m.group( 0 ) )
                l     = " " * ln + l[ln:]   # blank out the "name ::" prefix
                cur_lines = [l]
            else:
                cur_lines.append( l )

        if field or cur_lines:
            f = DocField( field, cur_lines )
            self.fields.append( f )

    def get_name( self ):
        # return the first word of the first paragraph of the first field,
        # or None when the markup is empty
        try:
            return self.fields[0].items[0].words[0]
        except:
            return None

    def get_start( self ):
        # return the whole first paragraph as one space-joined string,
        # or "ERROR" when the markup is empty
        try:
            result = ""
            for word in self.fields[0].items[0].words:
                result = result + " " + word
            return result[1:]
        except:
            return "ERROR"

    def dump( self, margin ):
        print " " * margin + "<" + self.tag + ">"
        for f in self.fields:
            f.dump( "  " )
        print " " * margin + "</" + self.tag + ">"


class DocChapter:
    # A chapter as declared by a <chapter> block: its name, title words and
    # the ordered list of section names it contains.

    def __init__( self, block ):
        self.block    = block
        self.sections = []
        if block:
            self.name  = block.name
            self.title = block.get_markup_words( "title" )
            self.order = block.get_markup_words( "sections" )
        else:
            # pseudo-chapter used to collect sections not listed elsewhere
            self.name  = "Other"
            self.title = string.split( "Miscellaneous" )
            self.order = []


class DocSection:
    # A documentation section: a named, ordered collection of DocBlock
    # objects plus the section-level description blocks ("defs").

    def __init__( self, name = "Other" ):
        self.name        = name
        self.blocks      = {}
        self.block_names = []  # ordered block names in section
        self.defs        = []
        self.abstract    = ""
        self.description = ""
        self.order       = []
        self.title       = "ERROR"
        self.chapter     = None

    def add_def( self, block ):
        self.defs.append( block )

    def add_block( self, block ):
        self.block_names.append( block.name )
        self.blocks[block.name] = block

    def process( self ):
        # look up one block that contains a valid section description;
        # the first block with a "title" markup wins
        for block in self.defs:
            title = block.get_markup_text( "title" )
            if title:
                self.title       = title
                self.abstract    = block.get_markup_words( "abstract" )
                self.description = block.get_markup_items( "description" )
                self.order       = block.get_markup_words( "order" )
                return

    def reorder( self ):
        # sort the block names according to the section's "order" markup
        self.block_names = sort_order_list( self.block_names, self.order )


class ContentProcessor:

    def __init__( self ):
        """initialize a block content processor"""
        self.reset()

        self.sections = {}    # dictionary of documentation sections
        self.section  = None  # current documentation section

        self.chapters = []    # list of chapters

        self.headers  = {}    # dictionary of header macros

    def set_section( self, section_name ):
        """set current section during parsing"""
        if not self.sections.has_key( section_name ):
            section = DocSection( section_name )
            self.sections[section_name] = section
            self.section = section
        else:
            self.section = self.sections[section_name]

    def add_chapter( self, block ):
        chapter = DocChapter( block )
        self.chapters.append( chapter )

    def reset( self ):
        """reset the content processor for a new block"""
        self.markups      = []
        self.markup       = None
        self.markup_lines = []

    def add_markup( self ):
        """add a new markup section"""
        if self.markup and self.markup_lines:
            # get rid of last line of markup if it's empty
            marks = self.markup_lines
            if len( marks ) > 0 and not string.strip( marks[-1] ):
                self.markup_lines = marks[:-1]

            m = DocMarkup( self.markup, self.markup_lines )

            self.markups.append( m )

            self.markup       = None
            self.markup_lines = []

    def process_content( self, content ):
        """process a block content and return a list of DocMarkup objects
           corresponding to it"""
        markup       = None  # NOTE(review): these two locals shadow the
        markup_lines = []    # `self.markup*` attributes and are never used
        first        = 1

        for line in content:
            found = None
            for t in re_markup_tags:
                m = t.match( line )
                if m:
                    found  = string.lower( m.group( 1 ) )
                    prefix = len( m.group( 0 ) )
                    line   = " " * prefix + line[prefix:]  # remove markup from line
                    break

            # is it the start of a new markup section ?
            # a markup tag was found on this line: close the previous
            # markup section and start recording the new one
            if found:
                first = 0
                self.add_markup()  # add current markup content
                self.markup = found
                if len( string.strip( line ) ) > 0:
                    self.markup_lines.append( line )
            elif first == 0:
                self.markup_lines.append( line )

        self.add_markup()  # flush the last pending markup section

        return self.markups

    def parse_sources( self, source_processor ):
        # scan the blocks produced by a SourceProcessor and build a
        # DocBlock for every documentation comment found
        blocks = source_processor.blocks
        count  = len( blocks )

        for n in range( count ):
            source = blocks[n]
            if source.content:
                # this is a documentation comment, we need to catch
                # all following normal blocks in the "follow" list
                #
                follow = []
                m = n + 1
                while m < count and not blocks[m].content:
                    follow.append( blocks[m] )
                    m = m + 1

                doc_block = DocBlock( source, follow, self )

    def finish( self ):
        # process all sections to extract their abstract, description
        # and ordered list of items
        #
        for sec in self.sections.values():
            sec.process()

        # process chapters to check that all sections are correctly
        # listed there
        for chap in self.chapters:
            for sec in chap.order:
                if self.sections.has_key( sec ):
                    section = self.sections[sec]
                    section.chapter = chap
                    section.reorder()
                    chap.sections.append( section )
                else:
                    sys.stderr.write( "WARNING: chapter '" + \
                                      chap.name + "' in " + chap.block.location() + \
                                      " lists unknown section '" + sec + "'\n" )

        # check that all sections are in a chapter
        #
        others = []
        for sec in self.sections.values():
            if not sec.chapter:
                others.append( sec )

        # create a new special chapter for all remaining sections
        # when necessary
        #
        if others:
            chap = DocChapter( None )
            chap.sections = others
            self.chapters.append( chap )


class DocBlock:
    # A DocBlock couples one documentation comment with the source lines
    # that follow it, and registers itself with the current section.

    def __init__( self, source, follow, processor ):
        processor.reset()

        self.source  = source
        self.code    = []
        self.type    = "ERRTYPE"
        self.name    = "ERRNAME"
        self.section = processor.section
        self.markups = processor.process_content( source.content )

        # compute block type from first markup tag
        try:
            self.type = self.markups[0].tag
        except:
            pass

        # compute block name from first markup paragraph
        try:
            markup = self.markups[0]
            para   = markup.fields[0].items[0]
            name   = para.words[0]
            m = re_identifier.match( name )
            if m:
                name = m.group( 1 )
            self.name = name
        except:
            pass

        if self.type == "section":
            # detect new section starts
            processor.set_section( self.name )
            processor.section.add_def( self )
        elif self.type == "chapter":
            # detect new chapter
            processor.add_chapter( self )
        else:
            processor.section.add_block( self )

        # now, compute the source lines relevant to this documentation
        # block. We keep normal comments in for obvious reasons (??)
        source = []
        for b in follow:
            if b.format:
                break
            for l in b.lines:
                # collect header macro definitions
                m = re_header_macro.match( l )
                if m:
                    processor.headers[m.group( 2 )] = m.group( 1 );

                # we use "/* */" as a separator
                if re_source_sep.match( l ):
                    break
                source.append( l )

        # now strip the leading and trailing empty lines from the sources
        start = 0
        end   = len( source ) - 1

        while start < end and not string.strip( source[start] ):
            start = start + 1

        while start < end and not string.strip( source[end] ):
            end = end - 1

        if start == end and not string.strip( source[start] ):
            self.code = []
        else:
            self.code = source[start:end + 1]

    def location( self ):
        return self.source.location()

    def get_markup( self, tag_name ):
        """return the DocMarkup corresponding to a given tag in a block"""
        for m in self.markups:
            if m.tag == string.lower( tag_name ):
                return m
        return None

    def get_markup_name( self, tag_name ):
        """return the name of a given primary markup in a block"""
        try:
            m = self.get_markup( tag_name )
            return m.get_name()
        except:
            return None

    def get_markup_words( self, tag_name ):
        # return the word list of the first paragraph of a given markup,
        # or [] when the markup is missing or empty
        try:
            m = self.get_markup( tag_name )
            return m.fields[0].items[0].words
        except:
            return []

    def get_markup_text( self, tag_name ):
        # return the first paragraph of a given markup as a single string
        result = self.get_markup_words( tag_name )
        return string.join( result )

    def get_markup_items( self, tag_name ):
        # return the item list of the first field of a given markup,
        # or None when missing
        try:
            m = self.get_markup( tag_name )
            return m.fields[0].items
        except:
            return None

# eof
[ [ 1, 0, 0.0137, 0.0017, 0, 0.66, 0, 648, 0, 1, 0, 0, 648, 0, 0 ], [ 1, 0, 0.0154, 0.0017, 0, 0.66, 0.0667, 970, 0, 1, 0, 0, 970, 0, 0 ], [ 1, 0, 0.0171, 0.0017, 0, ...
[ "from sources import *", "from utils import *", "import string, re", "re_code_start = re.compile( r\"(\\s*){\\s*$\" )", "re_code_end = re.compile( r\"(\\s*)}\\s*$\" )", "re_identifier = re.compile( r'(\\w*)' )", "re_header_macro = re.compile( r'^#define\\s{1,}(\\w{1,}_H)\\s{1,}<(.*)>' )", "class Do...
#!/usr/bin/env python
#
#  DocBeauty (c) 2003, 2004, 2008 David Turner <david@freetype.org>
#
#  This program is used to beautify the documentation comments used
#  in the FreeType 2 public headers.
#

from sources import *
from content import *
from utils import *

import utils

import sys, os, time, string, getopt
# NOTE(review): `os`, `time` and `string` appear unused in this script


content_processor = ContentProcessor()


def beautify_block( block ):
    # re-format one source block's documentation comment in place
    # (block.lines is overwritten with the beautified comment)
    if block.content:
        content_processor.reset()

        markups = content_processor.process_content( block.content )
        text    = []
        first   = 1

        # NOTE(review): DocMarkup.beautify() is not part of the DocMarkup
        # class as visible here -- confirm which implementation provides it
        for markup in markups:
            text.extend( markup.beautify( first ) )
            first = 0

        # now beautify the documentation "borders" themselves
        lines = [" /*************************************************************************"]
        for l in text:
            lines.append( " *" + l )
        lines.append( " */" )

        block.lines = lines


def usage():
    # print command-line help to stdout
    print "\nDocBeauty 0.1 Usage information\n"
    print "  docbeauty [options] file1 [file2 ...]\n"
    print "using the following options:\n"
    print "  -h : print this page"
    print "  -b : backup original files with the 'orig' extension"
    print ""
    print "  --backup : same as -b"


def main( argv ):
    """main program loop"""
    # NOTE(review): the `argv` parameter is ignored; sys.argv is read directly
    global output_dir

    try:
        opts, args = getopt.getopt( sys.argv[1:], \
                                    "hb", \
                                    ["help", "backup"] )
    except getopt.GetoptError:
        usage()
        sys.exit( 2 )

    if args == []:
        usage()
        sys.exit( 1 )

    # process options
    #
    output_dir = None
    do_backup  = None

    for opt in opts:
        if opt[0] in ( "-h", "--help" ):
            usage()
            sys.exit( 0 )

        if opt[0] in ( "-b", "--backup" ):
            do_backup = 1
            # NOTE(review): `do_backup` is set but never read below --
            # the advertised backup feature appears to be unimplemented

    # create context and processor
    source_processor = SourceProcessor()

    # retrieve the list of files to process
    file_list = make_file_list( args )
    for filename in file_list:
        source_processor.parse_file( filename )

        for block in source_processor.blocks:
            beautify_block( block )

        # write the beautified output next to the input as "<name>.new"
        new_name = filename + ".new"
        ok       = None
        # NOTE(review): `ok` records a write failure but is never checked,
        # so errors while writing the output file are silently ignored
        try:
            file = open( new_name, "wt" )
            for block in source_processor.blocks:
                for line in block.lines:
                    file.write( line )
                    file.write( "\n" )
            file.close()
        except:
            ok = 0

# if called from the command line
#
if __name__ == '__main__':
    main( sys.argv )

# eof
[ [ 1, 0, 0.0796, 0.0088, 0, 0.66, 0, 648, 0, 1, 0, 0, 648, 0, 0 ], [ 1, 0, 0.0885, 0.0088, 0, 0.66, 0.1111, 273, 0, 1, 0, 0, 273, 0, 0 ], [ 1, 0, 0.0973, 0.0088, 0, ...
[ "from sources import *", "from content import *", "from utils import *", "import utils", "import sys, os, time, string, getopt", "content_processor = ContentProcessor()", "def beautify_block( block ):\n if block.content:\n content_processor.reset()\n\n markups = content_processor.pro...