code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
#
| Python |
import os
import re
from webob import Request, Response
from webob import exc
from tempita import HTMLTemplate
# Tempita template for the read-only page view.  Namespace: `page` (a Page
# object), `req` (the WebOb Request), and `message` (an optional flash
# message shown in a highlighted div).  `page.content` is inserted with the
# |html filter, i.e. without escaping, because it is already stored as HTML.
VIEW_TEMPLATE = HTMLTemplate("""\
<html>
<head>
<title>{{page.title}}</title>
</head>
<body>
<h1>{{page.title}}</h1>
{{if message}}
<div style="background-color: #99f">{{message}}</div>
{{endif}}
<div>{{page.content|html}}</div>
<hr>
<a href="{{req.url}}?action=edit">Edit</a>
</body>
</html>
""")
# Tempita template for the edit/create form.  Namespace: `page` and `req`.
# The hidden `mtime` field carries the file's modification time so the POST
# handler can detect concurrent edits.  Save/Cancel controls intentionally
# appear both above and below the textarea.
EDIT_TEMPLATE = HTMLTemplate("""\
<html>
<head>
<title>Edit: {{page.title}}</title>
</head>
<body>
{{if page.exists}}
<h1>Edit: {{page.title}}</h1>
{{else}}
<h1>Create: {{page.title}}</h1>
{{endif}}
<form action="{{req.path_url}}" method="POST">
<input type="hidden" name="mtime" value="{{page.mtime}}">
Title: <input type="text" name="title" style="width: 70%" value="{{page.title}}"><br>
Content: <input type="submit" value="Save">
<a href="{{req.path_url}}">Cancel</a>
<br>
<textarea name="content" style="width: 100%; height: 75%" rows="40">{{page.content}}</textarea>
<br>
<input type="submit" value="Save">
<a href="{{req.path_url}}">Cancel</a>
</form>
</body></html>
""")
class WikiApp(object):
    """WSGI application implementing a minimal file-backed wiki.

    Pages are stored as HTML files under ``storage_dir``.  The request
    path selects the page; the ``action`` query parameter (default
    ``view``) plus the HTTP method select the handler, looked up as
    ``action_<action>_<METHOD>``.
    """

    view_template = VIEW_TEMPLATE
    edit_template = EDIT_TEMPLATE

    def __init__(self, storage_dir):
        # Canonicalize so the prefix check in get_page() is reliable.
        self.storage_dir = os.path.abspath(os.path.normpath(storage_dir))

    def __call__(self, environ, start_response):
        req = Request(environ)
        action = req.params.get('action', 'view')
        page = self.get_page(req.path_info)
        try:
            try:
                meth = getattr(self, 'action_%s_%s' % (action, req.method))
            except AttributeError:
                raise exc.HTTPBadRequest('No such action %r' % action).exception
            resp = meth(req, page)
        except exc.HTTPException as e:
            # WebOb HTTP exceptions are themselves WSGI applications.
            resp = e
        return resp(environ, start_response)

    def get_page(self, path):
        """Map a URL path to a Page object inside the storage directory.

        Raises HTTPBadRequest for paths that try to escape storage_dir.
        """
        path = path.lstrip('/')
        if not path:
            path = 'index'
        # BUG FIX: the user-supplied page path must be joined onto the
        # storage directory.  The original called
        # ``os.path.join(self.storage_dir)`` with a single argument, so the
        # page path was dropped entirely: every URL mapped to the same file
        # and the traversal check below could never fire.
        path = os.path.join(self.storage_dir, path)
        path = os.path.normpath(path)
        if path.endswith('/'):
            path += 'index'
        if not path.startswith(self.storage_dir):
            # A '..' segment escaped the storage directory.
            raise exc.HTTPBadRequest("Bad path").exception
        path += '.html'
        return Page(path)

    def action_view_GET(self, req, page):
        """Render a page; redirect to the edit form when it doesn't exist."""
        if not page.exists:
            return exc.HTTPTemporaryRedirect(
                location=req.url + '?action=edit')
        if req.cookies.get('message'):
            message = req.cookies['message']
        else:
            message = None
        text = self.view_template.substitute(
            page=page, req=req, message=message)
        resp = Response(text)
        if message:
            # The flash message has been shown; clear it.
            resp.delete_cookie('message')
        else:
            # No message: allow conditional (304) responses based on mtime.
            resp.last_modified = page.mtime
            resp.conditional_response = True
        return resp

    def action_view_POST(self, req, page):
        """Save an edited page, guarding against concurrent edits."""
        submit_mtime = int(req.params.get('mtime') or '0') or None
        if page.mtime != submit_mtime:
            return exc.HTTPPreconditionFailed(
                "The page has been updated since you started editing it")
        page.set(
            title=req.params['title'],
            content=req.params['content'])
        # Redirect-after-POST, with a flash message in a cookie.
        resp = exc.HTTPSeeOther(
            location=req.path_url)
        resp.set_cookie('message', 'Page updated')
        return resp

    def action_edit_GET(self, req, page):
        """Render the edit (or create) form."""
        text = self.edit_template.substitute(
            page=page, req=req)
        return Response(text)
class Page(object):
    """A wiki page stored as a single HTML file on disk."""

    def __init__(self, filename):
        self.filename = filename

    @property
    def exists(self):
        return os.path.exists(self.filename)

    @property
    def title(self):
        """The page title from <title>, or a guess from the file name."""
        if not self.exists:
            # Guess a human-readable title from the file name.
            basename = os.path.splitext(os.path.basename(self.filename))[0]
            basename = re.sub(r'[_-]', ' ', basename)
            return basename.capitalize()
        match = re.search(r'<title>(.*?)</title>', self.full_content,
                          re.I | re.S)
        return match.group(1)

    @property
    def full_content(self):
        """The complete HTML document as stored on disk."""
        # FIX: context manager guarantees the handle is closed even if
        # read() raises; text mode because the file holds HTML text.
        with open(self.filename, 'r') as f:
            return f.read()

    @property
    def content(self):
        """The page body: the markup between <body> and </body>."""
        if not self.exists:
            return ''
        match = re.search(r'<body[^>]*>(.*?)</body>', self.full_content,
                          re.I | re.S)
        return match.group(1)

    @property
    def mtime(self):
        """Last-modification time of the file, or None if it doesn't exist."""
        if not self.exists:
            return None
        return os.stat(self.filename).st_mtime

    def set(self, title, content):
        """Write the page to disk, creating parent directories as needed."""
        dir = os.path.dirname(self.filename)
        if not os.path.exists(dir):
            os.makedirs(dir)
        new_content = """<html><head><title>%s</title></head><body>%s</body></html>""" % (
            title, content)
        with open(self.filename, 'w') as f:
            f.write(new_content)
if __name__ == '__main__':
    # Command-line entry point: serve the wiki via wsgiref.
    # NOTE: Python 2 print statements -- this script is Python 2 only.
    import optparse
    parser = optparse.OptionParser(
        usage='%prog --port=PORT'
        )
    parser.add_option(
        '-p', '--port',
        default='8080',
        dest='port',
        type='int',
        help='Port to serve on (default 8080)')
    parser.add_option(
        '--wiki-data',
        default='./wiki',
        dest='wiki_data',
        help='Place to put wiki data into (default ./wiki/)')
    options, args = parser.parse_args()
    print 'Writing wiki pages to %s' % options.wiki_data
    app = WikiApp(options.wiki_data)
    from wsgiref.simple_server import make_server
    httpd = make_server('localhost', options.port, app)
    print 'Serving on http://localhost:%s' % options.port
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C.
        print '^C'
| Python |
import os
import urllib
import time
import re
from cPickle import load, dump
from webob import Request, Response, html_escape
from webob import exc
class Commenter(object):
    """WSGI middleware that appends a comment section to HTML pages.

    Comments are pickled to one file per commented URL under
    ``storage_dir``.  POSTs to the reserved path ``/.comments`` add a
    comment; every other request is proxied to the wrapped app and, when
    it returns a 200 HTML page, the comment list plus a submission form
    are injected before ``</body>``.
    """

    def __init__(self, app, storage_dir):
        self.app = app
        self.storage_dir = storage_dir
        if not os.path.exists(storage_dir):
            os.makedirs(storage_dir)

    def __call__(self, environ, start_response):
        req = Request(environ)
        if req.path_info_peek() == '.comments':
            return self.process_comment(req)(environ, start_response)
        # This is the base path of *this* middleware:
        base_url = req.application_url
        resp = req.get_response(self.app)
        if resp.content_type != 'text/html' or resp.status_int != 200:
            # Not a successful HTML response, we don't want to
            # do anything to it
            return resp(environ, start_response)
        # Make sure the content isn't gzipped:
        resp.decode_content()
        comments = self.get_data(req.url)
        body = resp.body
        body = self.add_to_end(body, self.format_comments(comments))
        body = self.add_to_end(body, self.submit_form(base_url, req))
        resp.body = body
        return resp(environ, start_response)

    def get_data(self, url):
        """Load the list of comment dicts for ``url`` ([] if none yet)."""
        filename = self.url_filename(url)
        if not os.path.exists(filename):
            return []
        # FIX: close the file even if unpickling raises.
        f = open(filename, 'rb')
        try:
            return load(f)
        finally:
            f.close()

    def save_data(self, url, data):
        """Pickle the comment list for ``url`` to its storage file."""
        f = open(self.url_filename(url), 'wb')
        try:
            dump(data, f)
        finally:
            f.close()

    def url_filename(self, url):
        # Quoting with no safe characters makes the whole URL usable as a
        # flat filename.
        return os.path.join(self.storage_dir, urllib.quote(url, ''))

    _end_body_re = re.compile(r'</body.*?>', re.I | re.S)

    def add_to_end(self, html, extra_html):
        """
        Adds extra_html to the end of the html page (before </body>)
        """
        match = self._end_body_re.search(html)
        if not match:
            # No </body>: just append.
            return html + extra_html
        else:
            return html[:match.start()] + extra_html + html[match.start():]

    def format_comments(self, comments):
        """Render the stored comments as an HTML fragment ('' if none)."""
        if not comments:
            return ''
        text = []
        text.append('<hr>')
        text.append('<h2><a name="comment-area"></a>Comments (%s):</h2>' % len(comments))
        for comment in comments:
            text.append('<h3><a href="%s">%s</a> at %s:</h3>' % (
                html_escape(comment['homepage']), html_escape(comment['name']),
                time.strftime('%c', comment['time'])))
            # SECURITY FIX: the comment body is untrusted user input and was
            # previously injected verbatim (stored XSS); escape it like the
            # name and homepage fields.
            text.append(html_escape(comment['comments']))
        return ''.join(text)

    def submit_form(self, base_path, req):
        """Return the HTML form used to submit a new comment."""
        return '''<h2>Leave a comment:</h2>
<form action="%s/.comments" method="POST">
<input type="hidden" name="url" value="%s">
<table width="100%%">
<tr><td>Name:</td>
<td><input type="text" name="name" style="width: 100%%"></td></tr>
<tr><td>URL:</td>
<td><input type="text" name="homepage" style="width: 100%%"></td></tr>
</table>
Comments:<br>
<textarea name="comments" rows=10 style="width: 100%%"></textarea><br>
<input type="submit" value="Submit comment">
</form>
''' % (base_path, html_escape(req.url))

    def process_comment(self, req):
        """Handle a comment POST; redirect back to the commented page."""
        try:
            url = req.params['url']
            name = req.params['name']
            homepage = req.params['homepage']
            comments = req.params['comments']
        except KeyError as e:
            resp = exc.HTTPBadRequest('Missing parameter: %s' % e)
            return resp
        data = self.get_data(url)
        data.append(dict(
            name=name,
            homepage=homepage,
            comments=comments,
            time=time.gmtime()))
        self.save_data(url, data)
        resp = exc.HTTPSeeOther(location=url + '#comment-area')
        return resp
if __name__ == '__main__':
    # Command-line entry point: serve a static directory wrapped in the
    # commenting middleware.  Python 2 only (print statements).
    import optparse
    parser = optparse.OptionParser(
        usage='%prog --port=PORT BASE_DIRECTORY'
        )
    parser.add_option(
        '-p', '--port',
        default='8080',
        dest='port',
        type='int',
        help='Port to serve on (default 8080)')
    parser.add_option(
        '--comment-data',
        default='./comments',
        dest='comment_data',
        help='Place to put comment data into (default ./comments/)')
    options, args = parser.parse_args()
    if not args:
        parser.error('You must give a BASE_DIRECTORY')
    base_dir = args[0]
    # Paste's StaticURLParser serves the files; Commenter wraps it.
    from paste.urlparser import StaticURLParser
    app = StaticURLParser(base_dir)
    app = Commenter(app, options.comment_data)
    from wsgiref.simple_server import make_server
    httpd = make_server('localhost', options.port, app)
    print 'Serving on http://localhost:%s' % options.port
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        print '^C'
| Python |
#!/usr/bin/python
import yaml, codecs, sys, os.path, optparse
class Style:
    """A highlighting style loaded from the yaml_hl config file.

    ``substitutions`` maps ``(yaml_class, -1)`` to the text emitted at the
    start of a token/event and ``(yaml_class, +1)`` to the text emitted at
    its end.  ``replaces`` holds (substring, replacement) pairs applied to
    the source text itself.
    """

    def __init__(self, header=None, footer=None,
                 tokens=None, events=None, replaces=None):
        self.header = header
        self.footer = footer
        self.replaces = replaces
        self.substitutions = {}
        for mapping, suffix in ((tokens, 'Token'), (events, 'Event')):
            if not mapping:
                continue
            for key in mapping:
                # 'block-mapping-start' -> yaml.BlockMappingStartToken, etc.
                camel = ''.join(part.capitalize() for part in key.split('-'))
                yaml_cls = getattr(yaml, '%s%s' % (camel, suffix))
                spec = mapping[key]
                if not spec:
                    continue
                begin_text = spec.get('start')
                end_text = spec.get('end')
                if begin_text:
                    self.substitutions[yaml_cls, -1] = begin_text
                if end_text:
                    self.substitutions[yaml_cls, +1] = end_text

    def __setstate__(self, state):
        # YAML object construction / unpickling routes through __init__.
        self.__init__(**state)
# Teach the YAML loader the config-file layout: every top-level mapping in
# the config constructs a Style object, and each style's `replaces` list is
# loaded as ordered pairs.
yaml.add_path_resolver(u'tag:yaml.org,2002:python/object:__main__.Style',
        [None], dict)
yaml.add_path_resolver(u'tag:yaml.org,2002:pairs',
        [None, u'replaces'], list)
class YAMLHighlight:
    """Run the YAML scanner and parser over the input and wrap the source
    text in the selected style's start/end substitutions.

    Python 2 only (uses the `file` and `unicode` builtins).
    """

    def __init__(self, options):
        # NOTE(review): yaml.load() constructs arbitrary objects and is
        # unsafe for untrusted input; the config file is assumed trusted.
        config = yaml.load(file(options.config, 'rb').read())
        self.style = config[options.style]
        if options.input:
            self.input = file(options.input, 'rb')
        else:
            self.input = sys.stdin
        if options.output:
            self.output = file(options.output, 'wb')
        else:
            self.output = sys.stdout

    def highlight(self):
        """Read the whole input, collect token/event boundary markers, and
        write the highlighted result (UTF-8 encoded) to the output."""
        input = self.input.read()
        # Decode: honor a UTF-16 BOM, otherwise assume UTF-8.
        if input.startswith(codecs.BOM_UTF16_LE):
            input = unicode(input, 'utf-16-le')
        elif input.startswith(codecs.BOM_UTF16_BE):
            input = unicode(input, 'utf-16-be')
        else:
            input = unicode(input, 'utf-8')
        substitutions = self.style.substitutions
        tokens = yaml.scan(input)
        events = yaml.parse(input)
        # Each marker is [character index, weight, ordinal, text].  The
        # weights (+-2 for tokens, +-1 for events) control sort order when
        # several markers fall on the same index, so token markup nests
        # inside event markup.
        markers = []
        number = 0
        for token in tokens:
            number += 1
            if token.start_mark.index != token.end_mark.index:
                cls = token.__class__
                if (cls, -1) in substitutions:
                    markers.append([token.start_mark.index, +2, number, substitutions[cls, -1]])
                if (cls, +1) in substitutions:
                    markers.append([token.end_mark.index, -2, number, substitutions[cls, +1]])
        number = 0
        for event in events:
            number += 1
            cls = event.__class__
            if (cls, -1) in substitutions:
                markers.append([event.start_mark.index, +1, number, substitutions[cls, -1]])
            if (cls, +1) in substitutions:
                markers.append([event.end_mark.index, -1, number, substitutions[cls, +1]])
        markers.sort()
        markers.reverse()
        # Walk the markers from the end of the input backwards, emitting
        # (escaped) source chunks interleaved with the substitution text,
        # then reverse once at the end.
        chunks = []
        position = len(input)
        for index, weight1, weight2, substitution in markers:
            if index < position:
                chunk = input[index:position]
                for substring, replacement in self.style.replaces:
                    chunk = chunk.replace(substring, replacement)
                chunks.append(chunk)
                position = index
            chunks.append(substitution)
        chunks.reverse()
        result = u''.join(chunks)
        if self.style.header:
            self.output.write(self.style.header)
        self.output.write(result.encode('utf-8'))
        if self.style.footer:
            self.output.write(self.style.footer)
if __name__ == '__main__':
    # CLI: pick a style and optional input/output files; the config file
    # defaults to yaml_hl.cfg next to this script.
    parser = optparse.OptionParser()
    parser.add_option('-s', '--style', dest='style', default='ascii',
            help="specify the highlighting style", metavar='STYLE')
    parser.add_option('-c', '--config', dest='config',
            default=os.path.join(os.path.dirname(sys.argv[0]), 'yaml_hl.cfg'),
            help="set an alternative configuration file", metavar='CONFIG')
    parser.add_option('-i', '--input', dest='input', default=None,
            help="set the input file (default: stdin)", metavar='FILE')
    parser.add_option('-o', '--output', dest='output', default=None,
            help="set the output file (default: stdout)", metavar='FILE')
    (options, args) = parser.parse_args()
    hl = YAMLHighlight(options)
    hl.highlight()
| Python |
#!/usr/bin/python
import yaml, codecs, sys, os.path, optparse
class Style:
    """A highlighting style loaded from the yaml_hl config file.

    ``substitutions`` maps ``(yaml_class, -1)`` to the text emitted at the
    start of a token/event and ``(yaml_class, +1)`` to the text emitted at
    its end.  ``replaces`` holds (substring, replacement) pairs applied to
    the source text itself.
    """

    def __init__(self, header=None, footer=None,
                 tokens=None, events=None, replaces=None):
        self.header = header
        self.footer = footer
        self.replaces = replaces
        self.substitutions = {}
        for mapping, suffix in ((tokens, 'Token'), (events, 'Event')):
            if not mapping:
                continue
            for key in mapping:
                # 'block-mapping-start' -> yaml.BlockMappingStartToken, etc.
                camel = ''.join(part.capitalize() for part in key.split('-'))
                yaml_cls = getattr(yaml, '%s%s' % (camel, suffix))
                spec = mapping[key]
                if not spec:
                    continue
                begin_text = spec.get('start')
                end_text = spec.get('end')
                if begin_text:
                    self.substitutions[yaml_cls, -1] = begin_text
                if end_text:
                    self.substitutions[yaml_cls, +1] = end_text

    def __setstate__(self, state):
        # YAML object construction / unpickling routes through __init__.
        self.__init__(**state)
# Teach the YAML loader the config-file layout: every top-level mapping in
# the config constructs a Style object, and each style's `replaces` list is
# loaded as ordered pairs.
yaml.add_path_resolver(u'tag:yaml.org,2002:python/object:__main__.Style',
        [None], dict)
yaml.add_path_resolver(u'tag:yaml.org,2002:pairs',
        [None, u'replaces'], list)
class YAMLHighlight:
    """Run the YAML scanner and parser over the input and wrap the source
    text in the selected style's start/end substitutions.

    Python 2 only (uses the `file` and `unicode` builtins).
    """

    def __init__(self, options):
        # NOTE(review): yaml.load() constructs arbitrary objects and is
        # unsafe for untrusted input; the config file is assumed trusted.
        config = yaml.load(file(options.config, 'rb').read())
        self.style = config[options.style]
        if options.input:
            self.input = file(options.input, 'rb')
        else:
            self.input = sys.stdin
        if options.output:
            self.output = file(options.output, 'wb')
        else:
            self.output = sys.stdout

    def highlight(self):
        """Read the whole input, collect token/event boundary markers, and
        write the highlighted result (UTF-8 encoded) to the output."""
        input = self.input.read()
        # Decode: honor a UTF-16 BOM, otherwise assume UTF-8.
        if input.startswith(codecs.BOM_UTF16_LE):
            input = unicode(input, 'utf-16-le')
        elif input.startswith(codecs.BOM_UTF16_BE):
            input = unicode(input, 'utf-16-be')
        else:
            input = unicode(input, 'utf-8')
        substitutions = self.style.substitutions
        tokens = yaml.scan(input)
        events = yaml.parse(input)
        # Each marker is [character index, weight, ordinal, text].  The
        # weights (+-2 for tokens, +-1 for events) control sort order when
        # several markers fall on the same index, so token markup nests
        # inside event markup.
        markers = []
        number = 0
        for token in tokens:
            number += 1
            if token.start_mark.index != token.end_mark.index:
                cls = token.__class__
                if (cls, -1) in substitutions:
                    markers.append([token.start_mark.index, +2, number, substitutions[cls, -1]])
                if (cls, +1) in substitutions:
                    markers.append([token.end_mark.index, -2, number, substitutions[cls, +1]])
        number = 0
        for event in events:
            number += 1
            cls = event.__class__
            if (cls, -1) in substitutions:
                markers.append([event.start_mark.index, +1, number, substitutions[cls, -1]])
            if (cls, +1) in substitutions:
                markers.append([event.end_mark.index, -1, number, substitutions[cls, +1]])
        markers.sort()
        markers.reverse()
        # Walk the markers from the end of the input backwards, emitting
        # (escaped) source chunks interleaved with the substitution text,
        # then reverse once at the end.
        chunks = []
        position = len(input)
        for index, weight1, weight2, substitution in markers:
            if index < position:
                chunk = input[index:position]
                for substring, replacement in self.style.replaces:
                    chunk = chunk.replace(substring, replacement)
                chunks.append(chunk)
                position = index
            chunks.append(substitution)
        chunks.reverse()
        result = u''.join(chunks)
        if self.style.header:
            self.output.write(self.style.header)
        self.output.write(result.encode('utf-8'))
        if self.style.footer:
            self.output.write(self.style.footer)
if __name__ == '__main__':
    # CLI: pick a style and optional input/output files; the config file
    # defaults to yaml_hl.cfg next to this script.
    parser = optparse.OptionParser()
    parser.add_option('-s', '--style', dest='style', default='ascii',
            help="specify the highlighting style", metavar='STYLE')
    parser.add_option('-c', '--config', dest='config',
            default=os.path.join(os.path.dirname(sys.argv[0]), 'yaml_hl.cfg'),
            help="set an alternative configuration file", metavar='CONFIG')
    parser.add_option('-i', '--input', dest='input', default=None,
            help="set the input file (default: stdin)", metavar='FILE')
    parser.add_option('-o', '--output', dest='output', default=None,
            help="set the output file (default: stdout)", metavar='FILE')
    (options, args) = parser.parse_args()
    hl = YAMLHighlight(options)
    hl.highlight()
| Python |
# Distutils metadata and setup() call for the PyYAML distribution.
NAME = 'PyYAML'
VERSION = '3.05'
DESCRIPTION = "YAML parser and emitter for Python"
LONG_DESCRIPTION = """\
YAML is a data serialization format designed for human readability and
interaction with scripting languages.  PyYAML is a YAML parser and
emitter for Python.

PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
support, capable extension API, and sensible error messages.  PyYAML
supports standard YAML tags and provides Python-specific tags that allow
to represent an arbitrary Python object.

PyYAML is applicable for a broad range of tasks from complex
configuration files to object serialization and persistance."""
AUTHOR = "Kirill Simonov"
AUTHOR_EMAIL = 'xi@resolvent.net'
LICENSE = "MIT"
PLATFORMS = "Any"
URL = "http://pyyaml.org/wiki/PyYAML"
DOWNLOAD_URL = "http://pyyaml.org/download/pyyaml/%s-%s.tar.gz" % (NAME, VERSION)
# Trove classifiers for PyPI.
CLASSIFIERS = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Programming Language :: Python",
    "Topic :: Software Development :: Libraries :: Python Modules",
    "Topic :: Text Processing :: Markup",
]

from distutils.core import setup

if __name__ == '__main__':
    # Pure-Python build: the yaml package lives under lib/.
    setup(
        name=NAME,
        version=VERSION,
        description=DESCRIPTION,
        long_description=LONG_DESCRIPTION,
        author=AUTHOR,
        author_email=AUTHOR_EMAIL,
        license=LICENSE,
        platforms=PLATFORMS,
        url=URL,
        download_url=DOWNLOAD_URL,
        classifiers=CLASSIFIERS,
        package_dir={'': 'lib'},
        packages=['yaml'],
    )
| Python |
# Extension-build variant of setup.py: reuses the metadata constants from
# setup.py (star import) and additionally compiles the Pyrex-based LibYAML
# binding (_yaml) against the system libyaml.
from setup import *

from distutils.core import setup
from distutils.extension import Extension
from Pyrex.Distutils import build_ext

if __name__ == '__main__':
    setup(
        name=NAME,
        version=VERSION,
        description=DESCRIPTION,
        long_description=LONG_DESCRIPTION,
        author=AUTHOR,
        author_email=AUTHOR_EMAIL,
        license=LICENSE,
        platforms=PLATFORMS,
        url=URL,
        download_url=DOWNLOAD_URL,
        classifiers=CLASSIFIERS,
        package_dir={'': 'lib'},
        packages=['yaml'],
        ext_modules=[
            Extension("_yaml", ["ext/_yaml.pyx"], libraries=['yaml']),
        ],
        cmdclass={'build_ext': build_ext}
    )
| Python |
class Token(object):
    """Base class for scanner tokens; carries the source span marks."""

    def __init__(self, start_mark, end_mark):
        self.start_mark = start_mark
        self.end_mark = end_mark

    def __repr__(self):
        # Show every instance attribute except the marks, sorted so the
        # repr is stable.
        shown = sorted(name for name in self.__dict__
                       if not name.endswith('_mark'))
        described = ', '.join('%s=%r' % (name, getattr(self, name))
                              for name in shown)
        return '%s(%s)' % (self.__class__.__name__, described)
#class BOMToken(Token):
#    id = '<byte order mark>'

# Each subclass's `id` is the display name used in messages; subclasses
# that carry data override __init__ to store it alongside the marks.

class DirectiveToken(Token):
    # A '%...' directive line, e.g. %YAML or %TAG.
    id = '<directive>'
    def __init__(self, name, value, start_mark, end_mark):
        self.name = name
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

class DocumentStartToken(Token):
    id = '<document start>'

class DocumentEndToken(Token):
    id = '<document end>'

class StreamStartToken(Token):
    id = '<stream start>'
    def __init__(self, start_mark=None, end_mark=None,
            encoding=None):
        self.start_mark = start_mark
        self.end_mark = end_mark
        # Encoding detected for the stream, if any.
        self.encoding = encoding

class StreamEndToken(Token):
    id = '<stream end>'

class BlockSequenceStartToken(Token):
    id = '<block sequence start>'

class BlockMappingStartToken(Token):
    id = '<block mapping start>'

class BlockEndToken(Token):
    id = '<block end>'

class FlowSequenceStartToken(Token):
    id = '['

class FlowMappingStartToken(Token):
    id = '{'

class FlowSequenceEndToken(Token):
    id = ']'

class FlowMappingEndToken(Token):
    id = '}'

class KeyToken(Token):
    id = '?'

class ValueToken(Token):
    id = ':'

class BlockEntryToken(Token):
    id = '-'

class FlowEntryToken(Token):
    id = ','

class AliasToken(Token):
    # '*name' reference to an anchored node.
    id = '<alias>'
    def __init__(self, value, start_mark, end_mark):
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

class AnchorToken(Token):
    # '&name' anchor definition.
    id = '<anchor>'
    def __init__(self, value, start_mark, end_mark):
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

class TagToken(Token):
    id = '<tag>'
    def __init__(self, value, start_mark, end_mark):
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

class ScalarToken(Token):
    id = '<scalar>'
    def __init__(self, value, plain, start_mark, end_mark, style=None):
        self.value = value
        self.plain = plain       # True for plain (unquoted) scalars
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.style = style       # quoting/block style indicator, if any
| Python |
# Scanner produces tokens of the following types:
# STREAM-START
# STREAM-END
# DIRECTIVE(name, value)
# DOCUMENT-START
# DOCUMENT-END
# BLOCK-SEQUENCE-START
# BLOCK-MAPPING-START
# BLOCK-END
# FLOW-SEQUENCE-START
# FLOW-MAPPING-START
# FLOW-SEQUENCE-END
# FLOW-MAPPING-END
# BLOCK-ENTRY
# FLOW-ENTRY
# KEY
# VALUE
# ALIAS(value)
# ANCHOR(value)
# TAG(value)
# SCALAR(value, plain, style)
#
# Read comments in the Scanner code for more details.
#
__all__ = ['Scanner', 'ScannerError']
from error import MarkedYAMLError
from tokens import *
class ScannerError(MarkedYAMLError):
    # Raised by the Scanner for malformed input; MarkedYAMLError supplies
    # the context/problem marks used for error reporting.
    pass
class SimpleKey(object):
    # See below simple keys treatment.
    # A record of a *potential* simple key (a key written without the '?'
    # indicator): where it starts and which token number it would become.
    def __init__(self, token_number, required, index, line, column, mark):
        self.token_number = token_number  # position in the token stream
        self.required = required          # a ':' MUST follow (block context)
        self.index = index                # character offset in the stream
        self.line = line
        self.column = column
        self.mark = mark                  # mark for error reporting
class Scanner(object):
    def __init__(self):
        """Initialize the scanner."""
        # It is assumed that Scanner and Reader will have a common descendant.
        # Reader do the dirty work of checking for BOM and converting the
        # input data to Unicode. It also adds NUL to the end.
        #
        # Reader supports the following methods
        #   self.peek(i=0)       # peek the next i-th character
        #   self.prefix(l=1)     # peek the next l characters
        #   self.forward(l=1)    # read the next l characters and move the pointer.

        # Had we reached the end of the stream?
        self.done = False

        # The number of unclosed '{' and '['. `flow_level == 0` means block
        # context.
        self.flow_level = 0

        # List of processed tokens that are not yet emitted.
        self.tokens = []

        # Add the STREAM-START token.
        self.fetch_stream_start()

        # Number of tokens that were emitted through the `get_token` method.
        self.tokens_taken = 0

        # The current indentation level.
        self.indent = -1

        # Past indentation levels.
        self.indents = []

        # Variables related to simple keys treatment.

        # A simple key is a key that is not denoted by the '?' indicator.
        # Example of simple keys:
        #   ---
        #   block simple key: value
        #   ? not a simple key:
        #   : { flow simple key: value }
        # We emit the KEY token before all keys, so when we find a potential
        # simple key, we try to locate the corresponding ':' indicator.
        # Simple keys should be limited to a single line and 1024 characters.

        # Can a simple key start at the current position? A simple key may
        # start:
        # - at the beginning of the line, not counting indentation spaces
        #       (in block context),
        # - after '{', '[', ',' (in the flow context),
        # - after '?', ':', '-' (in the block context).
        # In the block context, this flag also signifies if a block collection
        # may start at the current position.
        self.allow_simple_key = True

        # Keep track of possible simple keys. This is a dictionary. The key
        # is `flow_level`; there can be no more that one possible simple key
        # for each level. The value is a SimpleKey record:
        #   (token_number, required, index, line, column, mark)
        # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
        # '[', or '{' tokens.
        self.possible_simple_keys = {}
# Public methods.
def check_token(self, *choices):
# Check if the next token is one of the given types.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
if not choices:
return True
for choice in choices:
if isinstance(self.tokens[0], choice):
return True
return False
def peek_token(self):
# Return the next token, but do not delete if from the queue.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
return self.tokens[0]
def get_token(self):
# Return the next token.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
self.tokens_taken += 1
return self.tokens.pop(0)
# Private methods.
    def need_more_tokens(self):
        # Truthy when another fetch is needed before a token can be handed
        # out; falsy once the stream is done or the head token is settled.
        if self.done:
            return False
        if not self.tokens:
            return True
        # The current token may be a potential simple key, so we
        # need to look further.
        self.stale_possible_simple_keys()
        if self.next_possible_simple_key() == self.tokens_taken:
            return True
    def fetch_more_tokens(self):
        # Dispatch on the next significant character; exactly one fetcher
        # runs per call and appends token(s) to self.tokens.

        # Eat whitespaces and comments until we reach the next token.
        self.scan_to_next_token()

        # Remove obsolete possible simple keys.
        self.stale_possible_simple_keys()

        # Compare the current indentation and column. It may add some tokens
        # and decrease the current indentation level.
        self.unwind_indent(self.column)

        # Peek the next character.
        ch = self.peek()

        # Is it the end of stream?
        if ch == u'\0':
            return self.fetch_stream_end()

        # Is it a directive?
        if ch == u'%' and self.check_directive():
            return self.fetch_directive()

        # Is it the document start?
        if ch == u'-' and self.check_document_start():
            return self.fetch_document_start()

        # Is it the document end?
        if ch == u'.' and self.check_document_end():
            return self.fetch_document_end()

        # TODO: support for BOM within a stream.
        #if ch == u'\uFEFF':
        #    return self.fetch_bom()    <-- issue BOMToken

        # Note: the order of the following checks is NOT significant.

        # Is it the flow sequence start indicator?
        if ch == u'[':
            return self.fetch_flow_sequence_start()

        # Is it the flow mapping start indicator?
        if ch == u'{':
            return self.fetch_flow_mapping_start()

        # Is it the flow sequence end indicator?
        if ch == u']':
            return self.fetch_flow_sequence_end()

        # Is it the flow mapping end indicator?
        if ch == u'}':
            return self.fetch_flow_mapping_end()

        # Is it the flow entry indicator?
        if ch == u',':
            return self.fetch_flow_entry()

        # Is it the block entry indicator?
        if ch == u'-' and self.check_block_entry():
            return self.fetch_block_entry()

        # Is it the key indicator?
        if ch == u'?' and self.check_key():
            return self.fetch_key()

        # Is it the value indicator?
        if ch == u':' and self.check_value():
            return self.fetch_value()

        # Is it an alias?
        if ch == u'*':
            return self.fetch_alias()

        # Is it an anchor?
        if ch == u'&':
            return self.fetch_anchor()

        # Is it a tag?
        if ch == u'!':
            return self.fetch_tag()

        # Is it a literal scalar?
        if ch == u'|' and not self.flow_level:
            return self.fetch_literal()

        # Is it a folded scalar?
        if ch == u'>' and not self.flow_level:
            return self.fetch_folded()

        # Is it a single quoted scalar?
        if ch == u'\'':
            return self.fetch_single()

        # Is it a double quoted scalar?
        if ch == u'\"':
            return self.fetch_double()

        # It must be a plain scalar then.
        if self.check_plain():
            return self.fetch_plain()

        # No? It's an error. Let's produce a nice error message.
        raise ScannerError("while scanning for the next token", None,
                "found character %r that cannot start any token"
                % ch.encode('utf-8'), self.get_mark())
# Simple keys treatment.
def next_possible_simple_key(self):
# Return the number of the nearest possible simple key. Actually we
# don't need to loop through the whole dictionary. We may replace it
# with the following code:
# if not self.possible_simple_keys:
# return None
# return self.possible_simple_keys[
# min(self.possible_simple_keys.keys())].token_number
min_token_number = None
for level in self.possible_simple_keys:
key = self.possible_simple_keys[level]
if min_token_number is None or key.token_number < min_token_number:
min_token_number = key.token_number
return min_token_number
    def stale_possible_simple_keys(self):
        # Remove entries that are no longer possible simple keys. According to
        # the YAML specification, simple keys
        # - should be limited to a single line,
        # - should be no longer than 1024 characters.
        # Disabling this procedure will allow simple keys of any length and
        # height (may cause problems if indentation is broken though).
        # NOTE: Python 2 dict.keys() returns a list copy, so deleting
        # entries while iterating is safe here.
        for level in self.possible_simple_keys.keys():
            key = self.possible_simple_keys[level]
            if key.line != self.line  \
                    or self.index-key.index > 1024:
                if key.required:
                    # A required key that went stale without its ':' is a
                    # syntax error.
                    raise ScannerError("while scanning a simple key", key.mark,
                            "could not found expected ':'", self.get_mark())
                del self.possible_simple_keys[level]
    def save_possible_simple_key(self):
        # The next token may start a simple key. We check if it's possible
        # and save its position. This function is called for
        #   ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
        # (self.index/self.line/self.column come from the Reader mixin.)

        # Check if a simple key is required at the current position.
        required = not self.flow_level and self.indent == self.column

        # A simple key is required only if it is the first token in the current
        # line. Therefore it is always allowed.
        assert self.allow_simple_key or not required

        # The next token might be a simple key. Let's save it's number and
        # position.
        if self.allow_simple_key:
            self.remove_possible_simple_key()
            token_number = self.tokens_taken+len(self.tokens)
            key = SimpleKey(token_number, required,
                    self.index, self.line, self.column, self.get_mark())
            self.possible_simple_keys[self.flow_level] = key
def remove_possible_simple_key(self):
# Remove the saved possible key position at the current flow level.
if self.flow_level in self.possible_simple_keys:
key = self.possible_simple_keys[self.flow_level]
if key.required:
raise ScannerError("while scanning a simple key", key.mark,
"could not found expected ':'", self.get_mark())
del self.possible_simple_keys[self.flow_level]
# Indentation functions.
    def unwind_indent(self, column):
        # Pop indentation levels deeper than `column`, emitting a BLOCK-END
        # token for each one (block context only).

        ## In flow context, tokens should respect indentation.
        ## Actually the condition should be `self.indent >= column` according to
        ## the spec. But this condition will prohibit intuitively correct
        ## constructions such as
        ## key : {
        ## }
        #if self.flow_level and self.indent > column:
        #    raise ScannerError(None, None,
        #            "invalid intendation or unclosed '[' or '{'",
        #            self.get_mark())

        # In the flow context, indentation is ignored. We make the scanner less
        # restrictive then specification requires.
        if self.flow_level:
            return

        # In block context, we may need to issue the BLOCK-END tokens.
        while self.indent > column:
            mark = self.get_mark()
            self.indent = self.indents.pop()
            self.tokens.append(BlockEndToken(mark, mark))
def add_indent(self, column):
# Check if we need to increase indentation.
if self.indent < column:
self.indents.append(self.indent)
self.indent = column
return True
return False
# Fetchers.
    def fetch_stream_start(self):
        # We always add STREAM-START as the first token and STREAM-END as the
        # last token.

        # Read the token.
        mark = self.get_mark()

        # Add STREAM-START.
        # NOTE(review): self.encoding is supplied by the Reader mixin, not
        # defined in this class.
        self.tokens.append(StreamStartToken(mark, mark,
            encoding=self.encoding))

    def fetch_stream_end(self):
        # Set the current intendation to -1.
        self.unwind_indent(-1)

        # Reset everything (not really needed).
        self.allow_simple_key = False
        self.possible_simple_keys = {}

        # Read the token.
        mark = self.get_mark()

        # Add STREAM-END.
        self.tokens.append(StreamEndToken(mark, mark))

        # The steam is finished.
        self.done = True

    def fetch_directive(self):
        # Set the current intendation to -1.
        self.unwind_indent(-1)

        # Reset simple keys.
        self.remove_possible_simple_key()
        self.allow_simple_key = False

        # Scan and add DIRECTIVE.
        self.tokens.append(self.scan_directive())
    def fetch_document_start(self):
        # '---' marker.
        self.fetch_document_indicator(DocumentStartToken)

    def fetch_document_end(self):
        # '...' marker.
        self.fetch_document_indicator(DocumentEndToken)

    def fetch_document_indicator(self, TokenClass):
        # Set the current intendation to -1.
        self.unwind_indent(-1)

        # Reset simple keys. Note that there could not be a block collection
        # after '---'.
        self.remove_possible_simple_key()
        self.allow_simple_key = False

        # Add DOCUMENT-START or DOCUMENT-END.
        start_mark = self.get_mark()
        # Both indicators are exactly three characters long.
        self.forward(3)
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))
    def fetch_flow_sequence_start(self):
        self.fetch_flow_collection_start(FlowSequenceStartToken)

    def fetch_flow_mapping_start(self):
        self.fetch_flow_collection_start(FlowMappingStartToken)

    def fetch_flow_collection_start(self, TokenClass):
        # '[' and '{' may start a simple key.
        self.save_possible_simple_key()

        # Increase the flow level.
        self.flow_level += 1

        # Simple keys are allowed after '[' and '{'.
        self.allow_simple_key = True

        # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))

    def fetch_flow_sequence_end(self):
        self.fetch_flow_collection_end(FlowSequenceEndToken)

    def fetch_flow_mapping_end(self):
        self.fetch_flow_collection_end(FlowMappingEndToken)

    def fetch_flow_collection_end(self, TokenClass):
        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Decrease the flow level.
        self.flow_level -= 1

        # No simple keys after ']' or '}'.
        self.allow_simple_key = False

        # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_entry(self):
# Simple keys are allowed after ','.
self.allow_simple_key = True
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Add FLOW-ENTRY.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(FlowEntryToken(start_mark, end_mark))
    def fetch_block_entry(self):
        """Handle '-': emit BLOCK-ENTRY, opening a new block sequence
        (BLOCK-SEQUENCE-START) first when the indentation increases.

        Raises ScannerError when an entry is not allowed at this point
        in the block context.
        """
        # Block context needs additional checks.
        if not self.flow_level:
            # Are we allowed to start a new entry?
            if not self.allow_simple_key:
                raise ScannerError(None, None,
                        "sequence entries are not allowed here",
                        self.get_mark())
            # We may need to add BLOCK-SEQUENCE-START.
            if self.add_indent(self.column):
                mark = self.get_mark()
                self.tokens.append(BlockSequenceStartToken(mark, mark))
        # It's an error for the block entry to occur in the flow context,
        # but we let the parser detect this.
        else:
            pass
        # Simple keys are allowed after '-'.
        self.allow_simple_key = True
        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()
        # Add BLOCK-ENTRY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(BlockEntryToken(start_mark, end_mark))
    def fetch_key(self):
        """Handle an explicit '?' key indicator: emit KEY, opening a new
        block mapping (BLOCK-MAPPING-START) first when the indentation
        increases."""
        # Block context needs additional checks.
        if not self.flow_level:
            # Are we allowed to start a key (not necessarily a simple one)?
            if not self.allow_simple_key:
                raise ScannerError(None, None,
                        "mapping keys are not allowed here",
                        self.get_mark())
            # We may need to add BLOCK-MAPPING-START.
            if self.add_indent(self.column):
                mark = self.get_mark()
                self.tokens.append(BlockMappingStartToken(mark, mark))
        # Simple keys are allowed after '?' in the block context.
        self.allow_simple_key = not self.flow_level
        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()
        # Add KEY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(KeyToken(start_mark, end_mark))
    def fetch_value(self):
        """Handle ':': emit VALUE; if a simple key is pending at this
        flow level, retroactively insert the KEY token (and possibly
        BLOCK-MAPPING-START) at the saved position."""
        # Do we determine a simple key?
        if self.flow_level in self.possible_simple_keys:
            # Add KEY.
            key = self.possible_simple_keys[self.flow_level]
            del self.possible_simple_keys[self.flow_level]
            self.tokens.insert(key.token_number-self.tokens_taken,
                    KeyToken(key.mark, key.mark))
            # If this key starts a new block mapping, we need to add
            # BLOCK-MAPPING-START.
            if not self.flow_level:
                if self.add_indent(key.column):
                    self.tokens.insert(key.token_number-self.tokens_taken,
                            BlockMappingStartToken(key.mark, key.mark))
            # There cannot be two simple keys one after another.
            self.allow_simple_key = False
        # It must be a part of a complex key.
        else:
            # Block context needs additional checks.
            # (Do we really need them? They will be caught by the parser
            # anyway.)
            if not self.flow_level:
                # We are allowed to start a complex value if and only if
                # we can start a simple key.
                if not self.allow_simple_key:
                    raise ScannerError(None, None,
                            "mapping values are not allowed here",
                            self.get_mark())
            # If this value starts a new block mapping, we need to add
            # BLOCK-MAPPING-START.  It will be detected as an error later by
            # the parser.
            if not self.flow_level:
                if self.add_indent(self.column):
                    mark = self.get_mark()
                    self.tokens.append(BlockMappingStartToken(mark, mark))
        # Simple keys are allowed after ':' in the block context.
        self.allow_simple_key = not self.flow_level
        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()
        # Add VALUE.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(ValueToken(start_mark, end_mark))
def fetch_alias(self):
# ALIAS could be a simple key.
self.save_possible_simple_key()
# No simple keys after ALIAS.
self.allow_simple_key = False
# Scan and add ALIAS.
self.tokens.append(self.scan_anchor(AliasToken))
def fetch_anchor(self):
# ANCHOR could start a simple key.
self.save_possible_simple_key()
# No simple keys after ANCHOR.
self.allow_simple_key = False
# Scan and add ANCHOR.
self.tokens.append(self.scan_anchor(AnchorToken))
def fetch_tag(self):
# TAG could start a simple key.
self.save_possible_simple_key()
# No simple keys after TAG.
self.allow_simple_key = False
# Scan and add TAG.
self.tokens.append(self.scan_tag())
def fetch_literal(self):
self.fetch_block_scalar(style='|')
def fetch_folded(self):
self.fetch_block_scalar(style='>')
def fetch_block_scalar(self, style):
# A simple key may follow a block scalar.
self.allow_simple_key = True
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Scan and add SCALAR.
self.tokens.append(self.scan_block_scalar(style))
def fetch_single(self):
self.fetch_flow_scalar(style='\'')
def fetch_double(self):
self.fetch_flow_scalar(style='"')
def fetch_flow_scalar(self, style):
# A flow scalar could be a simple key.
self.save_possible_simple_key()
# No simple keys after flow scalars.
self.allow_simple_key = False
# Scan and add SCALAR.
self.tokens.append(self.scan_flow_scalar(style))
def fetch_plain(self):
# A plain scalar could be a simple key.
self.save_possible_simple_key()
# No simple keys after plain scalars. But note that `scan_plain` will
# change this flag if the scan is finished at the beginning of the
# line.
self.allow_simple_key = False
# Scan and add SCALAR. May change `allow_simple_key`.
self.tokens.append(self.scan_plain())
# Checkers.
def check_directive(self):
# DIRECTIVE: ^ '%' ...
# The '%' indicator is already checked.
if self.column == 0:
return True
def check_document_start(self):
# DOCUMENT-START: ^ '---' (' '|'\n')
if self.column == 0:
if self.prefix(3) == u'---' \
and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
return True
def check_document_end(self):
# DOCUMENT-END: ^ '...' (' '|'\n')
if self.column == 0:
if self.prefix(3) == u'...' \
and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
return True
def check_block_entry(self):
# BLOCK-ENTRY: '-' (' '|'\n')
return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
def check_key(self):
# KEY(flow context): '?'
if self.flow_level:
return True
# KEY(block context): '?' (' '|'\n')
else:
return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
def check_value(self):
# VALUE(flow context): ':'
if self.flow_level:
return True
# VALUE(block context): ':' (' '|'\n')
else:
return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
    def check_plain(self):
        """Return True if the current character may begin a plain scalar."""
        # A plain scalar may start with any non-space character except:
        #   '-', '?', ':', ',', '[', ']', '{', '}',
        #   '#', '&', '*', '!', '|', '>', '\'', '\"',
        #   '%', '@', '`'.
        #
        # It may also start with
        #   '-', '?', ':'
        # if it is followed by a non-space character.
        #
        # Note that we limit the last rule to the block context (except the
        # '-' character) because we want the flow context to be space
        # independent.
        ch = self.peek()
        return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
                or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
                        and (ch == u'-' or (not self.flow_level and ch in u'?:')))
# Scanners.
    def scan_to_next_token(self):
        """Skip spaces, comments and line breaks until the next token;
        after a line break in the block context, allow a simple key."""
        # We ignore spaces, line breaks and comments.
        # If we find a line break in the block context, we set the flag
        # `allow_simple_key` on.
        # The byte order mark is stripped if it's the first character in the
        # stream. We do not yet support BOM inside the stream as the
        # specification requires. Any such mark will be considered as a part
        # of the document.
        #
        # TODO: We need to make tab handling rules more sane. A good rule is
        #   Tabs cannot precede tokens
        #   BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
        #   KEY(block), VALUE(block), BLOCK-ENTRY
        # So the checking code is
        #   if <TAB>:
        #       self.allow_simple_keys = False
        # We also need to add the check for `allow_simple_keys == True` to
        # `unwind_indent` before issuing BLOCK-END.
        # Scanners for block, flow, and plain scalars need to be modified.
        if self.index == 0 and self.peek() == u'\uFEFF':
            # Strip a leading byte order mark.
            self.forward()
        found = False
        while not found:
            while self.peek() == u' ':
                self.forward()
            if self.peek() == u'#':
                # Comments run to the end of the line.
                while self.peek() not in u'\0\r\n\x85\u2028\u2029':
                    self.forward()
            if self.scan_line_break():
                if not self.flow_level:
                    self.allow_simple_key = True
            else:
                found = True
    def scan_directive(self):
        """Scan a '%YAML'/'%TAG'/unknown directive line and return a
        DirectiveToken (value is None for unknown directives)."""
        # See the specification for details.
        start_mark = self.get_mark()
        self.forward()
        name = self.scan_directive_name(start_mark)
        value = None
        if name == u'YAML':
            value = self.scan_yaml_directive_value(start_mark)
            end_mark = self.get_mark()
        elif name == u'TAG':
            value = self.scan_tag_directive_value(start_mark)
            end_mark = self.get_mark()
        else:
            # Unknown directive: its arguments are skipped.
            end_mark = self.get_mark()
            while self.peek() not in u'\0\r\n\x85\u2028\u2029':
                self.forward()
        self.scan_directive_ignored_line(start_mark)
        return DirectiveToken(name, value, start_mark, end_mark)
    def scan_directive_name(self, start_mark):
        """Scan the directive name right after '%': a non-empty run of
        ASCII alphanumerics, '-' or '_', terminated by space or EOL."""
        # See the specification for details.
        length = 0
        ch = self.peek(length)
        while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
                or ch in u'-_':
            length += 1
            ch = self.peek(length)
        if not length:
            raise ScannerError("while scanning a directive", start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        value = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        return value
    def scan_yaml_directive_value(self, start_mark):
        """Scan the '%YAML' payload: a '<major>.<minor>' version pair,
        returned as a tuple of two ints."""
        # See the specification for details.
        while self.peek() == u' ':
            self.forward()
        major = self.scan_yaml_directive_number(start_mark)
        if self.peek() != '.':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit or '.', but found %r"
                    % self.peek().encode('utf-8'),
                    self.get_mark())
        self.forward()
        minor = self.scan_yaml_directive_number(start_mark)
        if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit or ' ', but found %r"
                    % self.peek().encode('utf-8'),
                    self.get_mark())
        return (major, minor)
    def scan_yaml_directive_number(self, start_mark):
        """Scan one decimal integer component of the version number."""
        # See the specification for details.
        ch = self.peek()
        if not (u'0' <= ch <= '9'):
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit, but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        length = 0
        while u'0' <= self.peek(length) <= u'9':
            length += 1
        value = int(self.prefix(length))
        self.forward(length)
        return value
def scan_tag_directive_value(self, start_mark):
# See the specification for details.
while self.peek() == u' ':
self.forward()
handle = self.scan_tag_directive_handle(start_mark)
while self.peek() == u' ':
self.forward()
prefix = self.scan_tag_directive_prefix(start_mark)
return (handle, prefix)
def scan_tag_directive_handle(self, start_mark):
# See the specification for details.
value = self.scan_tag_handle('directive', start_mark)
ch = self.peek()
if ch != u' ':
raise ScannerError("while scanning a directive", start_mark,
"expected ' ', but found %r" % ch.encode('utf-8'),
self.get_mark())
return value
def scan_tag_directive_prefix(self, start_mark):
# See the specification for details.
value = self.scan_tag_uri('directive', start_mark)
ch = self.peek()
if ch not in u'\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a directive", start_mark,
"expected ' ', but found %r" % ch.encode('utf-8'),
self.get_mark())
return value
    def scan_directive_ignored_line(self, start_mark):
        """Skip trailing spaces and an optional comment after a
        directive, then consume the line break."""
        # See the specification for details.
        while self.peek() == u' ':
            self.forward()
        if self.peek() == u'#':
            while self.peek() not in u'\0\r\n\x85\u2028\u2029':
                self.forward()
        ch = self.peek()
        if ch not in u'\0\r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a comment or a line break, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        self.scan_line_break()
    def scan_anchor(self, TokenClass):
        """Scan an anchor ('&name') or alias ('*name') and return a
        TokenClass token carrying the name."""
        # The specification does not restrict characters for anchors and
        # aliases. This may lead to problems, for instance, the document:
        #   [ *alias, value ]
        # can be interpteted in two ways, as
        #   [ "value" ]
        # and
        #   [ *alias , "value" ]
        # Therefore we restrict aliases to numbers and ASCII letters.
        start_mark = self.get_mark()
        indicator = self.peek()
        if indicator == '*':
            name = 'alias'
        else:
            name = 'anchor'
        self.forward()
        length = 0
        ch = self.peek(length)
        while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
                or ch in u'-_':
            length += 1
            ch = self.peek(length)
        if not length:
            raise ScannerError("while scanning an %s" % name, start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        value = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
            raise ScannerError("while scanning an %s" % name, start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        end_mark = self.get_mark()
        return TokenClass(value, start_mark, end_mark)
    def scan_tag(self):
        """Scan a tag property ('!<uri>', '!', '!suffix' or
        '!handle!suffix') and return a TagToken whose value is the
        (handle, suffix) pair (handle is None for verbatim tags)."""
        # See the specification for details.
        start_mark = self.get_mark()
        ch = self.peek(1)
        if ch == u'<':
            # Verbatim tag: '!<' uri '>'.
            handle = None
            self.forward(2)
            suffix = self.scan_tag_uri('tag', start_mark)
            if self.peek() != u'>':
                raise ScannerError("while parsing a tag", start_mark,
                        "expected '>', but found %r" % self.peek().encode('utf-8'),
                        self.get_mark())
            self.forward()
        elif ch in u'\0 \t\r\n\x85\u2028\u2029':
            # A lone '!' is the non-specific tag.
            handle = None
            suffix = u'!'
            self.forward()
        else:
            # Look ahead for a second '!' to decide whether this is a
            # named handle ('!handle!suffix') or the primary handle.
            length = 1
            use_handle = False
            while ch not in u'\0 \r\n\x85\u2028\u2029':
                if ch == u'!':
                    use_handle = True
                    break
                length += 1
                ch = self.peek(length)
            handle = u'!'
            if use_handle:
                handle = self.scan_tag_handle('tag', start_mark)
            else:
                handle = u'!'
                self.forward()
            suffix = self.scan_tag_uri('tag', start_mark)
        ch = self.peek()
        if ch not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a tag", start_mark,
                    "expected ' ', but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        value = (handle, suffix)
        end_mark = self.get_mark()
        return TagToken(value, start_mark, end_mark)
    def scan_block_scalar(self, style):
        """Scan a literal ('|') or folded ('>') block scalar and return
        a ScalarToken (implicit=False) carrying the chomped text."""
        # See the specification for details.
        if style == '>':
            folded = True
        else:
            folded = False
        chunks = []
        start_mark = self.get_mark()
        # Scan the header.
        self.forward()
        chomping, increment = self.scan_block_scalar_indicators(start_mark)
        self.scan_block_scalar_ignored_line(start_mark)
        # Determine the indentation level and go to the first non-empty line.
        min_indent = self.indent+1
        if min_indent < 1:
            min_indent = 1
        if increment is None:
            # Auto-detect the indentation from the first non-empty line.
            breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
            indent = max(min_indent, max_indent)
        else:
            # Explicit indentation indicator.
            indent = min_indent+increment-1
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
        line_break = u''
        # Scan the inner part of the block scalar.
        while self.column == indent and self.peek() != u'\0':
            chunks.extend(breaks)
            leading_non_space = self.peek() not in u' \t'
            length = 0
            while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
                length += 1
            chunks.append(self.prefix(length))
            self.forward(length)
            line_break = self.scan_line_break()
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
            if self.column == indent and self.peek() != u'\0':
                # Unfortunately, folding rules are ambiguous.
                #
                # This is the folding according to the specification:
                if folded and line_break == u'\n' \
                        and leading_non_space and self.peek() not in u' \t':
                    if not breaks:
                        chunks.append(u' ')
                else:
                    chunks.append(line_break)
                # This is Clark Evans's interpretation (also in the spec
                # examples):
                #
                #if folded and line_break == u'\n':
                #    if not breaks:
                #        if self.peek() not in ' \t':
                #            chunks.append(u' ')
                #        else:
                #            chunks.append(line_break)
                #else:
                #    chunks.append(line_break)
            else:
                break
        # Chomp the tail.
        if chomping is not False:
            chunks.append(line_break)
        if chomping is True:
            chunks.extend(breaks)
        # We are done.
        return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
                style)
    def scan_block_scalar_indicators(self, start_mark):
        """Scan the optional chomping ('+'/'-') and indentation ('1'-'9')
        indicators of a block scalar header, in either order.

        Returns (chomping, increment): chomping is True ('+'), False
        ('-') or None (clip); increment is an int in 1-9 or None.
        """
        # See the specification for details.
        chomping = None
        increment = None
        ch = self.peek()
        if ch in u'+-':
            # Chomping indicator first, then an optional indent digit.
            if ch == '+':
                chomping = True
            else:
                chomping = False
            self.forward()
            ch = self.peek()
            if ch in u'0123456789':
                increment = int(ch)
                if increment == 0:
                    raise ScannerError("while scanning a block scalar", start_mark,
                            "expected indentation indicator in the range 1-9, but found 0",
                            self.get_mark())
                self.forward()
        elif ch in u'0123456789':
            # Indent digit first, then an optional chomping indicator.
            increment = int(ch)
            if increment == 0:
                raise ScannerError("while scanning a block scalar", start_mark,
                        "expected indentation indicator in the range 1-9, but found 0",
                        self.get_mark())
            self.forward()
            ch = self.peek()
            if ch in u'+-':
                if ch == '+':
                    chomping = True
                else:
                    chomping = False
                self.forward()
        ch = self.peek()
        if ch not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a block scalar", start_mark,
                    "expected chomping or indentation indicators, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        return chomping, increment
    def scan_block_scalar_ignored_line(self, start_mark):
        """Skip trailing spaces and an optional comment after a block
        scalar header, then consume the line break."""
        # See the specification for details.
        while self.peek() == u' ':
            self.forward()
        if self.peek() == u'#':
            while self.peek() not in u'\0\r\n\x85\u2028\u2029':
                self.forward()
        ch = self.peek()
        if ch not in u'\0\r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a block scalar", start_mark,
                    "expected a comment or a line break, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        self.scan_line_break()
def scan_block_scalar_indentation(self):
# See the specification for details.
chunks = []
max_indent = 0
end_mark = self.get_mark()
while self.peek() in u' \r\n\x85\u2028\u2029':
if self.peek() != u' ':
chunks.append(self.scan_line_break())
end_mark = self.get_mark()
else:
self.forward()
if self.column > max_indent:
max_indent = self.column
return chunks, max_indent, end_mark
    def scan_block_scalar_breaks(self, indent):
        """Consume blank/short-indented lines up to `indent` columns,
        returning the collected line breaks and the mark after the last
        one."""
        # See the specification for details.
        chunks = []
        end_mark = self.get_mark()
        while self.column < indent and self.peek() == u' ':
            self.forward()
        while self.peek() in u'\r\n\x85\u2028\u2029':
            chunks.append(self.scan_line_break())
            end_mark = self.get_mark()
            while self.column < indent and self.peek() == u' ':
                self.forward()
        return chunks, end_mark
    def scan_flow_scalar(self, style):
        """Scan a quoted scalar (style is '\\'' or '"') and return a
        ScalarToken (implicit=False)."""
        # See the specification for details.
        # Note that we loose indentation rules for quoted scalars. Quoted
        # scalars don't need to adhere indentation because " and ' clearly
        # mark the beginning and the end of them. Therefore we are less
        # restrictive then the specification requires. We only need to check
        # that document separators are not included in scalars.
        if style == '"':
            double = True
        else:
            double = False
        chunks = []
        start_mark = self.get_mark()
        quote = self.peek()
        self.forward()
        # Alternate runs of non-blank text and folded whitespace until
        # the closing quote.
        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
        while self.peek() != quote:
            chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
            chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
        self.forward()
        end_mark = self.get_mark()
        return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
                style)
ESCAPE_REPLACEMENTS = {
u'0': u'\0',
u'a': u'\x07',
u'b': u'\x08',
u't': u'\x09',
u'\t': u'\x09',
u'n': u'\x0A',
u'v': u'\x0B',
u'f': u'\x0C',
u'r': u'\x0D',
u'e': u'\x1B',
u' ': u'\x20',
u'\"': u'\"',
u'\\': u'\\',
u'N': u'\x85',
u'_': u'\xA0',
u'L': u'\u2028',
u'P': u'\u2029',
}
ESCAPE_CODES = {
u'x': 2,
u'u': 4,
u'U': 8,
}
    def scan_flow_scalar_non_spaces(self, double, start_mark):
        """Scan a run of non-blank characters inside a quoted scalar;
        in double-quoted style (`double` is true) backslash escapes are
        resolved here."""
        # See the specification for details.
        chunks = []
        while True:
            length = 0
            while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
                length += 1
            if length:
                chunks.append(self.prefix(length))
                self.forward(length)
            ch = self.peek()
            if not double and ch == u'\'' and self.peek(1) == u'\'':
                # '' inside a single-quoted scalar is an escaped quote.
                chunks.append(u'\'')
                self.forward(2)
            elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
                # Literal character in this quoting style.
                chunks.append(ch)
                self.forward()
            elif double and ch == u'\\':
                self.forward()
                ch = self.peek()
                if ch in self.ESCAPE_REPLACEMENTS:
                    chunks.append(self.ESCAPE_REPLACEMENTS[ch])
                    self.forward()
                elif ch in self.ESCAPE_CODES:
                    # Numeric escape: \xXX, \uXXXX or \UXXXXXXXX.
                    length = self.ESCAPE_CODES[ch]
                    self.forward()
                    for k in range(length):
                        if self.peek(k) not in u'0123456789ABCDEFabcdef':
                            raise ScannerError("while scanning a double-quoted scalar", start_mark,
                                    "expected escape sequence of %d hexdecimal numbers, but found %r" %
                                        (length, self.peek(k).encode('utf-8')), self.get_mark())
                    code = int(self.prefix(length), 16)
                    chunks.append(unichr(code))
                    self.forward(length)
                elif ch in u'\r\n\x85\u2028\u2029':
                    # An escaped line break is removed entirely.
                    self.scan_line_break()
                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
                else:
                    raise ScannerError("while scanning a double-quoted scalar", start_mark,
                            "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
            else:
                return chunks
    def scan_flow_scalar_spaces(self, double, start_mark):
        """Scan whitespace inside a quoted scalar, folding line breaks:
        a single '\\n' folds to a space, other breaks are kept."""
        # See the specification for details.
        chunks = []
        length = 0
        while self.peek(length) in u' \t':
            length += 1
        whitespaces = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch == u'\0':
            raise ScannerError("while scanning a quoted scalar", start_mark,
                    "found unexpected end of stream", self.get_mark())
        elif ch in u'\r\n\x85\u2028\u2029':
            line_break = self.scan_line_break()
            breaks = self.scan_flow_scalar_breaks(double, start_mark)
            if line_break != u'\n':
                chunks.append(line_break)
            elif not breaks:
                chunks.append(u' ')
            chunks.extend(breaks)
        else:
            chunks.append(whitespaces)
        return chunks
    def scan_flow_scalar_breaks(self, double, start_mark):
        """Consume consecutive blank lines inside a quoted scalar and
        return the collected line breaks; reject document separators."""
        # See the specification for details.
        chunks = []
        while True:
            # Instead of checking indentation, we check for document
            # separators.
            prefix = self.prefix(3)
            if (prefix == u'---' or prefix == u'...') \
                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                raise ScannerError("while scanning a quoted scalar", start_mark,
                        "found unexpected document separator", self.get_mark())
            while self.peek() in u' \t':
                self.forward()
            if self.peek() in u'\r\n\x85\u2028\u2029':
                chunks.append(self.scan_line_break())
            else:
                return chunks
    def scan_plain(self):
        """Scan a plain (unquoted) scalar and return a ScalarToken
        (implicit=True).  May set `allow_simple_key` back to True via
        scan_plain_spaces when a line break is consumed."""
        # See the specification for details.
        # We add an additional restriction for the flow context:
        #   plain scalars in the flow context cannot contain ',', ':' and '?'.
        # We also keep track of the `allow_simple_key` flag here.
        # Indentation rules are loosened for the flow context.
        chunks = []
        start_mark = self.get_mark()
        end_mark = start_mark
        indent = self.indent+1
        # We allow zero indentation for scalars, but then we need to check for
        # document separators at the beginning of the line.
        #if indent == 0:
        #    indent = 1
        spaces = []
        while True:
            length = 0
            if self.peek() == u'#':
                break
            while True:
                ch = self.peek(length)
                if ch in u'\0 \t\r\n\x85\u2028\u2029' \
                        or (not self.flow_level and ch == u':' and
                            self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
                        or (self.flow_level and ch in u',:?[]{}'):
                    break
                length += 1
            # It's not clear what we should do with ':' in the flow context.
            if (self.flow_level and ch == u':'
                    and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
                self.forward(length)
                raise ScannerError("while scanning a plain scalar", start_mark,
                        "found unexpected ':'", self.get_mark(),
                        "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
            if length == 0:
                break
            self.allow_simple_key = False
            chunks.extend(spaces)
            chunks.append(self.prefix(length))
            self.forward(length)
            end_mark = self.get_mark()
            spaces = self.scan_plain_spaces(indent, start_mark)
            if not spaces or self.peek() == u'#' \
                    or (not self.flow_level and self.column < indent):
                break
        return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
    def scan_plain_spaces(self, indent, start_mark):
        """Scan whitespace between chunks of a plain scalar, folding
        line breaks; return None when a document separator terminates
        the scalar."""
        # See the specification for details.
        # The specification is really confusing about tabs in plain scalars.
        # We just forbid them completely. Do not use tabs in YAML!
        chunks = []
        length = 0
        while self.peek(length) in u' ':
            length += 1
        whitespaces = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch in u'\r\n\x85\u2028\u2029':
            line_break = self.scan_line_break()
            # A line break re-enables simple keys.
            self.allow_simple_key = True
            prefix = self.prefix(3)
            if (prefix == u'---' or prefix == u'...') \
                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                return
            breaks = []
            while self.peek() in u' \r\n\x85\u2028\u2029':
                if self.peek() == ' ':
                    self.forward()
                else:
                    breaks.append(self.scan_line_break())
                    prefix = self.prefix(3)
                    if (prefix == u'---' or prefix == u'...') \
                            and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                        return
            if line_break != u'\n':
                chunks.append(line_break)
            elif not breaks:
                chunks.append(u' ')
            chunks.extend(breaks)
        elif whitespaces:
            chunks.append(whitespaces)
        return chunks
    def scan_tag_handle(self, name, start_mark):
        """Scan a tag handle: '!', '!!' or '!word!'; `name` names the
        surrounding construct for error messages."""
        # See the specification for details.
        # For some strange reasons, the specification does not allow '_' in
        # tag handles. I have allowed it anyway.
        ch = self.peek()
        if ch != u'!':
            raise ScannerError("while scanning a %s" % name, start_mark,
                    "expected '!', but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        length = 1
        ch = self.peek(length)
        if ch != u' ':
            while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
                    or ch in u'-_':
                length += 1
                ch = self.peek(length)
            if ch != u'!':
                self.forward(length)
                raise ScannerError("while scanning a %s" % name, start_mark,
                        "expected '!', but found %r" % ch.encode('utf-8'),
                        self.get_mark())
            length += 1
        value = self.prefix(length)
        self.forward(length)
        return value
    def scan_tag_uri(self, name, start_mark):
        """Scan a tag URI, decoding %XX escapes along the way; `name`
        names the surrounding construct for error messages."""
        # See the specification for details.
        # Note: we do not check if URI is well-formed.
        chunks = []
        length = 0
        ch = self.peek(length)
        while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
                or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
            if ch == u'%':
                # Flush the plain run, then decode the escape sequence.
                chunks.append(self.prefix(length))
                self.forward(length)
                length = 0
                chunks.append(self.scan_uri_escapes(name, start_mark))
            else:
                length += 1
            ch = self.peek(length)
        if length:
            chunks.append(self.prefix(length))
            self.forward(length)
            length = 0
        if not chunks:
            raise ScannerError("while parsing a %s" % name, start_mark,
                    "expected URI, but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        return u''.join(chunks)
    def scan_uri_escapes(self, name, start_mark):
        """Decode a run of %XX escapes in a tag URI into a unicode
        string (the bytes are interpreted as UTF-8)."""
        # See the specification for details.
        # NOTE(review): the local name 'bytes' shadows the builtin.
        bytes = []
        mark = self.get_mark()
        while self.peek() == u'%':
            self.forward()
            for k in range(2):
                if self.peek(k) not in u'0123456789ABCDEFabcdef':
                    raise ScannerError("while scanning a %s" % name, start_mark,
                            "expected URI escape sequence of 2 hexdecimal numbers, but found %r" %
                                (self.peek(k).encode('utf-8')), self.get_mark())
            bytes.append(chr(int(self.prefix(2), 16)))
            self.forward(2)
        try:
            value = unicode(''.join(bytes), 'utf-8')
        except UnicodeDecodeError, exc:
            raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
        return value
def scan_line_break(self):
# Transforms:
# '\r\n' : '\n'
# '\r' : '\n'
# '\n' : '\n'
# '\x85' : '\n'
# '\u2028' : '\u2028'
# '\u2029 : '\u2029'
# default : ''
ch = self.peek()
if ch in u'\r\n\x85':
if self.prefix(2) == u'\r\n':
self.forward(2)
else:
self.forward()
return u'\n'
elif ch in u'\u2028\u2029':
self.forward()
return ch
return u''
#try:
# import psyco
# psyco.bind(Scanner)
#except ImportError:
# pass
| Python |
__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
from reader import *
from scanner import *
from parser import *
from composer import *
from constructor import *
from resolver import *
class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
    """Loader wired from the base constructor and base resolver mixins."""

    def __init__(self, stream):
        Reader.__init__(self, stream)
        # The remaining mixins take no constructor arguments.
        for mixin in (Scanner, Parser, Composer, BaseConstructor,
                BaseResolver):
            mixin.__init__(self)
class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
    """Loader wired from SafeConstructor and the full Resolver."""

    def __init__(self, stream):
        Reader.__init__(self, stream)
        # The remaining mixins take no constructor arguments.
        for mixin in (Scanner, Parser, Composer, SafeConstructor, Resolver):
            mixin.__init__(self)
class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
    """Loader wired from the full Constructor and Resolver."""

    def __init__(self, stream):
        Reader.__init__(self, stream)
        # The remaining mixins take no constructor arguments.
        for mixin in (Scanner, Parser, Composer, Constructor, Resolver):
            mixin.__init__(self)
| Python |
__all__ = ['Composer', 'ComposerError']
from error import MarkedYAMLError
from events import *
from nodes import *
class ComposerError(MarkedYAMLError):
    """Raised for structural errors while composing the node graph,
    e.g. an undefined alias or a duplicate anchor."""
    pass
class Composer(object):
    """Builds a node graph from the parser's event stream.

    Consumes events via check_event/get_event/peek_event and resolves
    tags via descend_resolver/resolve/ascend_resolver (both sets of
    methods are supplied by sibling mixins).  Anchored nodes are kept
    in self.anchors so aliases can share them within one document.
    """
    def __init__(self):
        # Maps anchor name -> already-composed node (reset per document).
        self.anchors = {}
    def check_node(self):
        """Return True if another document (root node) is available."""
        # Drop the STREAM-START event.
        if self.check_event(StreamStartEvent):
            self.get_event()
        # If there are more documents available?
        return not self.check_event(StreamEndEvent)
    def get_node(self):
        """Compose and return the next document's root node, or None at
        the end of the stream."""
        # Get the root node of the next document.
        if not self.check_event(StreamEndEvent):
            return self.compose_document()
    def compose_document(self):
        """Compose one document between DOCUMENT-START/END events."""
        # Drop the DOCUMENT-START event.
        self.get_event()
        # Compose the root node.
        node = self.compose_node(None, None)
        # Drop the DOCUMENT-END event.
        self.get_event()
        # Anchors do not carry over between documents.
        self.anchors = {}
        return node
    def compose_node(self, parent, index):
        """Compose the next node; `parent` and `index` give the resolver
        its context."""
        if self.check_event(AliasEvent):
            # An alias refers to (and shares) a previously composed node.
            event = self.get_event()
            anchor = event.anchor
            if anchor not in self.anchors:
                raise ComposerError(None, None, "found undefined alias %r"
                        % anchor.encode('utf-8'), event.start_mark)
            return self.anchors[anchor]
        event = self.peek_event()
        anchor = event.anchor
        if anchor is not None:
            if anchor in self.anchors:
                raise ComposerError("found duplicate anchor %r; first occurence"
                        % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
                        "second occurence", event.start_mark)
        self.descend_resolver(parent, index)
        if self.check_event(ScalarEvent):
            node = self.compose_scalar_node(anchor)
        elif self.check_event(SequenceStartEvent):
            node = self.compose_sequence_node(anchor)
        elif self.check_event(MappingStartEvent):
            node = self.compose_mapping_node(anchor)
        self.ascend_resolver()
        return node
    def compose_scalar_node(self, anchor):
        """Compose a ScalarNode, resolving a missing or '!' tag."""
        event = self.get_event()
        tag = event.tag
        if tag is None or tag == u'!':
            tag = self.resolve(ScalarNode, event.value, event.implicit)
        node = ScalarNode(tag, event.value,
                event.start_mark, event.end_mark, style=event.style)
        if anchor is not None:
            self.anchors[anchor] = node
        return node
    def compose_sequence_node(self, anchor):
        """Compose a SequenceNode from SEQUENCE-START..SEQUENCE-END."""
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == u'!':
            tag = self.resolve(SequenceNode, None, start_event.implicit)
        node = SequenceNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        if anchor is not None:
            # Register before composing children so aliases inside the
            # sequence can refer back to it.
            self.anchors[anchor] = node
        index = 0
        while not self.check_event(SequenceEndEvent):
            node.value.append(self.compose_node(node, index))
            index += 1
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node
    def compose_mapping_node(self, anchor):
        """Compose a MappingNode from MAPPING-START..MAPPING-END; values
        are (key_node, value_node) pairs."""
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == u'!':
            tag = self.resolve(MappingNode, None, start_event.implicit)
        node = MappingNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        if anchor is not None:
            # Register before composing children so aliases inside the
            # mapping can refer back to it.
            self.anchors[anchor] = node
        while not self.check_event(MappingEndEvent):
            #key_event = self.peek_event()
            item_key = self.compose_node(node, None)
            #if item_key in node.value:
            #    raise ComposerError("while composing a mapping", start_event.start_mark,
            #            "found duplicate key", key_event.start_mark)
            item_value = self.compose_node(node, item_key)
            #node.value[item_key] = item_value
            node.value.append((item_key, item_value))
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node
| Python |
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
from emitter import *
from serializer import *
from representer import *
from resolver import *
class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
    """Dumper assembled from Emitter, Serializer, BaseRepresenter and
    BaseResolver; each mixin is initialized explicitly, mirroring
    SafeDumper and Dumper below."""

    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        Emitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width,
                # BUG FIX: the keyword was misspelled 'allow_uncode',
                # which made every BaseDumper construction raise
                # TypeError inside Emitter.__init__.
                allow_unicode=allow_unicode, line_break=line_break)
        Serializer.__init__(self, encoding=encoding,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        # BUG FIX: this class derives from BaseRepresenter/BaseResolver,
        # not Representer/Resolver; the unbound-method calls must target
        # the actual base classes (Python 2 rejects unbound calls with a
        # non-instance), consistent with SafeDumper and Dumper which
        # call their own bases.
        BaseRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        BaseResolver.__init__(self)
class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
    """Dumper restricted to the standard YAML tags (safe representer)."""
    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # Initialize each cooperating base explicitly, in pipeline order.
        Emitter.__init__(self, stream, canonical=canonical, indent=indent,
                width=width, allow_unicode=allow_unicode,
                line_break=line_break)
        Serializer.__init__(self, encoding=encoding, version=version,
                tags=tags, explicit_start=explicit_start,
                explicit_end=explicit_end)
        SafeRepresenter.__init__(self, default_flow_style=default_flow_style,
                default_style=default_style)
        Resolver.__init__(self)
class Dumper(Emitter, Serializer, Representer, Resolver):
    """Full-featured dumper (Representer handles Python-specific tags)."""
    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # Initialize each cooperating base explicitly, in pipeline order.
        Emitter.__init__(self, stream, canonical=canonical, indent=indent,
                width=width, allow_unicode=allow_unicode,
                line_break=line_break)
        Serializer.__init__(self, encoding=encoding, version=version,
                tags=tags, explicit_start=explicit_start,
                explicit_end=explicit_end)
        Representer.__init__(self, default_flow_style=default_flow_style,
                default_style=default_style)
        Resolver.__init__(self)
| Python |
# Abstract classes.
class Event(object):
    """Base class for all parser/emitter events; carries source marks."""
    def __init__(self, start_mark=None, end_mark=None):
        self.start_mark = start_mark
        self.end_mark = end_mark
    def __repr__(self):
        # Show only the payload attributes a subclass actually defines.
        parts = []
        for name in ['anchor', 'tag', 'implicit', 'value']:
            if hasattr(self, name):
                parts.append('%s=%r' % (name, getattr(self, name)))
        return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))
class NodeEvent(Event):
    """An event that may carry an anchor (alias target) name."""
    def __init__(self, anchor, start_mark=None, end_mark=None):
        Event.__init__(self, start_mark, end_mark)
        self.anchor = anchor
class CollectionStartEvent(NodeEvent):
    """Common start event for sequence and mapping collections."""
    def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
            flow_style=None):
        NodeEvent.__init__(self, anchor, start_mark, end_mark)
        self.tag = tag
        self.implicit = implicit
        self.flow_style = flow_style
class CollectionEndEvent(Event):
    # Common end event for sequence and mapping collections.
    pass
# Implementations.
class StreamStartEvent(Event):
    """Marks the beginning of the stream; may carry an output encoding."""
    def __init__(self, start_mark=None, end_mark=None, encoding=None):
        Event.__init__(self, start_mark, end_mark)
        self.encoding = encoding
class StreamEndEvent(Event):
    # Marks the end of the stream; no payload beyond the marks.
    pass
class DocumentStartEvent(Event):
    """Start of a document; records explicitness, version and tag handles."""
    def __init__(self, start_mark=None, end_mark=None,
            explicit=None, version=None, tags=None):
        Event.__init__(self, start_mark, end_mark)
        self.explicit = explicit
        self.version = version
        self.tags = tags
class DocumentEndEvent(Event):
    """End of a document; `explicit` records whether the end is explicit."""
    def __init__(self, start_mark=None, end_mark=None,
            explicit=None):
        Event.__init__(self, start_mark, end_mark)
        self.explicit = explicit
class AliasEvent(NodeEvent):
    # A reference (*alias) to a previously anchored node.
    pass
class ScalarEvent(NodeEvent):
    """A scalar value with its tag, implicitness flags and optional style."""
    def __init__(self, anchor, tag, implicit, value,
            start_mark=None, end_mark=None, style=None):
        NodeEvent.__init__(self, anchor, start_mark, end_mark)
        self.tag = tag
        self.implicit = implicit
        self.value = value
        self.style = style
class SequenceStartEvent(CollectionStartEvent):
    # Start of a YAML sequence.
    pass
class SequenceEndEvent(CollectionEndEvent):
    # End of a YAML sequence.
    pass
class MappingStartEvent(CollectionStartEvent):
    # Start of a YAML mapping.
    pass
class MappingEndEvent(CollectionEndEvent):
    # End of a YAML mapping.
    pass
| Python |
__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
'ConstructorError']
from error import *
from nodes import *
import datetime
try:
set
except NameError:
from sets import Set as set
import binascii, re, sys, types
class ConstructorError(MarkedYAMLError):
    # Raised when a node cannot be turned into a native Python object.
    pass
class BaseConstructor(object):
    """Build native Python objects from a composed node graph.

    The tag-to-constructor registries are class attributes and are
    copy-on-write: add_constructor()/add_multi_constructor() copy them into
    the subclass before mutating, so registrations stay local to the class
    they were added on.
    """
    yaml_constructors = {}
    yaml_multi_constructors = {}
    def __init__(self):
        # node -> constructed object cache (also serves alias reuse).
        self.constructed_objects = {}
        # Nodes currently under construction; used to detect recursion.
        self.recursive_objects = {}
        # Deferred generator-based constructors to finish per document.
        self.state_generators = []
        self.deep_construct = False
    def check_data(self):
        # If there are more documents available?
        return self.check_node()
    def get_data(self):
        # Construct and return the next document.
        if self.check_node():
            return self.construct_document(self.get_node())
    def construct_document(self, node):
        """Construct one document, then drain deferred state generators."""
        data = self.construct_object(node)
        while self.state_generators:
            # Draining a generator may append new ones; loop until stable.
            state_generators = self.state_generators
            self.state_generators = []
            for generator in state_generators:
                for dummy in generator:
                    pass
        self.constructed_objects = {}
        self.recursive_objects = {}
        self.deep_construct = False
        return data
    def construct_object(self, node, deep=False):
        """Construct the object for `node`.

        With deep=True, generator-based constructors are exhausted
        immediately instead of being deferred to construct_document().
        """
        if deep:
            old_deep = self.deep_construct
            self.deep_construct = True
        if node in self.constructed_objects:
            return self.constructed_objects[node]
        if node in self.recursive_objects:
            raise ConstructorError(None, None,
                    "found unconstructable recursive node", node.start_mark)
        self.recursive_objects[node] = None
        # Lookup order: exact tag, then multi-constructor prefix, then the
        # catch-all (None) entries, then a fallback by node kind.
        constructor = None
        state_constructor = None
        tag_suffix = None
        if node.tag in self.yaml_constructors:
            constructor = self.yaml_constructors[node.tag]
        else:
            for tag_prefix in self.yaml_multi_constructors:
                if node.tag.startswith(tag_prefix):
                    tag_suffix = node.tag[len(tag_prefix):]
                    constructor = self.yaml_multi_constructors[tag_prefix]
                    break
            else:
                if None in self.yaml_multi_constructors:
                    tag_suffix = node.tag
                    constructor = self.yaml_multi_constructors[None]
                elif None in self.yaml_constructors:
                    constructor = self.yaml_constructors[None]
                elif isinstance(node, ScalarNode):
                    constructor = self.__class__.construct_scalar
                elif isinstance(node, SequenceNode):
                    constructor = self.__class__.construct_sequence
                elif isinstance(node, MappingNode):
                    constructor = self.__class__.construct_mapping
        if tag_suffix is None:
            data = constructor(self, node)
        else:
            data = constructor(self, tag_suffix, node)
        if isinstance(data, types.GeneratorType):
            # Two-step construction: the first yield returns the (empty)
            # object; the generator's remainder fills in its state later.
            generator = data
            data = generator.next()
            if self.deep_construct:
                for dummy in generator:
                    pass
            else:
                self.state_generators.append(generator)
        self.constructed_objects[node] = data
        del self.recursive_objects[node]
        if deep:
            self.deep_construct = old_deep
        return data
    def construct_scalar(self, node):
        """Return the scalar node's value; reject non-scalar nodes."""
        if not isinstance(node, ScalarNode):
            raise ConstructorError(None, None,
                    "expected a scalar node, but found %s" % node.id,
                    node.start_mark)
        return node.value
    def construct_sequence(self, node, deep=False):
        """Construct a list from a sequence node."""
        if not isinstance(node, SequenceNode):
            raise ConstructorError(None, None,
                    "expected a sequence node, but found %s" % node.id,
                    node.start_mark)
        return [self.construct_object(child, deep=deep)
                for child in node.value]
    def construct_mapping(self, node, deep=False):
        """Construct a dict from a mapping node; keys must be hashable."""
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        mapping = {}
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            try:
                hash(key)
            except TypeError, exc:
                raise ConstructorError("while constructing a mapping", node.start_mark,
                        "found unacceptable key (%s)" % exc, key_node.start_mark)
            value = self.construct_object(value_node, deep=deep)
            mapping[key] = value
        return mapping
    def construct_pairs(self, node, deep=False):
        """Construct a list of (key, value) pairs, preserving order."""
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        pairs = []
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            value = self.construct_object(value_node, deep=deep)
            pairs.append((key, value))
        return pairs
    def add_constructor(cls, tag, constructor):
        # Copy-on-write: keep the registry change local to `cls`.
        if not 'yaml_constructors' in cls.__dict__:
            cls.yaml_constructors = cls.yaml_constructors.copy()
        cls.yaml_constructors[tag] = constructor
    add_constructor = classmethod(add_constructor)
    def add_multi_constructor(cls, tag_prefix, multi_constructor):
        # Copy-on-write: keep the registry change local to `cls`.
        if not 'yaml_multi_constructors' in cls.__dict__:
            cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
        cls.yaml_multi_constructors[tag_prefix] = multi_constructor
    add_multi_constructor = classmethod(add_multi_constructor)
class SafeConstructor(BaseConstructor):
    """Constructor for the standard YAML tag set only (no arbitrary objects)."""
    def construct_scalar(self, node):
        # A mapping containing a 'value' ('=') key may stand in for a scalar.
        if isinstance(node, MappingNode):
            for key_node, value_node in node.value:
                if key_node.tag == u'tag:yaml.org,2002:value':
                    return self.construct_scalar(value_node)
        return BaseConstructor.construct_scalar(self, node)
    def flatten_mapping(self, node):
        """Expand '<<' merge keys in place; merged entries are prepended."""
        merge = []
        index = 0
        while index < len(node.value):
            key_node, value_node = node.value[index]
            if key_node.tag == u'tag:yaml.org,2002:merge':
                del node.value[index]
                if isinstance(value_node, MappingNode):
                    self.flatten_mapping(value_node)
                    merge.extend(value_node.value)
                elif isinstance(value_node, SequenceNode):
                    submerge = []
                    for subnode in value_node.value:
                        if not isinstance(subnode, MappingNode):
                            raise ConstructorError("while constructing a mapping",
                                    node.start_mark,
                                    "expected a mapping for merging, but found %s"
                                    % subnode.id, subnode.start_mark)
                        self.flatten_mapping(subnode)
                        submerge.append(subnode.value)
                    # Earlier mappings in the list take precedence.
                    submerge.reverse()
                    for value in submerge:
                        merge.extend(value)
                else:
                    raise ConstructorError("while constructing a mapping", node.start_mark,
                            "expected a mapping or list of mappings for merging, but found %s"
                            % value_node.id, value_node.start_mark)
            elif key_node.tag == u'tag:yaml.org,2002:value':
                key_node.tag = u'tag:yaml.org,2002:str'
                index += 1
            else:
                index += 1
        if merge:
            node.value = merge + node.value
    def construct_mapping(self, node, deep=False):
        # Resolve merge keys before the generic mapping construction.
        if isinstance(node, MappingNode):
            self.flatten_mapping(node)
        return BaseConstructor.construct_mapping(self, node, deep=deep)
    def construct_yaml_null(self, node):
        self.construct_scalar(node)
        return None
    # Recognized boolean spellings (compared lowercased).
    bool_values = {
        u'yes':     True,
        u'no':      False,
        u'true':    True,
        u'false':   False,
        u'on':      True,
        u'off':     False,
    }
    def construct_yaml_bool(self, node):
        value = self.construct_scalar(node)
        return self.bool_values[value.lower()]
    def construct_yaml_int(self, node):
        """Parse YAML 1.1 integers: binary, octal, hex, decimal, base-60."""
        value = str(self.construct_scalar(node))
        value = value.replace('_', '')
        sign = +1
        if value[0] == '-':
            sign = -1
        if value[0] in '+-':
            value = value[1:]
        if value == '0':
            return 0
        elif value.startswith('0b'):
            return sign*int(value[2:], 2)
        elif value.startswith('0x'):
            return sign*int(value[2:], 16)
        elif value[0] == '0':
            return sign*int(value, 8)
        elif ':' in value:
            # Sexagesimal (base 60), e.g. 1:30 == 90.
            digits = [int(part) for part in value.split(':')]
            digits.reverse()
            base = 1
            value = 0
            for digit in digits:
                value += digit*base
                base *= 60
            return sign*value
        else:
            return sign*int(value)
    # Compute infinity portably by squaring until it stops growing.
    inf_value = 1e300
    while inf_value != inf_value*inf_value:
        inf_value *= inf_value
    nan_value = -inf_value/inf_value   # Trying to make a quiet NaN (like C99).
    def construct_yaml_float(self, node):
        """Parse YAML 1.1 floats, including .inf/.nan and base-60 forms."""
        value = str(self.construct_scalar(node))
        value = value.replace('_', '').lower()
        sign = +1
        if value[0] == '-':
            sign = -1
        if value[0] in '+-':
            value = value[1:]
        if value == '.inf':
            return sign*self.inf_value
        elif value == '.nan':
            return self.nan_value
        elif ':' in value:
            # Sexagesimal (base 60), e.g. 1:30.5 == 90.5.
            digits = [float(part) for part in value.split(':')]
            digits.reverse()
            base = 1
            value = 0.0
            for digit in digits:
                value += digit*base
                base *= 60
            return sign*value
        else:
            return sign*float(value)
    def construct_yaml_binary(self, node):
        """Decode a base64-encoded scalar into a byte string."""
        value = self.construct_scalar(node)
        try:
            return str(value).decode('base64')
        except (binascii.Error, UnicodeEncodeError), exc:
            raise ConstructorError(None, None,
                    "failed to decode base64 data: %s" % exc, node.start_mark)
    # ISO 8601-ish timestamp as specified by the YAML timestamp tag.
    timestamp_regexp = re.compile(
            ur'''^(?P<year>[0-9][0-9][0-9][0-9])
                -(?P<month>[0-9][0-9]?)
                -(?P<day>[0-9][0-9]?)
                (?:(?:[Tt]|[ \t]+)
                (?P<hour>[0-9][0-9]?)
                :(?P<minute>[0-9][0-9])
                :(?P<second>[0-9][0-9])
                (?:\.(?P<fraction>[0-9]*))?
                (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
                (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
    def construct_yaml_timestamp(self, node):
        """Build a datetime.date or a (UTC-normalized) datetime.datetime."""
        value = self.construct_scalar(node)
        match = self.timestamp_regexp.match(node.value)
        values = match.groupdict()
        year = int(values['year'])
        month = int(values['month'])
        day = int(values['day'])
        if not values['hour']:
            # Date only.
            return datetime.date(year, month, day)
        hour = int(values['hour'])
        minute = int(values['minute'])
        second = int(values['second'])
        fraction = 0
        if values['fraction']:
            # Keep microsecond precision: pad/truncate to 6 digits.
            fraction = int(values['fraction'][:6].ljust(6, '0'))
        delta = None
        if values['tz_sign']:
            tz_hour = int(values['tz_hour'])
            tz_minute = int(values['tz_minute'] or 0)
            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
            if values['tz_sign'] == '-':
                delta = -delta
        data = datetime.datetime(year, month, day, hour, minute, second, fraction)
        if delta:
            # Normalize to UTC by subtracting the timezone offset.
            data -= delta
        return data
    def construct_yaml_omap(self, node):
        # Note: we do not check for duplicate keys, because it's too
        # CPU-expensive.
        # Two-step (generator) constructor: yield the empty list first so
        # recursive references resolve, then fill it in.
        omap = []
        yield omap
        if not isinstance(node, SequenceNode):
            raise ConstructorError("while constructing an ordered map", node.start_mark,
                    "expected a sequence, but found %s" % node.id, node.start_mark)
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError("while constructing an ordered map", node.start_mark,
                        "expected a mapping of length 1, but found %s" % subnode.id,
                        subnode.start_mark)
            if len(subnode.value) != 1:
                raise ConstructorError("while constructing an ordered map", node.start_mark,
                        "expected a single mapping item, but found %d items" % len(subnode.value),
                        subnode.start_mark)
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            value = self.construct_object(value_node)
            omap.append((key, value))
    def construct_yaml_pairs(self, node):
        # Note: the same code as `construct_yaml_omap`.
        pairs = []
        yield pairs
        if not isinstance(node, SequenceNode):
            raise ConstructorError("while constructing pairs", node.start_mark,
                    "expected a sequence, but found %s" % node.id, node.start_mark)
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError("while constructing pairs", node.start_mark,
                        "expected a mapping of length 1, but found %s" % subnode.id,
                        subnode.start_mark)
            if len(subnode.value) != 1:
                raise ConstructorError("while constructing pairs", node.start_mark,
                        "expected a single mapping item, but found %d items" % len(subnode.value),
                        subnode.start_mark)
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            value = self.construct_object(value_node)
            pairs.append((key, value))
    def construct_yaml_set(self, node):
        # Two-step constructor (see construct_yaml_omap).
        data = set()
        yield data
        value = self.construct_mapping(node)
        data.update(value)
    def construct_yaml_str(self, node):
        # Prefer a plain byte string when the value is pure ASCII.
        value = self.construct_scalar(node)
        try:
            return value.encode('ascii')
        except UnicodeEncodeError:
            return value
    def construct_yaml_seq(self, node):
        # Two-step constructor (see construct_yaml_omap).
        data = []
        yield data
        data.extend(self.construct_sequence(node))
    def construct_yaml_map(self, node):
        # Two-step constructor (see construct_yaml_omap).
        data = {}
        yield data
        value = self.construct_mapping(node)
        data.update(value)
    def construct_yaml_object(self, node, cls):
        """Two-step construction of an instance without calling __init__."""
        data = cls.__new__(cls)
        yield data
        if hasattr(data, '__setstate__'):
            state = self.construct_mapping(node, deep=True)
            data.__setstate__(state)
        else:
            state = self.construct_mapping(node)
            data.__dict__.update(state)
    def construct_undefined(self, node):
        # Catch-all for tags with no registered constructor.
        raise ConstructorError(None, None,
                "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
                node.start_mark)
# Register constructors for the standard YAML 1.1 tags on SafeConstructor;
# the final None entry is the catch-all for unknown tags.
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:null',
        SafeConstructor.construct_yaml_null)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:bool',
        SafeConstructor.construct_yaml_bool)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:int',
        SafeConstructor.construct_yaml_int)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:float',
        SafeConstructor.construct_yaml_float)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:binary',
        SafeConstructor.construct_yaml_binary)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:timestamp',
        SafeConstructor.construct_yaml_timestamp)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:omap',
        SafeConstructor.construct_yaml_omap)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:pairs',
        SafeConstructor.construct_yaml_pairs)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:set',
        SafeConstructor.construct_yaml_set)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:str',
        SafeConstructor.construct_yaml_str)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:seq',
        SafeConstructor.construct_yaml_seq)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:map',
        SafeConstructor.construct_yaml_map)
SafeConstructor.add_constructor(None,
        SafeConstructor.construct_undefined)
class Constructor(SafeConstructor):
def construct_python_str(self, node):
return self.construct_scalar(node).encode('utf-8')
def construct_python_unicode(self, node):
return self.construct_scalar(node)
def construct_python_long(self, node):
return long(self.construct_yaml_int(node))
def construct_python_complex(self, node):
return complex(self.construct_scalar(node))
def construct_python_tuple(self, node):
return tuple(self.construct_sequence(node))
def find_python_module(self, name, mark):
if not name:
raise ConstructorError("while constructing a Python module", mark,
"expected non-empty name appended to the tag", mark)
try:
__import__(name)
except ImportError, exc:
raise ConstructorError("while constructing a Python module", mark,
"cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
return sys.modules[name]
def find_python_name(self, name, mark):
if not name:
raise ConstructorError("while constructing a Python object", mark,
"expected non-empty name appended to the tag", mark)
if u'.' in name:
# Python 2.4 only
#module_name, object_name = name.rsplit('.', 1)
items = name.split('.')
object_name = items.pop()
module_name = '.'.join(items)
else:
module_name = '__builtin__'
object_name = name
try:
__import__(module_name)
except ImportError, exc:
raise ConstructorError("while constructing a Python object", mark,
"cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
module = sys.modules[module_name]
if not hasattr(module, object_name):
raise ConstructorError("while constructing a Python object", mark,
"cannot find %r in the module %r" % (object_name.encode('utf-8'),
module.__name__), mark)
return getattr(module, object_name)
def construct_python_name(self, suffix, node):
value = self.construct_scalar(node)
if value:
raise ConstructorError("while constructing a Python name", node.start_mark,
"expected the empty value, but found %r" % value.encode('utf-8'),
node.start_mark)
return self.find_python_name(suffix, node.start_mark)
def construct_python_module(self, suffix, node):
value = self.construct_scalar(node)
if value:
raise ConstructorError("while constructing a Python module", node.start_mark,
"expected the empty value, but found %r" % value.encode('utf-8'),
node.start_mark)
return self.find_python_module(suffix, node.start_mark)
class classobj: pass
def make_python_instance(self, suffix, node,
args=None, kwds=None, newobj=False):
if not args:
args = []
if not kwds:
kwds = {}
cls = self.find_python_name(suffix, node.start_mark)
if newobj and isinstance(cls, type(self.classobj)) \
and not args and not kwds:
instance = self.classobj()
instance.__class__ = cls
return instance
elif newobj and isinstance(cls, type):
return cls.__new__(cls, *args, **kwds)
else:
return cls(*args, **kwds)
def set_python_instance_state(self, instance, state):
if hasattr(instance, '__setstate__'):
instance.__setstate__(state)
else:
slotstate = {}
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if hasattr(instance, '__dict__'):
instance.__dict__.update(state)
elif state:
slotstate.update(state)
for key, value in slotstate.items():
setattr(object, key, value)
def construct_python_object(self, suffix, node):
# Format:
# !!python/object:module.name { ... state ... }
instance = self.make_python_instance(suffix, node, newobj=True)
yield instance
deep = hasattr(instance, '__setstate__')
state = self.construct_mapping(node, deep=deep)
self.set_python_instance_state(instance, state)
def construct_python_object_apply(self, suffix, node, newobj=False):
# Format:
# !!python/object/apply # (or !!python/object/new)
# args: [ ... arguments ... ]
# kwds: { ... keywords ... }
# state: ... state ...
# listitems: [ ... listitems ... ]
# dictitems: { ... dictitems ... }
# or short format:
# !!python/object/apply [ ... arguments ... ]
# The difference between !!python/object/apply and !!python/object/new
# is how an object is created, check make_python_instance for details.
if isinstance(node, SequenceNode):
args = self.construct_sequence(node, deep=True)
kwds = {}
state = {}
listitems = []
dictitems = {}
else:
value = self.construct_mapping(node, deep=True)
args = value.get('args', [])
kwds = value.get('kwds', {})
state = value.get('state', {})
listitems = value.get('listitems', [])
dictitems = value.get('dictitems', {})
instance = self.make_python_instance(suffix, node, args, kwds, newobj)
if state:
self.set_python_instance_state(instance, state)
if listitems:
instance.extend(listitems)
if dictitems:
for key in dictitems:
instance[key] = dictitems[key]
return instance
def construct_python_object_new(self, suffix, node):
return self.construct_python_object_apply(suffix, node, newobj=True)
# Register the python/... tags on Constructor (exact tags first, then the
# multi-constructors whose tag carries a name/module suffix).
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/none',
    Constructor.construct_yaml_null)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/bool',
    Constructor.construct_yaml_bool)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/str',
    Constructor.construct_python_str)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/unicode',
    Constructor.construct_python_unicode)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/int',
    Constructor.construct_yaml_int)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/long',
    Constructor.construct_python_long)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/float',
    Constructor.construct_yaml_float)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/complex',
    Constructor.construct_python_complex)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/list',
    Constructor.construct_yaml_seq)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/tuple',
    Constructor.construct_python_tuple)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/dict',
    Constructor.construct_yaml_map)
Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/name:',
    Constructor.construct_python_name)
Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/module:',
    Constructor.construct_python_module)
Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/object:',
    Constructor.construct_python_object)
Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/object/apply:',
    Constructor.construct_python_object_apply)
Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/object/new:',
    Constructor.construct_python_object_new)
| Python |
__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
'CBaseDumper', 'CSafeDumper', 'CDumper']
from _yaml import CParser, CEmitter
from constructor import *
from serializer import *
from representer import *
from resolver import *
class CBaseLoader(CParser, BaseConstructor, BaseResolver):
    """Loader using the C parser with the base constructor/resolver."""
    def __init__(self, stream):
        CParser.__init__(self, stream)
        BaseConstructor.__init__(self)
        BaseResolver.__init__(self)
class CSafeLoader(CParser, SafeConstructor, Resolver):
    """Loader using the C parser with the safe (standard-tags) constructor."""
    def __init__(self, stream):
        CParser.__init__(self, stream)
        SafeConstructor.__init__(self)
        Resolver.__init__(self)
class CLoader(CParser, Constructor, Resolver):
    """Loader using the C parser with the full (python/...) constructor."""
    def __init__(self, stream):
        CParser.__init__(self, stream)
        Constructor.__init__(self)
        Resolver.__init__(self)
class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
    """Dumper using the C emitter with the base representer/resolver."""
    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # CEmitter also takes over the Serializer's document options.
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        # NOTE(review): bases are BaseRepresenter/BaseResolver but
        # Representer/Resolver are initialized here -- confirm this
        # mismatch is intentional.
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
    """Dumper using the C emitter with the safe (standard-tags) representer."""
    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # CEmitter also takes over the Serializer's document options.
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        SafeRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
class CDumper(CEmitter, Serializer, Representer, Resolver):
    """Dumper using the C emitter with the full Python representer."""
    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # CEmitter also takes over the Serializer's document options.
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
| Python |
__all__ = ['Serializer', 'SerializerError']
from error import YAMLError
from events import *
from nodes import *
class SerializerError(YAMLError):
    # Raised on serializer state misuse (not opened / already closed).
    pass
class Serializer(object):
    """Turn node graphs into a stream of events, emitting aliases for
    nodes that occur more than once."""

    ANCHOR_TEMPLATE = u'id%03d'
    def __init__(self, encoding=None,
            explicit_start=None, explicit_end=None, version=None, tags=None):
        self.use_encoding = encoding
        self.use_explicit_start = explicit_start
        self.use_explicit_end = explicit_end
        self.use_version = version
        self.use_tags = tags
        self.serialized_nodes = {}
        self.anchors = {}
        self.last_anchor_id = 0
        # Tri-state: None = never opened, False = open, True = closed.
        self.closed = None
    def open(self):
        """Emit the stream-start event; may be called exactly once."""
        if self.closed is None:
            self.emit(StreamStartEvent(encoding=self.use_encoding))
            self.closed = False
        elif self.closed:
            raise SerializerError("serializer is closed")
        else:
            raise SerializerError("serializer is already opened")
    def close(self):
        """Emit the stream-end event; calling again is a no-op."""
        if self.closed is None:
            raise SerializerError("serializer is not opened")
        elif not self.closed:
            self.emit(StreamEndEvent())
            self.closed = True
    #def __del__(self):
    #    self.close()
    def serialize(self, node):
        """Serialize one document (node graph) into events."""
        if self.closed is None:
            raise SerializerError("serializer is not opened")
        elif self.closed:
            raise SerializerError("serializer is closed")
        self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
            version=self.use_version, tags=self.use_tags))
        self.anchor_node(node)
        self.serialize_node(node, None, None)
        self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
        self.serialized_nodes = {}
        self.anchors = {}
        # BUGFIX: was `self.last_alias_id = 0`, which only created an unused
        # attribute and left the anchor counter unreset between documents.
        self.last_anchor_id = 0
    def anchor_node(self, node):
        """First pass: record nodes seen more than once and name them."""
        if node in self.anchors:
            # Second visit: the node needs a real anchor name.
            if self.anchors[node] is None:
                self.anchors[node] = self.generate_anchor(node)
        else:
            self.anchors[node] = None
            if isinstance(node, SequenceNode):
                for item in node.value:
                    self.anchor_node(item)
            elif isinstance(node, MappingNode):
                for key, value in node.value:
                    self.anchor_node(key)
                    self.anchor_node(value)
    def generate_anchor(self, node):
        """Return the next sequential anchor name (id001, id002, ...)."""
        self.last_anchor_id += 1
        return self.ANCHOR_TEMPLATE % self.last_anchor_id
    def serialize_node(self, node, parent, index):
        """Second pass: emit events; already-serialized nodes become aliases."""
        alias = self.anchors[node]
        if node in self.serialized_nodes:
            self.emit(AliasEvent(alias))
        else:
            self.serialized_nodes[node] = True
            self.descend_resolver(parent, index)
            if isinstance(node, ScalarNode):
                # implicit flags: whether the tag matches what the resolver
                # would pick with/without implicit resolution enabled.
                detected_tag = self.resolve(ScalarNode, node.value, (True, False))
                default_tag = self.resolve(ScalarNode, node.value, (False, True))
                implicit = (node.tag == detected_tag), (node.tag == default_tag)
                self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
                    style=node.style))
            elif isinstance(node, SequenceNode):
                implicit = (node.tag
                            == self.resolve(SequenceNode, node.value, True))
                self.emit(SequenceStartEvent(alias, node.tag, implicit,
                    flow_style=node.flow_style))
                index = 0
                for item in node.value:
                    self.serialize_node(item, node, index)
                    index += 1
                self.emit(SequenceEndEvent())
            elif isinstance(node, MappingNode):
                implicit = (node.tag
                            == self.resolve(MappingNode, node.value, True))
                self.emit(MappingStartEvent(alias, node.tag, implicit,
                    flow_style=node.flow_style))
                for key, value in node.value:
                    self.serialize_node(key, node, None)
                    self.serialize_node(value, node, key)
                self.emit(MappingEndEvent())
            self.ascend_resolver()
| Python |
__all__ = ['BaseResolver', 'Resolver']
from error import *
from nodes import *
import re
class ResolverError(YAMLError):
    # Raised for invalid resolver registrations (bad path/kind checkers).
    pass
class BaseResolver(object):
DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
yaml_implicit_resolvers = {}
yaml_path_resolvers = {}
def __init__(self):
self.resolver_exact_paths = []
self.resolver_prefix_paths = []
def add_implicit_resolver(cls, tag, regexp, first):
if not 'yaml_implicit_resolvers' in cls.__dict__:
cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
if first is None:
first = [None]
for ch in first:
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
add_implicit_resolver = classmethod(add_implicit_resolver)
def add_path_resolver(cls, tag, path, kind=None):
# Note: `add_path_resolver` is experimental. The API could be changed.
# `new_path` is a pattern that is matched against the path from the
# root to the node that is being considered. `node_path` elements are
# tuples `(node_check, index_check)`. `node_check` is a node class:
# `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
# matches any kind of a node. `index_check` could be `None`, a boolean
# value, a string value, or a number. `None` and `False` match against
# any _value_ of sequence and mapping nodes. `True` matches against
# any _key_ of a mapping node. A string `index_check` matches against
# a mapping value that corresponds to a scalar key which content is
# equal to the `index_check` value. An integer `index_check` matches
# against a sequence value with the index equal to `index_check`.
if not 'yaml_path_resolvers' in cls.__dict__:
cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
new_path = []
for element in path:
if isinstance(element, (list, tuple)):
if len(element) == 2:
node_check, index_check = element
elif len(element) == 1:
node_check = element[0]
index_check = True
else:
raise ResolverError("Invalid path element: %s" % element)
else:
node_check = None
index_check = element
if node_check is str:
node_check = ScalarNode
elif node_check is list:
node_check = SequenceNode
elif node_check is dict:
node_check = MappingNode
elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
and not isinstance(node_check, basestring) \
and node_check is not None:
raise ResolverError("Invalid node checker: %s" % node_check)
if not isinstance(index_check, (basestring, int)) \
and index_check is not None:
raise ResolverError("Invalid index checker: %s" % index_check)
new_path.append((node_check, index_check))
if kind is str:
kind = ScalarNode
elif kind is list:
kind = SequenceNode
elif kind is dict:
kind = MappingNode
elif kind not in [ScalarNode, SequenceNode, MappingNode] \
and kind is not None:
raise ResolverError("Invalid node kind: %s" % kind)
cls.yaml_path_resolvers[tuple(new_path), kind] = tag
add_path_resolver = classmethod(add_path_resolver)
def descend_resolver(self, current_node, current_index):
if not self.yaml_path_resolvers:
return
exact_paths = {}
prefix_paths = []
if current_node:
depth = len(self.resolver_prefix_paths)
for path, kind in self.resolver_prefix_paths[-1]:
if self.check_resolver_prefix(depth, path, kind,
current_node, current_index):
if len(path) > depth:
prefix_paths.append((path, kind))
else:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
for path, kind in self.yaml_path_resolvers:
if not path:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
prefix_paths.append((path, kind))
self.resolver_exact_paths.append(exact_paths)
self.resolver_prefix_paths.append(prefix_paths)
def ascend_resolver(self):
if not self.yaml_path_resolvers:
return
self.resolver_exact_paths.pop()
self.resolver_prefix_paths.pop()
    def check_resolver_prefix(self, depth, path, kind,
            current_node, current_index):
        # Return True if the path element at position depth-1 matches the
        # (current_node, current_index) pair, i.e. the path is still a
        # viable prefix at this depth; return None (falsy) otherwise.
        node_check, index_check = path[depth-1]
        # Node check: a string matches the node tag, a Node subclass
        # matches by isinstance, None matches any node.
        if isinstance(node_check, basestring):
            if current_node.tag != node_check:
                return
        elif node_check is not None:
            if not isinstance(current_node, node_check):
                return
        # index_check True means "match the collection itself, not an
        # entry", so an explicit index disqualifies it; False/None
        # require some index to be present.
        if index_check is True and current_index is not None:
            return
        if (index_check is False or index_check is None) \
                and current_index is None:
            return
        # A string index matches a scalar mapping key of equal value.
        if isinstance(index_check, basestring):
            if not (isinstance(current_index, ScalarNode)
                    and index_check == current_index.value):
                return
        # An integer index matches a sequence position; bool is excluded
        # explicitly because bool is a subclass of int.
        elif isinstance(index_check, int) and not isinstance(index_check, bool):
            if index_check != current_index:
                return
        return True
def resolve(self, kind, value, implicit):
if kind is ScalarNode and implicit[0]:
if value == u'':
resolvers = self.yaml_implicit_resolvers.get(u'', [])
else:
resolvers = self.yaml_implicit_resolvers.get(value[0], [])
resolvers += self.yaml_implicit_resolvers.get(None, [])
for tag, regexp in resolvers:
if regexp.match(value):
return tag
implicit = implicit[1]
if self.yaml_path_resolvers:
exact_paths = self.resolver_exact_paths[-1]
if kind in exact_paths:
return exact_paths[kind]
if None in exact_paths:
return exact_paths[None]
if kind is ScalarNode:
return self.DEFAULT_SCALAR_TAG
elif kind is SequenceNode:
return self.DEFAULT_SEQUENCE_TAG
elif kind is MappingNode:
return self.DEFAULT_MAPPING_TAG
class Resolver(BaseResolver):
    """Default resolver: BaseResolver plus the standard YAML 1.1 implicit
    resolvers registered below (bool, float, int, merge, null, timestamp,
    value)."""
    pass
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:bool',
re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
|true|True|TRUE|false|False|FALSE
|on|On|ON|off|Off|OFF)$''', re.X),
list(u'yYnNtTfFoO'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:float',
re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)?\.[0-9_]*(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
|[-+]?\.(?:inf|Inf|INF)
|\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:int',
re.compile(ur'''^(?:[-+]?0b[0-1_]+
|[-+]?0[0-7_]+
|[-+]?(?:0|[1-9][0-9_]*)
|[-+]?0x[0-9a-fA-F_]+
|[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
list(u'-+0123456789'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:merge',
re.compile(ur'^(?:<<)$'),
['<'])
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:null',
re.compile(ur'''^(?: ~
|null|Null|NULL
| )$''', re.X),
[u'~', u'n', u'N', u''])
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:timestamp',
re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
|[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
(?:[Tt]|[ \t]+)[0-9][0-9]?
:[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
(?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
list(u'0123456789'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:value',
re.compile(ur'^(?:=)$'),
['='])
# The following resolver is only for documentation purposes. It cannot work
# because plain scalars cannot start with '!', '&', or '*'.
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:yaml',
re.compile(ur'^(?:!|&|\*)$'),
list(u'!&*'))
| Python |
class Node(object):
    """Base class for representation-graph nodes.

    A node carries a tag, a value, and the two marks delimiting where it
    came from in the input stream.
    """
    def __init__(self, tag, value, start_mark, end_mark):
        self.tag = tag
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark
    def __repr__(self):
        # Show the concrete class name, the tag, and the full value.
        return '%s(tag=%r, value=%s)' % (
                self.__class__.__name__, self.tag, repr(self.value))
class ScalarNode(Node):
    """Representation node for a scalar value.

    ``style`` records the preferred scalar style (plain, quoted,
    literal, folded) or None for "no preference".
    """
    id = 'scalar'
    def __init__(self, tag, value,
            start_mark=None, end_mark=None, style=None):
        Node.__init__(self, tag, value, start_mark, end_mark)
        self.style = style
class CollectionNode(Node):
    """Common base for sequence and mapping nodes.

    ``flow_style`` records whether flow (inline) or block layout is
    preferred, or None for "no preference".
    """
    def __init__(self, tag, value,
            start_mark=None, end_mark=None, flow_style=None):
        Node.__init__(self, tag, value, start_mark, end_mark)
        self.flow_style = flow_style
class SequenceNode(CollectionNode):
    """Representation node for a YAML sequence."""
    id = 'sequence'
class MappingNode(CollectionNode):
    """Representation node for a YAML mapping."""
    id = 'mapping'
| Python |
# This module contains abstractions for the input stream. You don't have to
# look further; there is no pretty code here.
#
# We define two classes here.
#
# Mark(source, line, column)
# It's just a record and its only use is producing nice error messages.
# Parser does not use it for any other purposes.
#
# Reader(source, data)
# Reader determines the encoding of `data` and converts it to unicode.
# Reader provides the following methods and attributes:
# reader.peek(length=1) - return the next `length` characters
# reader.forward(length=1) - move the current position to `length` characters.
# reader.index - the number of the current character.
# reader.line, reader.column - the line and the column of the current character.
__all__ = ['Reader', 'ReaderError']
from error import YAMLError, Mark
import codecs, re
# Unfortunately, the codec functions in Python 2.3 do not support the `finish`
# argument, so we have to write our own wrappers.
try:
codecs.utf_8_decode('', 'strict', False)
from codecs import utf_8_decode, utf_16_le_decode, utf_16_be_decode
except TypeError:
def utf_16_le_decode(data, errors, finish=False):
if not finish and len(data) % 2 == 1:
data = data[:-1]
return codecs.utf_16_le_decode(data, errors)
def utf_16_be_decode(data, errors, finish=False):
if not finish and len(data) % 2 == 1:
data = data[:-1]
return codecs.utf_16_be_decode(data, errors)
def utf_8_decode(data, errors, finish=False):
if not finish:
# We are trying to remove a possible incomplete multibyte character
# from the suffix of the data.
# The first byte of a multi-byte sequence is in the range 0xc0 to 0xfd.
# All further bytes are in the range 0x80 to 0xbf.
# UTF-8 encoded UCS characters may be up to six bytes long.
count = 0
while count < 5 and count < len(data) \
and '\x80' <= data[-count-1] <= '\xBF':
count -= 1
if count < 5 and count < len(data) \
and '\xC0' <= data[-count-1] <= '\xFD':
data = data[:-count-1]
return codecs.utf_8_decode(data, errors)
class ReaderError(YAMLError):
    """Raised when the input stream contains bytes that cannot be decoded
    or characters outside the YAML printable range."""
    def __init__(self, name, position, character, encoding, reason):
        self.name = name
        self.character = character
        self.position = position
        self.encoding = encoding
        self.reason = reason
    def __str__(self):
        # `character` is a raw byte string when decoding failed, or a
        # decoded unicode character that is not printable.
        if isinstance(self.character, str):
            return "'%s' codec can't decode byte #x%02x: %s\n" \
                    " in \"%s\", position %d" \
                    % (self.encoding, ord(self.character), self.reason,
                    self.name, self.position)
        else:
            return "unacceptable character #x%04x: %s\n" \
                    " in \"%s\", position %d" \
                    % (ord(self.character), self.reason,
                    self.name, self.position)
class Reader(object):
    # Reader:
    # - determines the data encoding and converts it to unicode,
    # - checks if characters are in allowed range,
    # - adds '\0' to the end.
    # Reader accepts
    # - a `str` object,
    # - a `unicode` object,
    # - a file-like object with its `read` method returning `str`,
    # - a file-like object with its `read` method returning `unicode`.
    # Yeah, it's ugly and slow.
    def __init__(self, stream):
        # `buffer` holds already-decoded characters; `raw_buffer` holds
        # undecoded bytes read from the stream (byte input only).
        self.name = None
        self.stream = None
        self.stream_pointer = 0
        self.eof = True
        self.buffer = u''
        self.pointer = 0
        self.raw_buffer = None
        self.raw_decode = None
        self.encoding = None
        # index/line/column track the absolute position for error marks.
        self.index = 0
        self.line = 0
        self.column = 0
        if isinstance(stream, unicode):
            # Already decoded: validate and terminate with '\0'.
            self.name = "<unicode string>"
            self.check_printable(stream)
            self.buffer = stream+u'\0'
        elif isinstance(stream, str):
            # Raw byte string: decoded lazily via update().
            self.name = "<string>"
            self.raw_buffer = stream
            self.determine_encoding()
        else:
            # File-like object: read incrementally in update_raw().
            self.stream = stream
            self.name = getattr(stream, 'name', "<file>")
            self.eof = False
            self.raw_buffer = ''
            self.determine_encoding()
    def peek(self, index=0):
        # Return the character `index` positions ahead without advancing.
        try:
            return self.buffer[self.pointer+index]
        except IndexError:
            self.update(index+1)
            return self.buffer[self.pointer+index]
    def prefix(self, length=1):
        # Return the next `length` characters without advancing.
        if self.pointer+length >= len(self.buffer):
            self.update(length)
        return self.buffer[self.pointer:self.pointer+length]
    def forward(self, length=1):
        # Advance the position by `length` characters, keeping the
        # index/line/column counters consistent.
        if self.pointer+length+1 >= len(self.buffer):
            self.update(length+1)
        while length:
            ch = self.buffer[self.pointer]
            self.pointer += 1
            self.index += 1
            # A line break is '\n', NEL, LS, PS, or a '\r' that is not
            # immediately followed by '\n'.
            if ch in u'\n\x85\u2028\u2029' \
                    or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
                self.line += 1
                self.column = 0
            elif ch != u'\uFEFF':
                self.column += 1
            length -= 1
    def get_mark(self):
        # Build a Mark for error reporting; the buffer snippet is only
        # available when reading from an in-memory string.
        if self.stream is None:
            return Mark(self.name, self.index, self.line, self.column,
                    self.buffer, self.pointer)
        else:
            return Mark(self.name, self.index, self.line, self.column,
                    None, None)
    def determine_encoding(self):
        # Detect UTF-16 via a BOM; otherwise assume UTF-8.
        while not self.eof and len(self.raw_buffer) < 2:
            self.update_raw()
        if not isinstance(self.raw_buffer, unicode):
            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
                self.raw_decode = utf_16_le_decode
                self.encoding = 'utf-16-le'
            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
                self.raw_decode = utf_16_be_decode
                self.encoding = 'utf-16-be'
            else:
                self.raw_decode = utf_8_decode
                self.encoding = 'utf-8'
        self.update(1)
    # Matches any character NOT allowed in a YAML stream (C0 controls
    # other than tab/newline/CR, surrogates, U+FFFE/U+FFFF).
    NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
    def check_printable(self, data):
        # Raise ReaderError if `data` contains a non-printable character.
        match = self.NON_PRINTABLE.search(data)
        if match:
            character = match.group()
            position = self.index+(len(self.buffer)-self.pointer)+match.start()
            raise ReaderError(self.name, position, character,
                    'unicode', "special characters are not allowed")
    def update(self, length):
        # Ensure at least `length` decoded characters are available
        # beyond the current pointer, decoding more raw data as needed.
        if self.raw_buffer is None:
            return
        self.buffer = self.buffer[self.pointer:]
        self.pointer = 0
        while len(self.buffer) < length:
            if not self.eof:
                self.update_raw()
            if self.raw_decode is not None:
                try:
                    data, converted = self.raw_decode(self.raw_buffer,
                            'strict', self.eof)
                except UnicodeDecodeError, exc:
                    character = exc.object[exc.start]
                    if self.stream is not None:
                        position = self.stream_pointer-len(self.raw_buffer)+exc.start
                    else:
                        position = exc.start
                    raise ReaderError(self.name, position, character,
                            exc.encoding, exc.reason)
            else:
                data = self.raw_buffer
                converted = len(data)
            self.check_printable(data)
            self.buffer += data
            self.raw_buffer = self.raw_buffer[converted:]
            if self.eof:
                # Terminate with '\0' so scanners may peek past the end.
                self.buffer += u'\0'
                self.raw_buffer = None
                break
    def update_raw(self, size=1024):
        # Read another chunk from the underlying stream; an empty read
        # signals end of input.
        data = self.stream.read(size)
        if data:
            self.raw_buffer += data
            self.stream_pointer += len(data)
        else:
            self.eof = True
#try:
# import psyco
# psyco.bind(Reader)
#except ImportError:
# pass
| Python |
# The following YAML grammar is LL(1) and is parsed by a recursive descent
# parser.
#
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
# block_node_or_indentless_sequence ::=
# ALIAS
# | properties (block_content | indentless_block_sequence)?
# | block_content
# | indentless_block_sequence
# block_node ::= ALIAS
# | properties block_content?
# | block_content
# flow_node ::= ALIAS
# | properties flow_content?
# | flow_content
# properties ::= TAG ANCHOR? | ANCHOR TAG?
# block_content ::= block_collection | flow_collection | SCALAR
# flow_content ::= flow_collection | SCALAR
# block_collection ::= block_sequence | block_mapping
# flow_collection ::= flow_sequence | flow_mapping
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
# block_mapping ::= BLOCK-MAPPING_START
# ((KEY block_node_or_indentless_sequence?)?
# (VALUE block_node_or_indentless_sequence?)?)*
# BLOCK-END
# flow_sequence ::= FLOW-SEQUENCE-START
# (flow_sequence_entry FLOW-ENTRY)*
# flow_sequence_entry?
# FLOW-SEQUENCE-END
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
# flow_mapping ::= FLOW-MAPPING-START
# (flow_mapping_entry FLOW-ENTRY)*
# flow_mapping_entry?
# FLOW-MAPPING-END
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
#
# FIRST sets:
#
# stream: { STREAM-START }
# explicit_document: { DIRECTIVE DOCUMENT-START }
# implicit_document: FIRST(block_node)
# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_sequence: { BLOCK-SEQUENCE-START }
# block_mapping: { BLOCK-MAPPING-START }
# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
# indentless_sequence: { ENTRY }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_sequence: { FLOW-SEQUENCE-START }
# flow_mapping: { FLOW-MAPPING-START }
# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
__all__ = ['Parser', 'ParserError']
from error import MarkedYAMLError
from tokens import *
from events import *
from scanner import *
class ParserError(MarkedYAMLError):
    """Raised on a syntax error detected while parsing the token stream."""
    pass
class Parser(object):
    # Since writing a recursive-descendant parser is a straightforward task, we
    # do not give many comments here.
    # Note that we use Python generators. If you rewrite the parser in another
    # language, you may replace all 'yield'-s with event handler calls.
    # Tag handles in effect when the document declares no %TAG directives.
    DEFAULT_TAGS = {
        u'!': u'!',
        u'!!': u'tag:yaml.org,2002:',
    }
    def __init__(self):
        # The parser is a state machine: `state` is a bound method that
        # produces the next event, `states` is a stack of continuations,
        # and `marks` a stack of collection start marks for errors.
        self.current_event = None
        self.yaml_version = None
        self.tag_handles = {}
        self.states = []
        self.marks = []
        self.state = self.parse_stream_start
    def check_event(self, *choices):
        # Check the type of the next event.
        if self.current_event is None:
            if self.state:
                self.current_event = self.state()
        if self.current_event is not None:
            if not choices:
                return True
            for choice in choices:
                if isinstance(self.current_event, choice):
                    return True
        return False
    def peek_event(self):
        # Get the next event.
        if self.current_event is None:
            if self.state:
                self.current_event = self.state()
        return self.current_event
    def get_event(self):
        # Get the next event and proceed further.
        if self.current_event is None:
            if self.state:
                self.current_event = self.state()
        value = self.current_event
        self.current_event = None
        return value
    # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
    # implicit_document ::= block_node DOCUMENT-END*
    # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
    def parse_stream_start(self):
        # Parse the stream start.
        token = self.get_token()
        event = StreamStartEvent(token.start_mark, token.end_mark,
                encoding=token.encoding)
        # Prepare the next state.
        self.state = self.parse_implicit_document_start
        return event
    def parse_implicit_document_start(self):
        # Parse an implicit document.
        if not self.check_token(DirectiveToken, DocumentStartToken,
                StreamEndToken):
            self.tag_handles = self.DEFAULT_TAGS
            token = self.peek_token()
            start_mark = end_mark = token.start_mark
            event = DocumentStartEvent(start_mark, end_mark,
                    explicit=False)
            # Prepare the next state.
            self.states.append(self.parse_document_end)
            self.state = self.parse_block_node
            return event
        else:
            return self.parse_document_start()
    def parse_document_start(self):
        # Parse any extra document end indicators.
        while self.check_token(DocumentEndToken):
            self.get_token()
        # Parse an explicit document.
        if not self.check_token(StreamEndToken):
            token = self.peek_token()
            start_mark = token.start_mark
            version, tags = self.process_directives()
            if not self.check_token(DocumentStartToken):
                raise ParserError(None, None,
                        "expected '<document start>', but found %r"
                        % self.peek_token().id,
                        self.peek_token().start_mark)
            token = self.get_token()
            end_mark = token.end_mark
            event = DocumentStartEvent(start_mark, end_mark,
                    explicit=True, version=version, tags=tags)
            self.states.append(self.parse_document_end)
            self.state = self.parse_document_content
        else:
            # Parse the end of the stream.
            token = self.get_token()
            event = StreamEndEvent(token.start_mark, token.end_mark)
            # All continuations and marks must have been consumed.
            assert not self.states
            assert not self.marks
            self.state = None
        return event
    def parse_document_end(self):
        # Parse the document end.
        token = self.peek_token()
        start_mark = end_mark = token.start_mark
        explicit = False
        if self.check_token(DocumentEndToken):
            token = self.get_token()
            end_mark = token.end_mark
            explicit = True
        event = DocumentEndEvent(start_mark, end_mark,
                explicit=explicit)
        # Prepare the next state.
        self.state = self.parse_document_start
        return event
    def parse_document_content(self):
        # An empty document body produces an empty scalar event.
        if self.check_token(DirectiveToken,
                DocumentStartToken, DocumentEndToken, StreamEndToken):
            event = self.process_empty_scalar(self.peek_token().start_mark)
            self.state = self.states.pop()
            return event
        else:
            return self.parse_block_node()
    def process_directives(self):
        # Collect %YAML and %TAG directives preceding the document, then
        # fill in the default tag handles.
        self.yaml_version = None
        self.tag_handles = {}
        while self.check_token(DirectiveToken):
            token = self.get_token()
            if token.name == u'YAML':
                if self.yaml_version is not None:
                    raise ParserError(None, None,
                            "found duplicate YAML directive", token.start_mark)
                major, minor = token.value
                if major != 1:
                    raise ParserError(None, None,
                            "found incompatible YAML document (version 1.* is required)",
                            token.start_mark)
                self.yaml_version = token.value
            elif token.name == u'TAG':
                handle, prefix = token.value
                if handle in self.tag_handles:
                    raise ParserError(None, None,
                            "duplicate tag handle %r" % handle.encode('utf-8'),
                            token.start_mark)
                self.tag_handles[handle] = prefix
        # Snapshot the explicitly declared handles before the defaults
        # are merged in; the snapshot is attached to DocumentStartEvent.
        if self.tag_handles:
            value = self.yaml_version, self.tag_handles.copy()
        else:
            value = self.yaml_version, None
        for key in self.DEFAULT_TAGS:
            if key not in self.tag_handles:
                self.tag_handles[key] = self.DEFAULT_TAGS[key]
        return value
    # block_node_or_indentless_sequence ::= ALIAS
    #               | properties (block_content | indentless_block_sequence)?
    #               | block_content
    #               | indentless_block_sequence
    # block_node    ::= ALIAS
    #                   | properties block_content?
    #                   | block_content
    # flow_node     ::= ALIAS
    #                   | properties flow_content?
    #                   | flow_content
    # properties ::= TAG ANCHOR? | ANCHOR TAG?
    # block_content     ::= block_collection | flow_collection | SCALAR
    # flow_content      ::= flow_collection | SCALAR
    # block_collection  ::= block_sequence | block_mapping
    # flow_collection   ::= flow_sequence | flow_mapping
    def parse_block_node(self):
        return self.parse_node(block=True)
    def parse_flow_node(self):
        return self.parse_node()
    def parse_block_node_or_indentless_sequence(self):
        return self.parse_node(block=True, indentless_sequence=True)
    def parse_node(self, block=False, indentless_sequence=False):
        # Parse one node: an alias, or optional properties (anchor/tag)
        # followed by scalar or collection content.
        if self.check_token(AliasToken):
            token = self.get_token()
            event = AliasEvent(token.value, token.start_mark, token.end_mark)
            self.state = self.states.pop()
        else:
            anchor = None
            tag = None
            start_mark = end_mark = tag_mark = None
            # Properties may appear in either order: ANCHOR TAG? or
            # TAG ANCHOR?.
            if self.check_token(AnchorToken):
                token = self.get_token()
                start_mark = token.start_mark
                end_mark = token.end_mark
                anchor = token.value
                if self.check_token(TagToken):
                    token = self.get_token()
                    tag_mark = token.start_mark
                    end_mark = token.end_mark
                    tag = token.value
            elif self.check_token(TagToken):
                token = self.get_token()
                start_mark = tag_mark = token.start_mark
                end_mark = token.end_mark
                tag = token.value
                if self.check_token(AnchorToken):
                    token = self.get_token()
                    end_mark = token.end_mark
                    anchor = token.value
            if tag is not None:
                # Expand the tag handle to its full prefix.
                handle, suffix = tag
                if handle is not None:
                    if handle not in self.tag_handles:
                        raise ParserError("while parsing a node", start_mark,
                                "found undefined tag handle %r" % handle.encode('utf-8'),
                                tag_mark)
                    tag = self.tag_handles[handle]+suffix
                else:
                    tag = suffix
            #if tag == u'!':
            #    raise ParserError("while parsing a node", start_mark,
            #            "found non-specific tag '!'", tag_mark,
            #            "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
            if start_mark is None:
                start_mark = end_mark = self.peek_token().start_mark
            event = None
            implicit = (tag is None or tag == u'!')
            if indentless_sequence and self.check_token(BlockEntryToken):
                end_mark = self.peek_token().end_mark
                event = SequenceStartEvent(anchor, tag, implicit,
                        start_mark, end_mark)
                self.state = self.parse_indentless_sequence_entry
            else:
                if self.check_token(ScalarToken):
                    token = self.get_token()
                    end_mark = token.end_mark
                    # implicit is a pair: (plain-resolvable,
                    # non-plain-resolvable).
                    if (token.plain and tag is None) or tag == u'!':
                        implicit = (True, False)
                    elif tag is None:
                        implicit = (False, True)
                    else:
                        implicit = (False, False)
                    event = ScalarEvent(anchor, tag, implicit, token.value,
                            start_mark, end_mark, style=token.style)
                    self.state = self.states.pop()
                elif self.check_token(FlowSequenceStartToken):
                    end_mark = self.peek_token().end_mark
                    event = SequenceStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=True)
                    self.state = self.parse_flow_sequence_first_entry
                elif self.check_token(FlowMappingStartToken):
                    end_mark = self.peek_token().end_mark
                    event = MappingStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=True)
                    self.state = self.parse_flow_mapping_first_key
                elif block and self.check_token(BlockSequenceStartToken):
                    end_mark = self.peek_token().start_mark
                    event = SequenceStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=False)
                    self.state = self.parse_block_sequence_first_entry
                elif block and self.check_token(BlockMappingStartToken):
                    end_mark = self.peek_token().start_mark
                    event = MappingStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=False)
                    self.state = self.parse_block_mapping_first_key
                elif anchor is not None or tag is not None:
                    # Empty scalars are allowed even if a tag or an anchor is
                    # specified.
                    event = ScalarEvent(anchor, tag, (implicit, False), u'',
                            start_mark, end_mark)
                    self.state = self.states.pop()
                else:
                    if block:
                        node = 'block'
                    else:
                        node = 'flow'
                    token = self.peek_token()
                    raise ParserError("while parsing a %s node" % node, start_mark,
                            "expected the node content, but found %r" % token.id,
                            token.start_mark)
        return event
    # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
    def parse_block_sequence_first_entry(self):
        token = self.get_token()
        self.marks.append(token.start_mark)
        return self.parse_block_sequence_entry()
    def parse_block_sequence_entry(self):
        if self.check_token(BlockEntryToken):
            token = self.get_token()
            if not self.check_token(BlockEntryToken, BlockEndToken):
                self.states.append(self.parse_block_sequence_entry)
                return self.parse_block_node()
            else:
                # A '-' with no content is an empty scalar entry.
                self.state = self.parse_block_sequence_entry
                return self.process_empty_scalar(token.end_mark)
        if not self.check_token(BlockEndToken):
            token = self.peek_token()
            raise ParserError("while parsing a block collection", self.marks[-1],
                    "expected <block end>, but found %r" % token.id, token.start_mark)
        token = self.get_token()
        event = SequenceEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event
    # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
    def parse_indentless_sequence_entry(self):
        if self.check_token(BlockEntryToken):
            token = self.get_token()
            if not self.check_token(BlockEntryToken,
                    KeyToken, ValueToken, BlockEndToken):
                self.states.append(self.parse_indentless_sequence_entry)
                return self.parse_block_node()
            else:
                self.state = self.parse_indentless_sequence_entry
                return self.process_empty_scalar(token.end_mark)
        # No BLOCK-END token for indentless sequences; end at the next
        # token without consuming it.
        token = self.peek_token()
        event = SequenceEndEvent(token.start_mark, token.start_mark)
        self.state = self.states.pop()
        return event
    # block_mapping     ::= BLOCK-MAPPING_START
    #                       ((KEY block_node_or_indentless_sequence?)?
    #                       (VALUE block_node_or_indentless_sequence?)?)*
    #                       BLOCK-END
    def parse_block_mapping_first_key(self):
        token = self.get_token()
        self.marks.append(token.start_mark)
        return self.parse_block_mapping_key()
    def parse_block_mapping_key(self):
        if self.check_token(KeyToken):
            token = self.get_token()
            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
                self.states.append(self.parse_block_mapping_value)
                return self.parse_block_node_or_indentless_sequence()
            else:
                # '?' with no content: empty scalar key.
                self.state = self.parse_block_mapping_value
                return self.process_empty_scalar(token.end_mark)
        if not self.check_token(BlockEndToken):
            token = self.peek_token()
            raise ParserError("while parsing a block mapping", self.marks[-1],
                    "expected <block end>, but found %r" % token.id, token.start_mark)
        token = self.get_token()
        event = MappingEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event
    def parse_block_mapping_value(self):
        if self.check_token(ValueToken):
            token = self.get_token()
            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
                self.states.append(self.parse_block_mapping_key)
                return self.parse_block_node_or_indentless_sequence()
            else:
                # ':' with no content: empty scalar value.
                self.state = self.parse_block_mapping_key
                return self.process_empty_scalar(token.end_mark)
        else:
            # Missing value entirely: empty scalar value.
            self.state = self.parse_block_mapping_key
            token = self.peek_token()
            return self.process_empty_scalar(token.start_mark)
    # flow_sequence     ::= FLOW-SEQUENCE-START
    #                       (flow_sequence_entry FLOW-ENTRY)*
    #                       flow_sequence_entry?
    #                       FLOW-SEQUENCE-END
    # flow_sequence_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
    #
    # Note that while production rules for both flow_sequence_entry and
    # flow_mapping_entry are equal, their interpretations are different.
    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
    # generate an inline mapping (set syntax).
    def parse_flow_sequence_first_entry(self):
        token = self.get_token()
        self.marks.append(token.start_mark)
        return self.parse_flow_sequence_entry(first=True)
    def parse_flow_sequence_entry(self, first=False):
        if not self.check_token(FlowSequenceEndToken):
            if not first:
                # Entries after the first must be separated by ','.
                if self.check_token(FlowEntryToken):
                    self.get_token()
                else:
                    token = self.peek_token()
                    raise ParserError("while parsing a flow sequence", self.marks[-1],
                            "expected ',' or ']', but got %r" % token.id, token.start_mark)
            if self.check_token(KeyToken):
                # '?' inside a flow sequence starts an inline mapping.
                token = self.peek_token()
                event = MappingStartEvent(None, None, True,
                        token.start_mark, token.end_mark,
                        flow_style=True)
                self.state = self.parse_flow_sequence_entry_mapping_key
                return event
            elif not self.check_token(FlowSequenceEndToken):
                self.states.append(self.parse_flow_sequence_entry)
                return self.parse_flow_node()
        token = self.get_token()
        event = SequenceEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event
    def parse_flow_sequence_entry_mapping_key(self):
        token = self.get_token()
        if not self.check_token(ValueToken,
                FlowEntryToken, FlowSequenceEndToken):
            self.states.append(self.parse_flow_sequence_entry_mapping_value)
            return self.parse_flow_node()
        else:
            self.state = self.parse_flow_sequence_entry_mapping_value
            return self.process_empty_scalar(token.end_mark)
    def parse_flow_sequence_entry_mapping_value(self):
        if self.check_token(ValueToken):
            token = self.get_token()
            if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
                self.states.append(self.parse_flow_sequence_entry_mapping_end)
                return self.parse_flow_node()
            else:
                self.state = self.parse_flow_sequence_entry_mapping_end
                return self.process_empty_scalar(token.end_mark)
        else:
            self.state = self.parse_flow_sequence_entry_mapping_end
            token = self.peek_token()
            return self.process_empty_scalar(token.start_mark)
    def parse_flow_sequence_entry_mapping_end(self):
        # Close the single-pair inline mapping without consuming a token.
        self.state = self.parse_flow_sequence_entry
        token = self.peek_token()
        return MappingEndEvent(token.start_mark, token.start_mark)
    # flow_mapping  ::= FLOW-MAPPING-START
    #                   (flow_mapping_entry FLOW-ENTRY)*
    #                   flow_mapping_entry?
    #                   FLOW-MAPPING-END
    # flow_mapping_entry    ::= flow_node | KEY flow_node? (VALUE flow_node?)?
    def parse_flow_mapping_first_key(self):
        token = self.get_token()
        self.marks.append(token.start_mark)
        return self.parse_flow_mapping_key(first=True)
    def parse_flow_mapping_key(self, first=False):
        if not self.check_token(FlowMappingEndToken):
            if not first:
                # Entries after the first must be separated by ','.
                if self.check_token(FlowEntryToken):
                    self.get_token()
                else:
                    token = self.peek_token()
                    raise ParserError("while parsing a flow mapping", self.marks[-1],
                            "expected ',' or '}', but got %r" % token.id, token.start_mark)
            if self.check_token(KeyToken):
                token = self.get_token()
                if not self.check_token(ValueToken,
                        FlowEntryToken, FlowMappingEndToken):
                    self.states.append(self.parse_flow_mapping_value)
                    return self.parse_flow_node()
                else:
                    self.state = self.parse_flow_mapping_value
                    return self.process_empty_scalar(token.end_mark)
            elif not self.check_token(FlowMappingEndToken):
                # A bare node is a key whose value is empty.
                self.states.append(self.parse_flow_mapping_empty_value)
                return self.parse_flow_node()
        token = self.get_token()
        event = MappingEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event
    def parse_flow_mapping_value(self):
        if self.check_token(ValueToken):
            token = self.get_token()
            if not self.check_token(FlowEntryToken, FlowMappingEndToken):
                self.states.append(self.parse_flow_mapping_key)
                return self.parse_flow_node()
            else:
                self.state = self.parse_flow_mapping_key
                return self.process_empty_scalar(token.end_mark)
        else:
            self.state = self.parse_flow_mapping_key
            token = self.peek_token()
            return self.process_empty_scalar(token.start_mark)
    def parse_flow_mapping_empty_value(self):
        self.state = self.parse_flow_mapping_key
        return self.process_empty_scalar(self.peek_token().start_mark)
    def process_empty_scalar(self, mark):
        # Synthesize a plain empty scalar event at the given mark.
        return ScalarEvent(None, None, (True, False), u'', mark, mark)
| Python |
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError']
from error import *
from nodes import *
import datetime
try:
set
except NameError:
from sets import Set as set
import sys, copy_reg, types
class RepresenterError(YAMLError):
    """Raised when a Python object cannot be converted to a node."""
    pass
class BaseRepresenter(object):
    # Class-level registries mapping Python types to representer
    # functions; subclasses get copy-on-write copies via
    # add_representer/add_multi_representer.
    yaml_representers = {}
    yaml_multi_representers = {}
    def __init__(self, default_style=None, default_flow_style=None):
        self.default_style = default_style
        self.default_flow_style = default_flow_style
        # id(object) -> node cache used to emit anchors/aliases.
        self.represented_objects = {}
        # Keeps represented objects alive so their id()s stay unique
        # for the duration of represent().
        self.object_keeper = []
        self.alias_key = None
    def represent(self, data):
        # Convert a native object into a node tree, serialize it, and
        # reset the per-document caches.
        node = self.represent_data(data)
        self.serialize(node)
        self.represented_objects = {}
        self.object_keeper = []
        self.alias_key = None
    def get_classobj_bases(self, cls):
        # Return `cls` followed by all its (old-style class) bases,
        # depth-first.
        bases = [cls]
        for base in cls.__bases__:
            bases.extend(self.get_classobj_bases(base))
        return bases
    def represent_data(self, data):
        # Convert one object into a node, reusing the cached node when
        # the object was already represented (alias support).
        if self.ignore_aliases(data):
            self.alias_key = None
        else:
            self.alias_key = id(data)
        if self.alias_key is not None:
            if self.alias_key in self.represented_objects:
                node = self.represented_objects[self.alias_key]
                #if node is None:
                #    raise RepresenterError("recursive objects are not allowed: %r" % data)
                return node
            #self.represented_objects[alias_key] = None
            self.object_keeper.append(data)
        data_types = type(data).__mro__
        if type(data) is types.InstanceType:
            # Old-style class instance: construct the type list by hand.
            data_types = self.get_classobj_bases(data.__class__)+list(data_types)
        if data_types[0] in self.yaml_representers:
            node = self.yaml_representers[data_types[0]](self, data)
        else:
            # Fall back on multi-representers (matched along the MRO),
            # then on the catch-all (None) representers.
            for data_type in data_types:
                if data_type in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[data_type](self, data)
                    break
            else:
                if None in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[None](self, data)
                elif None in self.yaml_representers:
                    node = self.yaml_representers[None](self, data)
                else:
                    node = ScalarNode(None, unicode(data))
        #if alias_key is not None:
        #    self.represented_objects[alias_key] = node
        return node
    def add_representer(cls, data_type, representer):
        # Register an exact-type representer on this class, without
        # mutating the registry inherited from the base class.
        if not 'yaml_representers' in cls.__dict__:
            cls.yaml_representers = cls.yaml_representers.copy()
        cls.yaml_representers[data_type] = representer
    add_representer = classmethod(add_representer)
    def add_multi_representer(cls, data_type, representer):
        # Register a representer matched by base class along the MRO.
        if not 'yaml_multi_representers' in cls.__dict__:
            cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
        cls.yaml_multi_representers[data_type] = representer
    add_multi_representer = classmethod(add_multi_representer)
    def represent_scalar(self, tag, value, style=None):
        # Build a ScalarNode and cache it under the current alias key.
        if style is None:
            style = self.default_style
        node = ScalarNode(tag, value, style=style)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        return node
    def represent_sequence(self, tag, sequence, flow_style=None):
        value = []
        node = SequenceNode(tag, value, flow_style=flow_style)
        # Cache the node before recursing so cycles resolve to aliases.
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        for item in sequence:
            node_item = self.represent_data(item)
            if not (isinstance(node_item, ScalarNode) and not node_item.style):
                best_style = False
            value.append(node_item)
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                # Prefer flow style only when all items are plain scalars.
                node.flow_style = best_style
        return node
    def represent_mapping(self, tag, mapping, flow_style=None):
        value = []
        node = MappingNode(tag, value, flow_style=flow_style)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        if hasattr(mapping, 'items'):
            # Sort the pairs for deterministic output.
            mapping = mapping.items()
            mapping.sort()
        for item_key, item_value in mapping:
            node_key = self.represent_data(item_key)
            node_value = self.represent_data(item_value)
            if not (isinstance(node_key, ScalarNode) and not node_key.style):
                best_style = False
            if not (isinstance(node_value, ScalarNode) and not node_value.style):
                best_style = False
            value.append((node_key, node_value))
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node
    def ignore_aliases(self, data):
        # Subclasses override this to suppress anchors/aliases for some
        # value types.
        return False
class SafeRepresenter(BaseRepresenter):
    """Representer restricted to plain Python types; it refuses to
    serialize arbitrary objects (see represent_undefined)."""
    def ignore_aliases(self, data):
        """Never anchor/alias None, the empty tuple, or scalar
        primitives.  (Implicitly returns None — falsey — otherwise.)"""
        if data in [None, ()]:
            return True
        if isinstance(data, (str, unicode, bool, int, float)):
            return True
    def represent_none(self, data):
        """Represent None as the !!null scalar."""
        return self.represent_scalar(u'tag:yaml.org,2002:null',
                u'null')
    def represent_str(self, data):
        """Represent a byte string: ASCII/UTF-8 text becomes !!str;
        anything else is base64-encoded as !!binary in literal style."""
        tag = None
        style = None
        try:
            data = unicode(data, 'ascii')
            tag = u'tag:yaml.org,2002:str'
        except UnicodeDecodeError:
            try:
                data = unicode(data, 'utf-8')
                tag = u'tag:yaml.org,2002:str'
            except UnicodeDecodeError:
                data = data.encode('base64')
                tag = u'tag:yaml.org,2002:binary'
                style = '|'
        return self.represent_scalar(tag, data, style=style)
    def represent_unicode(self, data):
        """Represent a unicode string as !!str."""
        return self.represent_scalar(u'tag:yaml.org,2002:str', data)
    def represent_bool(self, data):
        """Represent a bool as the !!bool scalar u'true' or u'false'."""
        if data:
            value = u'true'
        else:
            value = u'false'
        return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
    def represent_int(self, data):
        """Represent an int as !!int."""
        return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
    def represent_long(self, data):
        """Represent a long as !!int (the numeric value round-trips)."""
        return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
    # Compute positive infinity portably: keep squaring 1e300 until
    # repr() stops changing (i.e. the value has saturated to 'inf').
    inf_value = 1e300
    while repr(inf_value) != repr(inf_value*inf_value):
        inf_value *= inf_value
    def represent_float(self, data):
        """Represent a float as !!float, mapping NaN and +/-infinity to
        the YAML spellings .nan, .inf and -.inf."""
        # NaN test: NaN != NaN; the second clause presumably catches
        # platforms where NaN erroneously compares equal to everything
        # -- TODO confirm.
        if data != data or (data == 0.0 and data == 1.0):
            value = u'.nan'
        elif data == self.inf_value:
            value = u'.inf'
        elif data == -self.inf_value:
            value = u'-.inf'
        else:
            value = unicode(repr(data)).lower()
            # Note that in some cases `repr(data)` represents a float number
            # without the decimal parts. For instance:
            # >>> repr(1e17)
            # '1e17'
            # Unfortunately, this is not a valid float representation according
            # to the definition of the `!!float` tag. We fix this by adding
            # '.0' before the 'e' symbol.
            if u'.' not in value and u'e' in value:
                value = value.replace(u'e', u'.0e', 1)
        return self.represent_scalar(u'tag:yaml.org,2002:float', value)
    def represent_list(self, data):
        """Represent a list (or, via registration, a tuple) as !!seq."""
        #pairs = (len(data) > 0 and isinstance(data, list))
        #if pairs:
        #    for item in data:
        #        if not isinstance(item, tuple) or len(item) != 2:
        #            pairs = False
        #            break
        #if not pairs:
        return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
        #value = []
        #for item_key, item_value in data:
        #    value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
        #        [(item_key, item_value)]))
        #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
    def represent_dict(self, data):
        """Represent a dict as a !!map mapping node."""
        return self.represent_mapping(u'tag:yaml.org,2002:map', data)
    def represent_set(self, data):
        """Represent a set as !!set: a mapping whose values are null."""
        value = {}
        for key in data:
            value[key] = None
        return self.represent_mapping(u'tag:yaml.org,2002:set', value)
    def represent_date(self, data):
        """Represent datetime.date as a !!timestamp scalar."""
        value = unicode(data.isoformat())
        return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
    def represent_datetime(self, data):
        """Represent datetime.datetime as a !!timestamp scalar."""
        value = unicode(data.isoformat(' '))
        return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
    def represent_yaml_object(self, tag, data, cls, flow_style=None):
        """Represent a YAMLObject-style instance as a mapping of its
        state: __getstate__() if defined, else a copy of __dict__."""
        if hasattr(data, '__getstate__'):
            state = data.__getstate__()
        else:
            state = data.__dict__.copy()
        return self.represent_mapping(tag, state, flow_style=flow_style)
    def represent_undefined(self, data):
        """Catch-all: the safe representer refuses unknown types."""
        raise RepresenterError("cannot represent an object: %s" % data)
# Register the safe representers for the standard Python types.  The
# entry keyed by None is the catch-all: SafeRepresenter refuses to
# serialize anything it does not know (represent_undefined).
SafeRepresenter.add_representer(type(None),
        SafeRepresenter.represent_none)
SafeRepresenter.add_representer(str,
        SafeRepresenter.represent_str)
SafeRepresenter.add_representer(unicode,
        SafeRepresenter.represent_unicode)
SafeRepresenter.add_representer(bool,
        SafeRepresenter.represent_bool)
SafeRepresenter.add_representer(int,
        SafeRepresenter.represent_int)
SafeRepresenter.add_representer(long,
        SafeRepresenter.represent_long)
SafeRepresenter.add_representer(float,
        SafeRepresenter.represent_float)
SafeRepresenter.add_representer(list,
        SafeRepresenter.represent_list)
SafeRepresenter.add_representer(tuple,
        SafeRepresenter.represent_list)
SafeRepresenter.add_representer(dict,
        SafeRepresenter.represent_dict)
SafeRepresenter.add_representer(set,
        SafeRepresenter.represent_set)
SafeRepresenter.add_representer(datetime.date,
        SafeRepresenter.represent_date)
SafeRepresenter.add_representer(datetime.datetime,
        SafeRepresenter.represent_datetime)
SafeRepresenter.add_representer(None,
        SafeRepresenter.represent_undefined)
class Representer(SafeRepresenter):
    """Representer for the full (unsafe) dumper: extends the safe one
    with Python-specific tags (python/str, python/unicode, python/long,
    python/complex, python/tuple, python/name, python/module and the
    python/object family for arbitrary instances)."""
    def represent_str(self, data):
        """Like SafeRepresenter.represent_str, but non-ASCII UTF-8 text
        is tagged python/str so it loads back as a byte string."""
        tag = None
        style = None
        try:
            data = unicode(data, 'ascii')
            tag = u'tag:yaml.org,2002:str'
        except UnicodeDecodeError:
            try:
                data = unicode(data, 'utf-8')
                tag = u'tag:yaml.org,2002:python/str'
            except UnicodeDecodeError:
                data = data.encode('base64')
                tag = u'tag:yaml.org,2002:binary'
                style = '|'
        return self.represent_scalar(tag, data, style=style)
    def represent_unicode(self, data):
        """ASCII-only unicode is tagged python/unicode so the *type*
        (not just the text) round-trips; other text is plain !!str."""
        tag = None
        try:
            data.encode('ascii')
            tag = u'tag:yaml.org,2002:python/unicode'
        except UnicodeEncodeError:
            tag = u'tag:yaml.org,2002:str'
        return self.represent_scalar(tag, data)
    def represent_long(self, data):
        """Tag a long as python/long when int() would not preserve it.

        NOTE(review): `is not` is an identity test on the result of
        int(data); confirm the intended Python 2 semantics here.
        """
        tag = u'tag:yaml.org,2002:int'
        if int(data) is not data:
            tag = u'tag:yaml.org,2002:python/long'
        return self.represent_scalar(tag, unicode(data))
    def represent_complex(self, data):
        """Represent a complex number as python/complex, omitting a
        zero real or imaginary part."""
        if data.imag == 0.0:
            data = u'%r' % data.real
        elif data.real == 0.0:
            data = u'%rj' % data.imag
        elif data.imag > 0:
            data = u'%r+%rj' % (data.real, data.imag)
        else:
            data = u'%r%rj' % (data.real, data.imag)
        return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
    def represent_tuple(self, data):
        """Represent a tuple as python/tuple (a tagged sequence)."""
        return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
    def represent_name(self, data):
        """Represent a class or function by its dotted import path."""
        name = u'%s.%s' % (data.__module__, data.__name__)
        return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
    def represent_module(self, data):
        """Represent a module by name (python/module tag, empty value)."""
        return self.represent_scalar(
                u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
    def represent_instance(self, data):
        """Represent an old-style class instance (types.InstanceType)."""
        # For instances of classic classes, we use __getinitargs__ and
        # __getstate__ to serialize the data.
        # If data.__getinitargs__ exists, the object must be reconstructed by
        # calling cls(**args), where args is a tuple returned by
        # __getinitargs__. Otherwise, the cls.__init__ method should never be
        # called and the class instance is created by instantiating a trivial
        # class and assigning to the instance's __class__ variable.
        # If data.__getstate__ exists, it returns the state of the object.
        # Otherwise, the state of the object is data.__dict__.
        # We produce either a !!python/object or !!python/object/new node.
        # If data.__getinitargs__ does not exist and state is a dictionary, we
        # produce a !!python/object node . Otherwise we produce a
        # !!python/object/new node.
        cls = data.__class__
        class_name = u'%s.%s' % (cls.__module__, cls.__name__)
        args = None
        state = None
        if hasattr(data, '__getinitargs__'):
            args = list(data.__getinitargs__())
        if hasattr(data, '__getstate__'):
            state = data.__getstate__()
        else:
            state = data.__dict__
        if args is None and isinstance(state, dict):
            return self.represent_mapping(
                    u'tag:yaml.org,2002:python/object:'+class_name, state)
        if isinstance(state, dict) and not state:
            return self.represent_sequence(
                    u'tag:yaml.org,2002:python/object/new:'+class_name, args)
        value = {}
        if args:
            value['args'] = args
        value['state'] = state
        return self.represent_mapping(
                u'tag:yaml.org,2002:python/object/new:'+class_name, value)
    def represent_object(self, data):
        """Represent an arbitrary new-style object via the reduce API."""
        # We use __reduce__ API to save the data. data.__reduce__ returns
        # a tuple of length 2-5:
        #   (function, args, state, listitems, dictitems)
        # For reconstructing, we calls function(*args), then set its state,
        # listitems, and dictitems if they are not None.
        # A special case is when function.__name__ == '__newobj__'. In this
        # case we create the object with args[0].__new__(*args).
        # Another special case is when __reduce__ returns a string - we don't
        # support it.
        # We produce a !!python/object, !!python/object/new or
        # !!python/object/apply node.
        cls = type(data)
        if cls in copy_reg.dispatch_table:
            reduce = copy_reg.dispatch_table[cls](data)
        elif hasattr(data, '__reduce_ex__'):
            reduce = data.__reduce_ex__(2)
        elif hasattr(data, '__reduce__'):
            reduce = data.__reduce__()
        else:
            raise RepresenterError("cannot represent object: %r" % data)
        # Pad the reduce tuple to exactly five entries.
        reduce = (list(reduce)+[None]*5)[:5]
        function, args, state, listitems, dictitems = reduce
        args = list(args)
        if state is None:
            state = {}
        if listitems is not None:
            listitems = list(listitems)
        if dictitems is not None:
            dictitems = dict(dictitems)
        if function.__name__ == '__newobj__':
            function = args[0]
            args = args[1:]
            tag = u'tag:yaml.org,2002:python/object/new:'
            newobj = True
        else:
            tag = u'tag:yaml.org,2002:python/object/apply:'
            newobj = False
        function_name = u'%s.%s' % (function.__module__, function.__name__)
        if not args and not listitems and not dictitems \
                and isinstance(state, dict) and newobj:
            return self.represent_mapping(
                    u'tag:yaml.org,2002:python/object:'+function_name, state)
        if not listitems and not dictitems \
                and isinstance(state, dict) and not state:
            return self.represent_sequence(tag+function_name, args)
        value = {}
        if args:
            value['args'] = args
        if state or not isinstance(state, dict):
            value['state'] = state
        if listitems:
            value['listitems'] = listitems
        if dictitems:
            value['dictitems'] = dictitems
        return self.represent_mapping(tag+function_name, value)
# Register the Python-specific representers; the two multi-representers
# at the end catch classic instances and all remaining new-style objects.
Representer.add_representer(str,
        Representer.represent_str)
Representer.add_representer(unicode,
        Representer.represent_unicode)
Representer.add_representer(long,
        Representer.represent_long)
Representer.add_representer(complex,
        Representer.represent_complex)
Representer.add_representer(tuple,
        Representer.represent_tuple)
Representer.add_representer(type,
        Representer.represent_name)
Representer.add_representer(types.ClassType,
        Representer.represent_name)
Representer.add_representer(types.FunctionType,
        Representer.represent_name)
Representer.add_representer(types.BuiltinFunctionType,
        Representer.represent_name)
Representer.add_representer(types.ModuleType,
        Representer.represent_module)
Representer.add_multi_representer(types.InstanceType,
        Representer.represent_instance)
Representer.add_multi_representer(object,
        Representer.represent_object)
| Python |
# Emitter expects events obeying the following grammar:
# stream ::= STREAM-START document* STREAM-END
# document ::= DOCUMENT-START node DOCUMENT-END
# node ::= SCALAR | sequence | mapping
# sequence ::= SEQUENCE-START node* SEQUENCE-END
# mapping ::= MAPPING-START (node node)* MAPPING-END
__all__ = ['Emitter', 'EmitterError']
from error import YAMLError
from events import *
import re
class EmitterError(YAMLError):
    """Raised when the event stream violates the grammar above or an
    emitter parameter (version, tag handle/prefix, anchor) is invalid."""
    pass
class ScalarAnalysis(object):
    """Analysis of a scalar value: its text, gross shape, and which
    output styles (plain, single/double quoted, block) can express it.
    Produced by Emitter.analyze_scalar."""
    def __init__(self, scalar, empty, multiline,
            allow_flow_plain, allow_block_plain,
            allow_single_quoted, allow_double_quoted,
            allow_block):
        # The raw scalar text and its gross shape.
        self.scalar = scalar
        self.empty = empty
        self.multiline = multiline
        # Per-style permission flags.
        self.allow_flow_plain = allow_flow_plain
        self.allow_block_plain = allow_block_plain
        self.allow_single_quoted = allow_single_quoted
        self.allow_double_quoted = allow_double_quoted
        self.allow_block = allow_block
class Emitter(object):
DEFAULT_TAG_PREFIXES = {
u'!' : u'!',
u'tag:yaml.org,2002:' : u'!!',
}
    def __init__(self, stream, canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None):
        """Create an emitter writing YAML text to *stream*.

        canonical forces the verbose canonical form; indent (2..9) and
        width tune the layout; allow_unicode lets non-ASCII characters
        through unescaped; line_break selects CR, LF or CRLF output.
        """
        # The stream should have the methods `write` and possibly `flush`.
        self.stream = stream
        # Encoding can be overridden by STREAM-START.
        self.encoding = None
        # Emitter is a state machine with a stack of states to handle nested
        # structures.
        self.states = []
        self.state = self.expect_stream_start
        # Current event and the event queue.
        self.events = []
        self.event = None
        # The current indentation level and the stack of previous indents.
        self.indents = []
        self.indent = None
        # Flow level.
        self.flow_level = 0
        # Contexts.
        self.root_context = False
        self.sequence_context = False
        self.mapping_context = False
        self.simple_key_context = False
        # Characteristics of the last emitted character:
        # - current position.
        # - is it a whitespace?
        # - is it an indention character
        # (indentation space, '-', '?', or ':')?
        self.line = 0
        self.column = 0
        self.whitespace = True
        self.indention = True
        # Formatting details.
        self.canonical = canonical
        self.allow_unicode = allow_unicode
        self.best_indent = 2
        if indent and 1 < indent < 10:
            self.best_indent = indent
        self.best_width = 80
        if width and width > self.best_indent*2:
            self.best_width = width
        self.best_line_break = u'\n'
        if line_break in [u'\r', u'\n', u'\r\n']:
            self.best_line_break = line_break
        # Tag prefixes.
        self.tag_prefixes = None
        # Prepared anchor and tag.
        self.prepared_anchor = None
        self.prepared_tag = None
        # Scalar analysis and style.
        self.analysis = None
        self.style = None
    def emit(self, event):
        """Queue *event* and process queued events as soon as enough
        lookahead is available for the current state handler."""
        self.events.append(event)
        while not self.need_more_events():
            self.event = self.events.pop(0)
            self.state()
            self.event = None
    # In some cases, we wait for a few next events before emitting.
def need_more_events(self):
if not self.events:
return True
event = self.events[0]
if isinstance(event, DocumentStartEvent):
return self.need_events(1)
elif isinstance(event, SequenceStartEvent):
return self.need_events(2)
elif isinstance(event, MappingStartEvent):
return self.need_events(3)
else:
return False
def need_events(self, count):
level = 0
for event in self.events[1:]:
if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
level += 1
elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
level -= 1
elif isinstance(event, StreamEndEvent):
level = -1
if level < 0:
return False
return (len(self.events) < count+1)
def increase_indent(self, flow=False, indentless=False):
self.indents.append(self.indent)
if self.indent is None:
if flow:
self.indent = self.best_indent
else:
self.indent = 0
elif not indentless:
self.indent += self.best_indent
    # States.
    # Stream handlers.
    def expect_stream_start(self):
        """Initial state: the very first event must be StreamStartEvent."""
        if isinstance(self.event, StreamStartEvent):
            if self.event.encoding:
                self.encoding = self.event.encoding
            self.write_stream_start()
            self.state = self.expect_first_document_start
        else:
            raise EmitterError("expected StreamStartEvent, but got %s"
                    % self.event)
    def expect_nothing(self):
        """Terminal state: no events are legal after the stream ended."""
        raise EmitterError("expected nothing, but got %s" % self.event)
    # Document handlers.
    def expect_first_document_start(self):
        """State for the first document, which may omit '---'."""
        return self.expect_document_start(first=True)
    def expect_document_start(self, first=False):
        """Emit %YAML/%TAG directives and the optional '---' marker, or
        finish the stream on StreamEndEvent."""
        if isinstance(self.event, DocumentStartEvent):
            if self.event.version:
                version_text = self.prepare_version(self.event.version)
                self.write_version_directive(version_text)
            self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
            if self.event.tags:
                handles = self.event.tags.keys()
                handles.sort()
                for handle in handles:
                    prefix = self.event.tags[handle]
                    self.tag_prefixes[prefix] = handle
                    handle_text = self.prepare_tag_handle(handle)
                    prefix_text = self.prepare_tag_prefix(prefix)
                    self.write_tag_directive(handle_text, prefix_text)
            # '---' may be omitted for the first document only when
            # nothing (directives, explicit marker, canonical mode, a
            # non-empty root) requires it.
            implicit = (first and not self.event.explicit and not self.canonical
                    and not self.event.version and not self.event.tags
                    and not self.check_empty_document())
            if not implicit:
                self.write_indent()
                self.write_indicator(u'---', True)
                if self.canonical:
                    self.write_indent()
            self.state = self.expect_document_root
        elif isinstance(self.event, StreamEndEvent):
            self.write_stream_end()
            self.state = self.expect_nothing
        else:
            raise EmitterError("expected DocumentStartEvent, but got %s"
                    % self.event)
    def expect_document_end(self):
        """Write the optional '...' end marker and flush the stream."""
        if isinstance(self.event, DocumentEndEvent):
            self.write_indent()
            if self.event.explicit:
                self.write_indicator(u'...', True)
                self.write_indent()
            self.flush_stream()
            self.state = self.expect_document_start
        else:
            raise EmitterError("expected DocumentEndEvent, but got %s"
                    % self.event)
    def expect_document_root(self):
        """Serialize the root node; afterwards expect DocumentEndEvent."""
        self.states.append(self.expect_document_end)
        self.expect_node(root=True)
    # Node handlers.
    def expect_node(self, root=False, sequence=False, mapping=False,
            simple_key=False):
        """Dispatch the current event to the alias, scalar, sequence or
        mapping handler; the keyword flags record the syntactic context
        the node appears in."""
        self.root_context = root
        self.sequence_context = sequence
        self.mapping_context = mapping
        self.simple_key_context = simple_key
        if isinstance(self.event, AliasEvent):
            self.expect_alias()
        elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
            self.process_anchor(u'&')
            self.process_tag()
            if isinstance(self.event, ScalarEvent):
                self.expect_scalar()
            elif isinstance(self.event, SequenceStartEvent):
                # Flow style is forced inside a flow collection, in
                # canonical mode, on request, or for empty collections.
                if self.flow_level or self.canonical or self.event.flow_style \
                        or self.check_empty_sequence():
                    self.expect_flow_sequence()
                else:
                    self.expect_block_sequence()
            elif isinstance(self.event, MappingStartEvent):
                if self.flow_level or self.canonical or self.event.flow_style \
                        or self.check_empty_mapping():
                    self.expect_flow_mapping()
                else:
                    self.expect_block_mapping()
        else:
            raise EmitterError("expected NodeEvent, but got %s" % self.event)
    def expect_alias(self):
        """Write an alias reference ('*anchor') and pop the next state."""
        if self.event.anchor is None:
            raise EmitterError("anchor is not specified for alias")
        self.process_anchor(u'*')
        self.state = self.states.pop()
    def expect_scalar(self):
        """Write a scalar value using a temporary one-shot indent level."""
        self.increase_indent(flow=True)
        self.process_scalar()
        self.indent = self.indents.pop()
        self.state = self.states.pop()
    # Flow sequence handlers.
    def expect_flow_sequence(self):
        """Open a '[' flow sequence and enter its first-item state."""
        self.write_indicator(u'[', True, whitespace=True)
        self.flow_level += 1
        self.increase_indent(flow=True)
        self.state = self.expect_first_flow_sequence_item
    def expect_first_flow_sequence_item(self):
        """Emit the first item, or close ']' at once when the sequence
        is empty; later items go through expect_flow_sequence_item."""
        if isinstance(self.event, SequenceEndEvent):
            self.indent = self.indents.pop()
            self.flow_level -= 1
            self.write_indicator(u']', False)
            self.state = self.states.pop()
        else:
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            self.states.append(self.expect_flow_sequence_item)
            self.expect_node(sequence=True)
    def expect_flow_sequence_item(self):
        """Emit ', item' for each subsequent item, or close ']' (with a
        trailing comma in canonical mode)."""
        if isinstance(self.event, SequenceEndEvent):
            self.indent = self.indents.pop()
            self.flow_level -= 1
            if self.canonical:
                self.write_indicator(u',', False)
                self.write_indent()
            self.write_indicator(u']', False)
            self.state = self.states.pop()
        else:
            self.write_indicator(u',', False)
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            self.states.append(self.expect_flow_sequence_item)
            self.expect_node(sequence=True)
    # Flow mapping handlers.
    def expect_flow_mapping(self):
        """Open a '{' flow mapping and enter its first-key state."""
        self.write_indicator(u'{', True, whitespace=True)
        self.flow_level += 1
        self.increase_indent(flow=True)
        self.state = self.expect_first_flow_mapping_key
    def expect_first_flow_mapping_key(self):
        """Emit the first key (simple 'key:' form or explicit '?'), or
        close '}' immediately for an empty mapping."""
        if isinstance(self.event, MappingEndEvent):
            self.indent = self.indents.pop()
            self.flow_level -= 1
            self.write_indicator(u'}', False)
            self.state = self.states.pop()
        else:
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            if not self.canonical and self.check_simple_key():
                self.states.append(self.expect_flow_mapping_simple_value)
                self.expect_node(mapping=True, simple_key=True)
            else:
                self.write_indicator(u'?', True)
                self.states.append(self.expect_flow_mapping_value)
                self.expect_node(mapping=True)
    def expect_flow_mapping_key(self):
        """Emit ', key' for subsequent entries, or close '}' (with a
        trailing comma in canonical mode)."""
        if isinstance(self.event, MappingEndEvent):
            self.indent = self.indents.pop()
            self.flow_level -= 1
            if self.canonical:
                self.write_indicator(u',', False)
                self.write_indent()
            self.write_indicator(u'}', False)
            self.state = self.states.pop()
        else:
            self.write_indicator(u',', False)
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            if not self.canonical and self.check_simple_key():
                self.states.append(self.expect_flow_mapping_simple_value)
                self.expect_node(mapping=True, simple_key=True)
            else:
                self.write_indicator(u'?', True)
                self.states.append(self.expect_flow_mapping_value)
                self.expect_node(mapping=True)
    def expect_flow_mapping_simple_value(self):
        """Emit ': value' directly after a simple key."""
        self.write_indicator(u':', False)
        self.states.append(self.expect_flow_mapping_key)
        self.expect_node(mapping=True)
    def expect_flow_mapping_value(self):
        """Emit ': value' (space-separated) after an explicit '?' key."""
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        self.write_indicator(u':', True)
        self.states.append(self.expect_flow_mapping_key)
        self.expect_node(mapping=True)
    # Block sequence handlers.
    def expect_block_sequence(self):
        """Open a block sequence; nested directly inside a mapping value
        it stays at the parent's indent ('indentless')."""
        indentless = (self.mapping_context and not self.indention)
        self.increase_indent(flow=False, indentless=indentless)
        self.state = self.expect_first_block_sequence_item
    def expect_first_block_sequence_item(self):
        """First block sequence item (cannot be an end event)."""
        return self.expect_block_sequence_item(first=True)
    def expect_block_sequence_item(self, first=False):
        """Emit '- item' entries until SequenceEndEvent."""
        if not first and isinstance(self.event, SequenceEndEvent):
            self.indent = self.indents.pop()
            self.state = self.states.pop()
        else:
            self.write_indent()
            self.write_indicator(u'-', True, indention=True)
            self.states.append(self.expect_block_sequence_item)
            self.expect_node(sequence=True)
    # Block mapping handlers.
    def expect_block_mapping(self):
        """Open a block mapping one indent level deeper."""
        self.increase_indent(flow=False)
        self.state = self.expect_first_block_mapping_key
    def expect_first_block_mapping_key(self):
        """First block mapping key (cannot be an end event)."""
        return self.expect_block_mapping_key(first=True)
    def expect_block_mapping_key(self, first=False):
        """Emit a key, using the explicit '?' form when the key is too
        long or complex for the simple 'key:' form."""
        if not first and isinstance(self.event, MappingEndEvent):
            self.indent = self.indents.pop()
            self.state = self.states.pop()
        else:
            self.write_indent()
            if self.check_simple_key():
                self.states.append(self.expect_block_mapping_simple_value)
                self.expect_node(mapping=True, simple_key=True)
            else:
                self.write_indicator(u'?', True, indention=True)
                self.states.append(self.expect_block_mapping_value)
                self.expect_node(mapping=True)
    def expect_block_mapping_simple_value(self):
        """Emit ': value' right after a simple key."""
        self.write_indicator(u':', False)
        self.states.append(self.expect_block_mapping_key)
        self.expect_node(mapping=True)
    def expect_block_mapping_value(self):
        """Emit ':' on its own line after an explicit '?' key."""
        self.write_indent()
        self.write_indicator(u':', True, indention=True)
        self.states.append(self.expect_block_mapping_key)
        self.expect_node(mapping=True)
    # Checkers.
    def check_empty_sequence(self):
        """Is the current event the start of an empty sequence?"""
        return (isinstance(self.event, SequenceStartEvent) and self.events
                and isinstance(self.events[0], SequenceEndEvent))
    def check_empty_mapping(self):
        """Is the current event the start of an empty mapping?"""
        return (isinstance(self.event, MappingStartEvent) and self.events
                and isinstance(self.events[0], MappingEndEvent))
    def check_empty_document(self):
        """Is the current document a bare empty scalar with no anchor,
        tag, or content?  Such a document needs no '---' marker."""
        if not isinstance(self.event, DocumentStartEvent) or not self.events:
            return False
        event = self.events[0]
        return (isinstance(event, ScalarEvent) and event.anchor is None
                and event.tag is None and event.implicit and event.value == u'')
    def check_simple_key(self):
        """Decide whether the current node may be written as a simple
        ('key:') mapping key: the prepared anchor + tag + scalar must
        total under 128 characters and the scalar must be neither empty
        nor multiline.  As a side effect the anchor/tag/analysis are
        prepared and cached for the subsequent write."""
        length = 0
        if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
            if self.prepared_anchor is None:
                self.prepared_anchor = self.prepare_anchor(self.event.anchor)
            length += len(self.prepared_anchor)
        if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
                and self.event.tag is not None:
            if self.prepared_tag is None:
                self.prepared_tag = self.prepare_tag(self.event.tag)
            length += len(self.prepared_tag)
        if isinstance(self.event, ScalarEvent):
            if self.analysis is None:
                self.analysis = self.analyze_scalar(self.event.value)
            length += len(self.analysis.scalar)
        return (length < 128 and (isinstance(self.event, AliasEvent)
            or (isinstance(self.event, ScalarEvent)
                    and not self.analysis.empty and not self.analysis.multiline)
            or self.check_empty_sequence() or self.check_empty_mapping()))
    # Anchor, Tag, and Scalar processors.
    def process_anchor(self, indicator):
        """Write '&anchor' or '*anchor' for the current event, if any;
        consumes the cached prepared_anchor either way."""
        if self.event.anchor is None:
            self.prepared_anchor = None
            return
        if self.prepared_anchor is None:
            self.prepared_anchor = self.prepare_anchor(self.event.anchor)
        if self.prepared_anchor:
            self.write_indicator(indicator+self.prepared_anchor, True)
        self.prepared_anchor = None
    def process_tag(self):
        """Write the node's tag unless the resolver would infer it from
        the chosen scalar style (or from the collection) anyway."""
        tag = self.event.tag
        if isinstance(self.event, ScalarEvent):
            if self.style is None:
                self.style = self.choose_scalar_style()
            if ((not self.canonical or tag is None) and
                ((self.style == '' and self.event.implicit[0])
                        or (self.style != '' and self.event.implicit[1]))):
                self.prepared_tag = None
                return
            if self.event.implicit[0] and tag is None:
                # Plain-implicit scalar emitted in a non-plain style;
                # presumably forced to the non-specific '!' tag so it
                # still resolves as intended -- TODO confirm.
                tag = u'!'
                self.prepared_tag = None
        else:
            if (not self.canonical or tag is None) and self.event.implicit:
                self.prepared_tag = None
                return
        if tag is None:
            raise EmitterError("tag is not specified")
        if self.prepared_tag is None:
            self.prepared_tag = self.prepare_tag(tag)
        if self.prepared_tag:
            self.write_indicator(self.prepared_tag, True)
        self.prepared_tag = None
    def choose_scalar_style(self):
        """Pick the output style for the current scalar event, honouring
        the requested style when the analysis allows it and falling back
        to double quotes, which can express any value."""
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        if self.event.style == '"' or self.canonical:
            return '"'
        # Plain style: only when implicit resolution matches and the
        # analysis permits plain in the current (flow/block) context.
        if not self.event.style and self.event.implicit[0]:
            if (not (self.simple_key_context and
                    (self.analysis.empty or self.analysis.multiline))
                and (self.flow_level and self.analysis.allow_flow_plain
                    or (not self.flow_level and self.analysis.allow_block_plain))):
                return ''
        # Block styles ('|' or '>') are impossible in flow context or
        # inside a simple key.
        if self.event.style and self.event.style in '|>':
            if (not self.flow_level and not self.simple_key_context
                    and self.analysis.allow_block):
                return self.event.style
        if not self.event.style or self.event.style == '\'':
            if (self.analysis.allow_single_quoted and
                    not (self.simple_key_context and self.analysis.multiline)):
                return '\''
        return '"'
    def process_scalar(self):
        """Write the current scalar using the style selected by
        choose_scalar_style, then drop the cached analysis/style."""
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        if self.style is None:
            self.style = self.choose_scalar_style()
        # Long lines may be split anywhere except inside a simple key.
        split = (not self.simple_key_context)
        #if self.analysis.multiline and split \
        #        and (not self.style or self.style in '\'\"'):
        #    self.write_indent()
        if self.style == '"':
            self.write_double_quoted(self.analysis.scalar, split)
        elif self.style == '\'':
            self.write_single_quoted(self.analysis.scalar, split)
        elif self.style == '>':
            self.write_folded(self.analysis.scalar)
        elif self.style == '|':
            self.write_literal(self.analysis.scalar)
        else:
            self.write_plain(self.analysis.scalar, split)
        self.analysis = None
        self.style = None
# Analyzers.
def prepare_version(self, version):
major, minor = version
if major != 1:
raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
return u'%d.%d' % (major, minor)
def prepare_tag_handle(self, handle):
if not handle:
raise EmitterError("tag handle must not be empty")
if handle[0] != u'!' or handle[-1] != u'!':
raise EmitterError("tag handle must start and end with '!': %r"
% (handle.encode('utf-8')))
for ch in handle[1:-1]:
if not (u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
or ch in u'-_'):
raise EmitterError("invalid character %r in the tag handle: %r"
% (ch.encode('utf-8'), handle.encode('utf-8')))
return handle
    def prepare_tag_prefix(self, prefix):
        """Return *prefix* with characters outside the allowed URI set
        percent-escaped as UTF-8 bytes; a leading '!' passes through."""
        if not prefix:
            raise EmitterError("tag prefix must not be empty")
        chunks = []
        start = end = 0
        if prefix[0] == u'!':
            end = 1
        while end < len(prefix):
            ch = prefix[end]
            if u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
                    or ch in u'-;/?!:@&=+$,_.~*\'()[]':
                end += 1
            else:
                if start < end:
                    chunks.append(prefix[start:end])
                start = end = end+1
                # Escape each UTF-8 byte of the character (Python 2:
                # iterating a byte string yields 1-char strings).
                data = ch.encode('utf-8')
                for ch in data:
                    chunks.append(u'%%%02X' % ord(ch))
        if start < end:
            chunks.append(prefix[start:end])
        return u''.join(chunks)
    def prepare_tag(self, tag):
        """Render a tag for output: shorten it with a registered handle
        when possible, percent-escape the suffix, or fall back to the
        verbatim u'!<...>' form when no handle matches."""
        if not tag:
            raise EmitterError("tag must not be empty")
        if tag == u'!':
            return tag
        handle = None
        suffix = tag
        for prefix in self.tag_prefixes:
            if tag.startswith(prefix) \
                    and (prefix == u'!' or len(prefix) < len(tag)):
                handle = self.tag_prefixes[prefix]
                suffix = tag[len(prefix):]
        chunks = []
        start = end = 0
        while end < len(suffix):
            ch = suffix[end]
            # '!' is only allowed in the suffix under a non-primary
            # handle; everything outside the URI set is %-escaped.
            if u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
                    or ch in u'-;/?:@&=+$,_.~*\'()[]' \
                    or (ch == u'!' and handle != u'!'):
                end += 1
            else:
                if start < end:
                    chunks.append(suffix[start:end])
                start = end = end+1
                data = ch.encode('utf-8')
                for ch in data:
                    chunks.append(u'%%%02X' % ord(ch))
        if start < end:
            chunks.append(suffix[start:end])
        suffix_text = u''.join(chunks)
        if handle:
            return u'%s%s' % (handle, suffix_text)
        else:
            return u'!<%s>' % suffix_text
def prepare_anchor(self, anchor):
if not anchor:
raise EmitterError("anchor must not be empty")
for ch in anchor:
if not (u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
or ch in u'-_'):
raise EmitterError("invalid character %r in the anchor: %r"
% (ch.encode('utf-8'), anchor.encode('utf-8')))
return anchor
    def analyze_scalar(self, scalar):
        """Scan *scalar* once and return a ScalarAnalysis describing
        which output styles can express it.

        The scan tracks YAML indicator characters, special/unicode
        characters, line breaks, and how spaces and breaks mix
        (leading, trailing, inline, or mixed runs); the per-style
        permission flags are then derived from those facts.
        """
        # Empty scalar is a special case.
        if not scalar:
            return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
                    allow_flow_plain=False, allow_block_plain=True,
                    allow_single_quoted=True, allow_double_quoted=True,
                    allow_block=False)
        # Indicators and special characters.
        block_indicators = False
        flow_indicators = False
        line_breaks = False
        special_characters = False
        # Whitespaces.
        inline_spaces = False # non-space space+ non-space
        inline_breaks = False # non-space break+ non-space
        leading_spaces = False # ^ space+ (non-space | $)
        leading_breaks = False # ^ break+ (non-space | $)
        trailing_spaces = False # (^ | non-space) space+ $
        trailing_breaks = False # (^ | non-space) break+ $
        inline_breaks_spaces = False # non-space break+ space+ non-space
        mixed_breaks_spaces = False # anything else
        # Check document indicators.
        if scalar.startswith(u'---') or scalar.startswith(u'...'):
            block_indicators = True
            flow_indicators = True
        # First character or preceded by a whitespace.
        preceeded_by_space = True
        # Last character or followed by a whitespace.
        followed_by_space = (len(scalar) == 1 or
                scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
        # The current series of whitespaces contain plain spaces.
        spaces = False
        # The current series of whitespaces contain line breaks.
        breaks = False
        # The current series of whitespaces contain a space followed by a
        # break.
        mixed = False
        # The current series of whitespaces start at the beginning of the
        # scalar.
        leading = False
        index = 0
        while index < len(scalar):
            ch = scalar[index]
            # Check for indicators.
            if index == 0:
                # Leading indicators are special characters.
                if ch in u'#,[]{}&*!|>\'\"%@`':
                    flow_indicators = True
                    block_indicators = True
                if ch in u'?:':
                    flow_indicators = True
                    if followed_by_space:
                        block_indicators = True
                if ch == u'-' and followed_by_space:
                    flow_indicators = True
                    block_indicators = True
            else:
                # Some indicators cannot appear within a scalar as well.
                if ch in u',?[]{}':
                    flow_indicators = True
                if ch == u':':
                    flow_indicators = True
                    if followed_by_space:
                        block_indicators = True
                if ch == u'#' and preceeded_by_space:
                    flow_indicators = True
                    block_indicators = True
            # Check for line breaks, special, and unicode characters.
            if ch in u'\n\x85\u2028\u2029':
                line_breaks = True
            if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
                if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
                        or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
                    # NOTE(review): unicode_characters is assigned but
                    # never read anywhere in this method.
                    unicode_characters = True
                    if not self.allow_unicode:
                        special_characters = True
                else:
                    special_characters = True
            # Spaces, line breaks, and how they are mixed. State machine.
            # Start or continue series of whitespaces.
            if ch in u' \n\x85\u2028\u2029':
                if spaces and breaks:
                    if ch != u' ': # break+ (space+ break+) => mixed
                        mixed = True
                elif spaces:
                    if ch != u' ': # (space+ break+) => mixed
                        breaks = True
                        mixed = True
                elif breaks:
                    if ch == u' ': # break+ space+
                        spaces = True
                else:
                    leading = (index == 0)
                    if ch == u' ': # space+
                        spaces = True
                    else: # break+
                        breaks = True
            # Series of whitespaces ended with a non-space.
            elif spaces or breaks:
                if leading:
                    if spaces and breaks:
                        mixed_breaks_spaces = True
                    elif spaces:
                        leading_spaces = True
                    elif breaks:
                        leading_breaks = True
                else:
                    if mixed:
                        mixed_breaks_spaces = True
                    elif spaces and breaks:
                        inline_breaks_spaces = True
                    elif spaces:
                        inline_spaces = True
                    elif breaks:
                        inline_breaks = True
                spaces = breaks = mixed = leading = False
            # Series of whitespaces reach the end.
            if (spaces or breaks) and (index == len(scalar)-1):
                if spaces and breaks:
                    mixed_breaks_spaces = True
                elif spaces:
                    trailing_spaces = True
                    if leading:
                        leading_spaces = True
                elif breaks:
                    trailing_breaks = True
                    if leading:
                        leading_breaks = True
                spaces = breaks = mixed = leading = False
            # Prepare for the next character.
            index += 1
            preceeded_by_space = (ch in u'\0 \t\r\n\x85\u2028\u2029')
            followed_by_space = (index+1 >= len(scalar) or
                    scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
        # Let's decide what styles are allowed.
        allow_flow_plain = True
        allow_block_plain = True
        allow_single_quoted = True
        allow_double_quoted = True
        allow_block = True
        # Leading and trailing whitespace are bad for plain scalars. We also
        # do not want to mess with leading whitespaces for block scalars.
        if leading_spaces or leading_breaks or trailing_spaces:
            allow_flow_plain = allow_block_plain = allow_block = False
        # Trailing breaks are fine for block scalars, but unacceptable for
        # plain scalars.
        if trailing_breaks:
            allow_flow_plain = allow_block_plain = False
        # The combination of (space+ break+) is only acceptable for block
        # scalars.
        if inline_breaks_spaces:
            allow_flow_plain = allow_block_plain = allow_single_quoted = False
        # Mixed spaces and breaks, as well as special character are only
        # allowed for double quoted scalars.
        if mixed_breaks_spaces or special_characters:
            allow_flow_plain = allow_block_plain = \
                allow_single_quoted = allow_block = False
        # We don't emit multiline plain scalars.
        if line_breaks:
            allow_flow_plain = allow_block_plain = False
        # Flow indicators are forbidden for flow plain scalars.
        if flow_indicators:
            allow_flow_plain = False
        # Block indicators are forbidden for block plain scalars.
        if block_indicators:
            allow_block_plain = False
        return ScalarAnalysis(scalar=scalar,
                empty=False, multiline=line_breaks,
                allow_flow_plain=allow_flow_plain,
                allow_block_plain=allow_block_plain,
                allow_single_quoted=allow_single_quoted,
                allow_double_quoted=allow_double_quoted,
                allow_block=allow_block)
    # Writers.
    def flush_stream(self):
        """Flush the underlying stream if it supports flushing."""
        if hasattr(self.stream, 'flush'):
            self.stream.flush()
    def write_stream_start(self):
        """Emit an initial byte-order marker for UTF-16 encodings."""
        # Write BOM if needed.
        if self.encoding and self.encoding.startswith('utf-16'):
            self.stream.write(u'\xFF\xFE'.encode(self.encoding))
    def write_stream_end(self):
        """Finish the stream by flushing any buffered output."""
        self.flush_stream()
def write_indicator(self, indicator, need_whitespace,
whitespace=False, indention=False):
if self.whitespace or not need_whitespace:
data = indicator
else:
data = u' '+indicator
self.whitespace = whitespace
self.indention = self.indention and indention
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
def write_indent(self):
indent = self.indent or 0
if not self.indention or self.column > indent \
or (self.column == indent and not self.whitespace):
self.write_line_break()
if self.column < indent:
self.whitespace = True
data = u' '*(indent-self.column)
self.column = indent
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
def write_line_break(self, data=None):
if data is None:
data = self.best_line_break
self.whitespace = True
self.indention = True
self.line += 1
self.column = 0
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
def write_version_directive(self, version_text):
data = u'%%YAML %s' % version_text
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_line_break()
def write_tag_directive(self, handle_text, prefix_text):
data = u'%%TAG %s %s' % (handle_text, prefix_text)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_line_break()
# Scalar streams.
    def write_single_quoted(self, text, split=True):
        """Write *text* as a single-quoted scalar.

        Quotes are escaped by doubling; a single space may be folded into a
        line break when *split* is true and the line exceeds best_width;
        runs of line breaks are re-emitted followed by fresh indentation.
        """
        self.write_indicator(u'\'', True)
        # State machine over runs: 'spaces' / 'breaks' track what kind of
        # run text[start:end] currently holds.
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                # A run of spaces just ended.
                if ch is None or ch != u' ':
                    # A lone interior space on an over-long line is folded
                    # into an indent (never at the very start or end).
                    if start+1 == end and self.column > self.best_width and split \
                            and start != 0 and end != len(text):
                        self.write_indent()
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            elif breaks:
                # A run of line breaks just ended: re-emit them (a leading
                # '\n' gets one extra break) and restore indentation.
                if ch is None or ch not in u'\n\x85\u2028\u2029':
                    if text[start] == u'\n':
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == u'\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    start = end
            else:
                # A run of plain characters: flush it before a space/break
                # run starts, at end of text, or before a quote.
                if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
                    if start < end:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                        start = end
            if ch == u'\'':
                # Escape a single quote by doubling it.
                data = u'\'\''
                self.column += 2
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                start = end + 1
            if ch is not None:
                spaces = (ch == u' ')
                breaks = (ch in u'\n\x85\u2028\u2029')
            end += 1
        self.write_indicator(u'\'', False)
ESCAPE_REPLACEMENTS = {
u'\0': u'0',
u'\x07': u'a',
u'\x08': u'b',
u'\x09': u't',
u'\x0A': u'n',
u'\x0B': u'v',
u'\x0C': u'f',
u'\x0D': u'r',
u'\x1B': u'e',
u'\"': u'\"',
u'\\': u'\\',
u'\x85': u'N',
u'\xA0': u'_',
u'\u2028': u'L',
u'\u2029': u'P',
}
    def write_double_quoted(self, text, split=True):
        """Write *text* as a double-quoted scalar.

        Characters that are unprintable (or non-ASCII when allow_unicode is
        false) are emitted as backslash escapes; over-long lines may be
        split with a trailing '\\' when *split* is true.
        """
        self.write_indicator(u'"', True)
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            # Decide whether ch must be escaped: special characters always,
            # anything outside printable ASCII unless allow_unicode admits
            # the printable Unicode ranges.
            if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
                    or not (u'\x20' <= ch <= u'\x7E'
                        or (self.allow_unicode
                            and (u'\xA0' <= ch <= u'\uD7FF'
                                or u'\uE000' <= ch <= u'\uFFFD'))):
                # Flush the pending run of literal characters first.
                if start < end:
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end
                if ch is not None:
                    # Prefer the short named escape; otherwise pick the
                    # narrowest numeric escape that fits the code point.
                    if ch in self.ESCAPE_REPLACEMENTS:
                        data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
                    elif ch <= u'\xFF':
                        data = u'\\x%02X' % ord(ch)
                    elif ch <= u'\uFFFF':
                        data = u'\\u%04X' % ord(ch)
                    else:
                        data = u'\\U%08X' % ord(ch)
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end+1
            # Split an over-long line after the pending run, marking the
            # continuation with a trailing backslash.
            if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
                    and self.column+(end-start) > self.best_width and split:
                data = text[start:end]+u'\\'
                if start < end:
                    start = end
                self.column += len(data)
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                self.write_indent()
                self.whitespace = False
                self.indention = False
                # A leading space on the continuation line must itself be
                # escaped so it is not swallowed by the indentation.
                if text[start] == u' ':
                    data = u'\\'
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
            end += 1
        self.write_indicator(u'"', False)
def determine_chomp(self, text):
tail = text[-2:]
while len(tail) < 2:
tail = u' '+tail
if tail[-1] in u'\n\x85\u2028\u2029':
if tail[-2] in u'\n\x85\u2028\u2029':
return u'+'
else:
return u''
else:
return u'-'
    def write_folded(self, text):
        """Write *text* as a folded block scalar ('>')."""
        chomp = self.determine_chomp(text)
        self.write_indicator(u'>'+chomp, True)
        self.write_indent()
        # leading_space: the current line of output starts with a space,
        # in which case folding must not absorb the following break.
        leading_space = False
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if breaks:
                # A run of line breaks just ended: fold (extra break) when
                # the surrounding lines allow it, then re-emit the breaks.
                if ch is None or ch not in u'\n\x85\u2028\u2029':
                    if not leading_space and ch is not None and ch != u' ' \
                            and text[start] == u'\n':
                        self.write_line_break()
                    leading_space = (ch == u' ')
                    for br in text[start:end]:
                        if br == u'\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    if ch is not None:
                        self.write_indent()
                    start = end
            elif spaces:
                # A run of spaces just ended: a lone space on an over-long
                # line becomes an indent, otherwise copy the run.
                if ch != u' ':
                    if start+1 == end and self.column > self.best_width:
                        self.write_indent()
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            else:
                # A run of ordinary characters: copy it verbatim; a final
                # line break terminates the scalar.
                if ch is None or ch in u' \n\x85\u2028\u2029':
                    data = text[start:end]
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    if ch is None:
                        self.write_line_break()
                    start = end
            if ch is not None:
                breaks = (ch in u'\n\x85\u2028\u2029')
                spaces = (ch == u' ')
            end += 1
    def write_literal(self, text):
        """Write *text* as a literal block scalar ('|')."""
        chomp = self.determine_chomp(text)
        self.write_indicator(u'|'+chomp, True)
        self.write_indent()
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if breaks:
                # A run of line breaks just ended: re-emit each break and
                # restore indentation unless the text is exhausted.
                if ch is None or ch not in u'\n\x85\u2028\u2029':
                    for br in text[start:end]:
                        if br == u'\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    if ch is not None:
                        self.write_indent()
                    start = end
            else:
                # A run of ordinary characters: copy it verbatim; a final
                # line break terminates the scalar.
                if ch is None or ch in u'\n\x85\u2028\u2029':
                    data = text[start:end]
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    if ch is None:
                        self.write_line_break()
                    start = end
            if ch is not None:
                breaks = (ch in u'\n\x85\u2028\u2029')
            end += 1
def write_plain(self, text, split=True):
if not text:
return
if not self.whitespace:
data = u' '
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.writespace = False
self.indention = False
spaces = False
breaks = False
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if spaces:
if ch != u' ':
if start+1 == end and self.column > self.best_width and split:
self.write_indent()
self.writespace = False
self.indention = False
else:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
elif breaks:
if ch not in u'\n\x85\u2028\u2029':
if text[start] == u'\n':
self.write_line_break()
for br in text[start:end]:
if br == u'\n':
self.write_line_break()
else:
self.write_line_break(br)
self.write_indent()
self.whitespace = False
self.indention = False
start = end
else:
if ch is None or ch in u' \n\x85\u2028\u2029':
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
if ch is not None:
spaces = (ch == u' ')
breaks = (ch in u'\n\x85\u2028\u2029')
end += 1
| Python |
from error import *
from tokens import *
from events import *
from nodes import *
from loader import *
from dumper import *
try:
from cyaml import *
except ImportError:
pass
def scan(stream, Loader=Loader):
    """Scan a YAML stream, yielding its scanning tokens one at a time."""
    ldr = Loader(stream)
    while ldr.check_token():
        yield ldr.get_token()
def parse(stream, Loader=Loader):
    """Parse a YAML stream, yielding its parsing events one at a time."""
    ldr = Loader(stream)
    while ldr.check_event():
        yield ldr.get_event()
def compose(stream, Loader=Loader):
    """Parse the first YAML document in *stream* and return its
    representation tree (None when the stream holds no document)."""
    ldr = Loader(stream)
    if ldr.check_node():
        return ldr.get_node()
def compose_all(stream, Loader=Loader):
    """Parse every YAML document in *stream*, yielding one representation
    tree per document."""
    ldr = Loader(stream)
    while ldr.check_node():
        yield ldr.get_node()
def load_all(stream, Loader=Loader):
    """Parse every YAML document in *stream*, yielding the corresponding
    Python object for each."""
    ldr = Loader(stream)
    while ldr.check_data():
        yield ldr.get_data()
def load(stream, Loader=Loader):
    """Parse the first YAML document in *stream* and return the
    corresponding Python object (None when the stream is empty)."""
    ldr = Loader(stream)
    if ldr.check_data():
        return ldr.get_data()
def safe_load_all(stream):
    """Like load_all, but resolving only basic YAML tags (SafeLoader)."""
    return load_all(stream, SafeLoader)
def safe_load(stream):
    """Like load, but resolving only basic YAML tags (SafeLoader)."""
    return load(stream, SafeLoader)
def emit(events, stream=None, Dumper=Dumper,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None):
    """Emit YAML parsing events into *stream*.

    When *stream* is None the YAML text is accumulated in an in-memory
    buffer and returned as a string.
    """
    collect = None
    if stream is None:
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        stream = StringIO()
        collect = stream.getvalue
    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break)
    for event in events:
        dumper.emit(event)
    if collect is not None:
        return collect()
def serialize_all(nodes, stream=None, Dumper=Dumper,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None,
        encoding='utf-8', explicit_start=None, explicit_end=None,
        version=None, tags=None):
    """Serialize a sequence of representation trees into a YAML stream.

    When *stream* is None the YAML text is accumulated in an in-memory
    buffer and returned as a string.
    """
    collect = None
    if stream is None:
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        stream = StringIO()
        collect = stream.getvalue
    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break,
            encoding=encoding, version=version, tags=tags,
            explicit_start=explicit_start, explicit_end=explicit_end)
    dumper.open()
    for node in nodes:
        dumper.serialize(node)
    dumper.close()
    if collect is not None:
        return collect()
def serialize(node, stream=None, Dumper=Dumper, **kwds):
    """Serialize a single representation tree into a YAML stream; return
    the produced string when *stream* is None."""
    return serialize_all([node], stream, Dumper=Dumper, **kwds)
def dump_all(documents, stream=None, Dumper=Dumper,
        default_style=None, default_flow_style=None,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None,
        encoding='utf-8', explicit_start=None, explicit_end=None,
        version=None, tags=None):
    """Serialize a sequence of Python objects into a YAML stream.

    When *stream* is None the YAML text is accumulated in an in-memory
    buffer and returned as a string.
    """
    collect = None
    if stream is None:
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        stream = StringIO()
        collect = stream.getvalue
    dumper = Dumper(stream, default_style=default_style,
            default_flow_style=default_flow_style,
            canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break,
            encoding=encoding, version=version, tags=tags,
            explicit_start=explicit_start, explicit_end=explicit_end)
    dumper.open()
    for data in documents:
        dumper.represent(data)
    dumper.close()
    if collect is not None:
        return collect()
def dump(data, stream=None, Dumper=Dumper, **kwds):
    """Serialize a single Python object into a YAML stream; return the
    produced string when *stream* is None."""
    return dump_all([data], stream, Dumper=Dumper, **kwds)
def safe_dump_all(documents, stream=None, **kwds):
    """Like dump_all, but producing only basic YAML tags (SafeDumper)."""
    return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
def safe_dump(data, stream=None, **kwds):
    """Like dump, but producing only basic YAML tags (SafeDumper)."""
    return dump_all([data], stream, Dumper=SafeDumper, **kwds)
def add_implicit_resolver(tag, regexp, first=None,
        Loader=Loader, Dumper=Dumper):
    """Register an implicit scalar detector on both Loader and Dumper.

    Scalars matching *regexp* are assigned *tag*; *first* is an optional
    sequence of possible initial characters (or None).
    """
    for cls in (Loader, Dumper):
        cls.add_implicit_resolver(tag, regexp, first)
def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
    """Register a path-based resolver for *tag* on both Loader and Dumper.

    A path is a list of keys (strings, integers, or None) addressing a
    node in the representation tree.
    """
    for cls in (Loader, Dumper):
        cls.add_path_resolver(tag, path, kind)
def add_constructor(tag, constructor, Loader=Loader):
    """Register *constructor*, called as (loader, node) -> object, for
    nodes tagged *tag*."""
    Loader.add_constructor(tag, constructor)
def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
    """Register *multi_constructor* for every tag starting with
    *tag_prefix*; it is called as (loader, tag_suffix, node) -> object."""
    Loader.add_multi_constructor(tag_prefix, multi_constructor)
def add_representer(data_type, representer, Dumper=Dumper):
    """Register *representer*, called as (dumper, obj) -> node, for
    instances of exactly *data_type*."""
    Dumper.add_representer(data_type, representer)
def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
    """Register *multi_representer*, called as (dumper, obj) -> node, for
    *data_type* and its subtypes."""
    Dumper.add_multi_representer(data_type, multi_representer)
class YAMLObjectMetaclass(type):
    """Metaclass for YAMLObject: auto-registers subclasses that declare a
    non-None ``yaml_tag`` with their loader and dumper."""
    def __init__(cls, name, bases, kwds):
        super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
        if kwds.get('yaml_tag') is not None:
            cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
            cls.yaml_dumper.add_representer(cls, cls.to_yaml)
class YAMLObject(object):
    """Mixin that lets a class dump itself to and load itself from a YAML
    stream, keyed by its ``yaml_tag``."""
    __metaclass__ = YAMLObjectMetaclass
    __slots__ = ()  # no direct instantiation, so allow immutable subclasses
    yaml_loader = Loader
    yaml_dumper = Dumper
    yaml_tag = None
    yaml_flow_style = None
    @classmethod
    def from_yaml(cls, loader, node):
        """Convert a representation node to a Python object."""
        return loader.construct_yaml_object(node, cls)
    @classmethod
    def to_yaml(cls, dumper, data):
        """Convert a Python object to a representation node."""
        return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
                flow_style=cls.yaml_flow_style)
| Python |
# Names exported by `from <this module> import *`.
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
class Mark(object):
    """A position (name, index, line, column) within a YAML source buffer,
    able to render a short annotated snippet around itself."""
    def __init__(self, name, index, line, column, buffer, pointer):
        self.name = name
        self.index = index
        self.line = line
        self.column = column
        self.buffer = buffer
        self.pointer = pointer
    def get_snippet(self, indent=4, max_length=75):
        """Return an indented excerpt of the current line with a caret
        under the marked position, or None when no buffer is available."""
        if self.buffer is None:
            return None
        stop_chars = u'\0\r\n\x85\u2028\u2029'
        half = max_length/2-1
        # Scan left to the start of the line, clipping with ' ... '.
        head = ''
        start = self.pointer
        while start > 0 and self.buffer[start-1] not in stop_chars:
            start -= 1
            if self.pointer-start > half:
                head = ' ... '
                start += 5
                break
        # Scan right to the end of the line, clipping with ' ... '.
        tail = ''
        end = self.pointer
        while end < len(self.buffer) and self.buffer[end] not in stop_chars:
            end += 1
            if end-self.pointer > half:
                tail = ' ... '
                end -= 5
                break
        snippet = self.buffer[start:end].encode('utf-8')
        return ' '*indent + head + snippet + tail + '\n' \
                + ' '*(indent+self.pointer-start+len(head)) + '^'
    def __str__(self):
        """Describe the mark as ' in "<name>", line L, column C', with the
        snippet appended when one is available."""
        where = " in \"%s\", line %d, column %d" \
                % (self.name, self.line+1, self.column+1)
        snippet = self.get_snippet()
        if snippet is not None:
            where += ":\n"+snippet
        return where
class YAMLError(Exception):
    """Base class for all errors raised by this YAML package."""
class MarkedYAMLError(YAMLError):
    """YAML error carrying optional context/problem messages and marks."""
    def __init__(self, context=None, context_mark=None,
            problem=None, problem_mark=None, note=None):
        self.context = context
        self.context_mark = context_mark
        self.problem = problem
        self.problem_mark = problem_mark
        self.note = note
    def _context_mark_redundant(self):
        # The context mark is redundant only when both a problem message
        # and a problem mark exist and the two marks point at the same
        # place in the same source.
        if self.problem is None or self.problem_mark is None:
            return False
        cm, pm = self.context_mark, self.problem_mark
        return (cm.name == pm.name and cm.line == pm.line
                and cm.column == pm.column)
    def __str__(self):
        """Join the non-None parts, one per line, suppressing a context
        mark that coincides with the problem mark."""
        parts = []
        if self.context is not None:
            parts.append(self.context)
        if self.context_mark is not None and not self._context_mark_redundant():
            parts.append(str(self.context_mark))
        if self.problem is not None:
            parts.append(self.problem)
        if self.problem_mark is not None:
            parts.append(str(self.problem_mark))
        if self.note is not None:
            parts.append(self.note)
        return '\n'.join(parts)
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Convenience wrapper for starting an appengine tool."""
import os
import sys
# Refuse to run on interpreters so old that sys.version_info is missing.
if not hasattr(sys, 'version_info'):
  sys.stderr.write('Very old versions of Python are not supported. Please '
                   'use version 2.5 or greater.\n')
  sys.exit(1)
version_tuple = tuple(sys.version_info[:2])
# Hard error below 2.4; 2.4 itself only gets a warning.
# NOTE(review): the messages recommend 2.5 while the hard check is < 2.4 --
# presumably 2.4 is tolerated but deprecated; confirm intent.
if version_tuple < (2, 4):
  sys.stderr.write('Error: Python %d.%d is not supported. Please use '
                   'version 2.5 or greater.\n' % version_tuple)
  sys.exit(1)
if version_tuple == (2, 4):
  sys.stderr.write('Warning: Python 2.4 is not supported; this program may '
                   'break. Please use version 2.5 or greater.\n')
# Directory containing this wrapper (symlinks resolved) and the real tools.
DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
SCRIPT_DIR = os.path.join(DIR_PATH, 'google', 'appengine', 'tools')
# Bundled libraries that must be importable by the tool scripts.
EXTRA_PATHS = [
  DIR_PATH,
  os.path.join(DIR_PATH, 'lib', 'antlr3'),
  os.path.join(DIR_PATH, 'lib', 'django'),
  os.path.join(DIR_PATH, 'lib', 'webob'),
  os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
]
# Wrapper scripts whose real implementation lives under a different name.
SCRIPT_EXCEPTIONS = {
  "dev_appserver.py" : "dev_appserver_main.py"
}
def run_file(file_path, globals_, script_dir=SCRIPT_DIR):
  """Execute the file at the specified path with the passed-in globals."""
  sys.path = EXTRA_PATHS + sys.path
  base_name = os.path.basename(file_path)
  real_name = SCRIPT_EXCEPTIONS.get(base_name, base_name)
  execfile(os.path.join(script_dir, real_name), globals_)
# Entry point: delegate to the matching tool script under
# google/appengine/tools, passing along this module's globals.
if __name__ == '__main__':
  run_file(__file__, globals())
| Python |
# coding: utf-8
from config.url import urls
import web
app = web.application(urls, globals())

def notfound():
    """404 handler: return a friendly not-found page."""
    return web.notfound('Sorry, the page you were looking for was not found.')

def internalerror():
    """500 handler: return a friendly internal-error page."""
    return web.internalerror('Bad, bad server. No donut for you.')

app.notfound = notfound
# BUG FIX: internalerror was defined but never registered, so web.py kept
# showing its default 500 page.
app.internalerror = internalerror

if __name__ == '__main__':
    app.run()
| Python |
# coding: utf-8
#
# 获得文件夹的大小
# @author wangtao
# @version 2011.07.10
#
import os
import sys
from os.path import join, getsize
# Directory to measure, taken from the first command-line argument
# (raises IndexError when the script is run without one).
inputdir = sys.argv[1]
def getdirsize(dir):
    """Return the total size in bytes of all files under *dir*, recursively.

    Only regular files reachable via os.walk are counted; directory
    entries themselves contribute nothing.
    """
    # NOTE: the Python-2-only '0L' literal was replaced by '0' -- ints
    # auto-promote, so behaviour is identical, and the function now also
    # compiles on Python 3.  A generator avoids building a throwaway list.
    size = 0
    for root, dirs, files in os.walk(dir):
        size += sum(getsize(join(root, name)) for name in files)
    return size
if __name__ == "__main__":
filesize = getdirsize(inputdir)
print filesize
| Python |
#!/usr/bin/env python
# coding: utf-8
import web
from config import settings
from datetime import datetime
render = settings.render
db = settings.db
class hello:
    """Trivial health-check handler."""
    def GET(self):
        greeting = 'Hello, Feed!'
        return greeting
class redirect:
    """Redirect any trailing-slash URL to its slash-less counterpart."""
    def GET(self, path):
        target = '/' + path
        web.seeother(target)
class index:
    """Front page: the blog roll plus the 20 newest posts."""
    def GET(self):
        blogs = db.select('blog_info',
                what='BID, Blog_Title, Blog_Link, Blog_LastBuildDate, Blog_Feed',
                where=' 1', order='Blog_LastBuildDate DESC')
        posts = db.select('post_into',
                what='Pid, Post_Title, Post_Link, Post_Content',
                where=' 1', order='Pid DESC', limit=20)
        return render.index(blogInfo=blogs, newPosts=posts)
class readOne:
    """Display a single post, looked up by its id from the URL."""
    def GET(self, postId):
        # SECURITY FIX: postId comes straight from the URL; interpolating
        # it into the WHERE clause allowed SQL injection.  Use web.py's
        # $parameter binding instead.
        postInfo = db.select('post_into', vars={'pid': postId},
                where='Pid = $pid', limit=1)
        return render.readpost(postInfo=postInfo[0])
class readRSS:
    """Render an RSS view: one blog's info plus its 10 newest posts."""
    def GET(self, blogId):
        # SECURITY FIX: bind blogId via web.py's $parameter syntax instead
        # of concatenating it into the SQL (SQL injection).
        blogInfo = db.select('blog_info', vars={'bid': blogId},
                what='Blog_Title, countNUM', where='BID = $bid', limit=1)
        postInfo = db.select('post_into', vars={'bid': blogId},
                what='*', where='BID = $bid', order='Pid DESC', limit=10)
        return render.readRss(blogInfo=blogInfo[0], postInfo=postInfo)
#'SELECT * FROM `post_into` WHERE `BID` =' . $BID . ' ';
#SELECT * FROM `post_into` WHERE `Pid` =100658 LIMIT 0 , 1
class listx:
    """Show information about the named user."""
    def GET(self, name):
        message = "Listing info about user: {0}".format(name)
        return message
| Python |
#!/usr/bin/env python
# coding: utf-8
import web
from config import settings
from datetime import datetime
render = settings.render
db = settings.db
class hello:
    """Trivial health-check handler."""
    def GET(self):
        greeting = 'Hello, Feed!'
        return greeting
class redirect:
    """Redirect any trailing-slash URL to its slash-less counterpart."""
    def GET(self, path):
        target = '/' + path
        web.seeother(target)
class index:
    """Front page: the blog roll plus the 20 newest posts."""
    def GET(self):
        blogs = db.select('blog_info',
                what='BID, Blog_Title, Blog_Link, Blog_LastBuildDate, Blog_Feed',
                where=' 1', order='Blog_LastBuildDate DESC')
        posts = db.select('post_into',
                what='Pid, Post_Title, Post_Link, Post_Content',
                where=' 1', order='Pid DESC', limit=20)
        return render.index(blogInfo=blogs, newPosts=posts)
class readOne:
    """Display a single post, looked up by its id from the URL."""
    def GET(self, postId):
        # SECURITY FIX: postId comes straight from the URL; interpolating
        # it into the WHERE clause allowed SQL injection.  Use web.py's
        # $parameter binding instead.
        postInfo = db.select('post_into', vars={'pid': postId},
                where='Pid = $pid', limit=1)
        return render.readpost(postInfo=postInfo[0])
class readRSS:
    """Render an RSS view: one blog's info plus its 10 newest posts."""
    def GET(self, blogId):
        # SECURITY FIX: bind blogId via web.py's $parameter syntax instead
        # of concatenating it into the SQL (SQL injection).
        blogInfo = db.select('blog_info', vars={'bid': blogId},
                what='Blog_Title, countNUM', where='BID = $bid', limit=1)
        postInfo = db.select('post_into', vars={'bid': blogId},
                what='*', where='BID = $bid', order='Pid DESC', limit=10)
        return render.readRss(blogInfo=blogInfo[0], postInfo=postInfo)
#'SELECT * FROM `post_into` WHERE `BID` =' . $BID . ' ';
#SELECT * FROM `post_into` WHERE `Pid` =100658 LIMIT 0 , 1
class listx:
    """Show information about the named user."""
    def GET(self, name):
        message = "Listing info about user: {0}".format(name)
        return message
| Python |
# coding: utf-8
def getVisit():
    """Return the stored visit counter from the `sys` table.

    NOTE(review): `db` is neither defined nor imported in this module, so
    calling this as-is raises NameError; presumably a web.py database
    handle is expected to be in scope.  Verify against the caller.
    """
    visit = db.select('sys', what='value', where='`sid` =1' , limit=1)
    return visit[0]['value']
#!/usr/bin/env python
# coding: utf-8
import web
import options
# MySQL handle for the 'feed' schema.
# NOTE(review): credentials are hard-coded; move them to configuration or
# the environment before deploying.
db = web.database(dbn='mysql', db='feed', user='root', pw='yunlian', host='127.0.0.1')
def getVisit():
    """Return the site visit counter.

    Currently stubbed to a constant; the original database lookup is kept
    below for reference.
    """
    #visit = db.select('sys', what='value', where='`sid` =1' , limit=1)
    #return visit[0]['value']
    return 4
def getTableSize():
    """Return the Data_length (bytes) MySQL reports for the post_into table."""
    status = db.query('SHOW TABLE STATUS LIKE \'post_into\'')
    return status[0]['Data_length']
def strip_tags(html):
    """Return *html* with all markup removed, keeping only the text content."""
    # BUG FIX: HTMLParser was never imported, so every call raised
    # NameError.  Import locally, preferring the Python 2 module name and
    # falling back to the Python 3 location.
    try:
        from HTMLParser import HTMLParser
    except ImportError:
        from html.parser import HTMLParser
    result = []
    parser = HTMLParser()
    # Route every text node into `result`; tags are simply dropped.
    parser.handle_data = result.append
    parser.feed(html)
    parser.close()
    return ''.join(result)
#render = web.template.render('templates', globals={'stat':status})
def formatSize(ss, u = 'B', p = 1):
    """Format a byte count *ss* as a human-readable string with *p* decimals.

    BUG FIX: the original body referenced an undefined dict `us` and always
    raised NameError.  This implements the intent documented in the
    commented-out PHP original: repeatedly divide by 1024, stepping the
    unit through B -> K -> M -> G -> T.
    """
    us = {'B': 'K', 'K': 'M', 'M': 'G', 'G': 'T'}
    # Stop once the unit cannot be promoted further or the value is small.
    if (u != 'B' and u not in us) or ss < 1024:
        return '%.*f %s' % (p, ss, u)
    return formatSize(ss / 1024.0, us[u], p)
# NOTE(review): importing this module has side effects -- it queries the
# database for the table size and prints it (Python 2 print statement).
s = getTableSize()
print s
# Template renderer; caching disabled so template edits show immediately.
render = web.template.render('templates/', cache=False)
visit = getVisit()
web.config.debug = True
# Site-wide settings exposed to templates through web.storage.
config = web.storage(
        email = 'wangtao@iwangtao.com',
        site_name = '我的订阅',
        site_url = 'http://localhost:8080',
        site_desc = '',
        static = '/static',
        visit = visit,
)
# Make config/render/stripTags available inside every template.
web.template.Template.globals['config'] = config
web.template.Template.globals['render'] = render
web.template.Template.globals['stripTags'] = strip_tags
| Python |
#!/usr/bin/env python
# coding: utf-8
import web
import options
# MySQL handle for the 'feed' schema.
# NOTE(review): credentials are hard-coded; move them to configuration or
# the environment before deploying.
db = web.database(dbn='mysql', db='feed', user='root', pw='yunlian', host='127.0.0.1')
def getVisit():
    """Return the site visit counter.

    Currently stubbed to a constant; the original database lookup is kept
    below for reference.
    """
    #visit = db.select('sys', what='value', where='`sid` =1' , limit=1)
    #return visit[0]['value']
    return 4
def getTableSize():
    """Return the Data_length (bytes) MySQL reports for the post_into table."""
    status = db.query('SHOW TABLE STATUS LIKE \'post_into\'')
    return status[0]['Data_length']
def strip_tags(html):
    """Return *html* with all markup removed, keeping only the text content."""
    # BUG FIX: HTMLParser was never imported, so every call raised
    # NameError.  Import locally, preferring the Python 2 module name and
    # falling back to the Python 3 location.
    try:
        from HTMLParser import HTMLParser
    except ImportError:
        from html.parser import HTMLParser
    result = []
    parser = HTMLParser()
    # Route every text node into `result`; tags are simply dropped.
    parser.handle_data = result.append
    parser.feed(html)
    parser.close()
    return ''.join(result)
#render = web.template.render('templates', globals={'stat':status})
def formatSize(ss, u = 'B', p = 1):
    """Format a byte count *ss* as a human-readable string with *p* decimals.

    BUG FIX: the original body referenced an undefined dict `us` and always
    raised NameError.  This implements the intent documented in the
    commented-out PHP original: repeatedly divide by 1024, stepping the
    unit through B -> K -> M -> G -> T.
    """
    us = {'B': 'K', 'K': 'M', 'M': 'G', 'G': 'T'}
    # Stop once the unit cannot be promoted further or the value is small.
    if (u != 'B' and u not in us) or ss < 1024:
        return '%.*f %s' % (p, ss, u)
    return formatSize(ss / 1024.0, us[u], p)
# NOTE(review): importing this module has side effects -- it queries the
# database for the table size and prints it (Python 2 print statement).
s = getTableSize()
print s
# Template renderer; caching disabled so template edits show immediately.
render = web.template.render('templates/', cache=False)
visit = getVisit()
web.config.debug = True
# Site-wide settings exposed to templates through web.storage.
config = web.storage(
        email = 'wangtao@iwangtao.com',
        site_name = '我的订阅',
        site_url = 'http://localhost:8080',
        site_desc = '',
        static = '/static',
        visit = visit,
)
# Make config/render/stripTags available inside every template.
web.template.Template.globals['config'] = config
web.template.Template.globals['render'] = render
web.template.Template.globals['stripTags'] = strip_tags
| Python |
#!/usr/bin/env python
# coding: utf-8
# All handler classes live in the 'controllers' package.
pre_fix = 'controllers.'
# URL routing table: alternating (regex pattern, handler path) pairs,
# consumed by web.application.  Order matters: the first match wins.
urls = (
    '/(.*)/', pre_fix + 'feed.redirect',
    '/', pre_fix + 'feed.index',
    '/list/(.+)', pre_fix + 'feed.listx',
    '/hello', pre_fix +'feed.hello',
    '/readOne/(.+)', pre_fix +'feed.readOne',
    '/readRSS/(.+)', pre_fix +'feed.readRSS',
)
#http://localhost:8080/readRSS/5
#/readOne/30548
| Python |
#!/usr/bin/env python
# coding: utf-8
# All handler classes live in the 'controllers' package.
pre_fix = 'controllers.'
# URL routing table: alternating (regex pattern, handler path) pairs,
# consumed by web.application.  Order matters: the first match wins.
urls = (
    '/(.*)/', pre_fix + 'feed.redirect',
    '/', pre_fix + 'feed.index',
    '/list/(.+)', pre_fix + 'feed.listx',
    '/hello', pre_fix +'feed.hello',
    '/readOne/(.+)', pre_fix +'feed.readOne',
    '/readRSS/(.+)', pre_fix +'feed.readRSS',
)
#http://localhost:8080/readRSS/5
#/readOne/30548
| Python |
#!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c8"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
}
import sys, os
def _validate_md5(egg_name, data):
    """Verify *data* against the known md5 for *egg_name*; exit on mismatch.

    Eggs not listed in md5_data are passed through unverified.  Returns
    *data* unchanged on success.
    """
    if egg_name in md5_data:
        # ROBUSTNESS FIX: the standalone 'md5' module was deprecated in
        # Python 2.5 and removed later; prefer hashlib and fall back to
        # the legacy module on very old interpreters.
        try:
            from hashlib import md5
        except ImportError:
            from md5 import md5
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            print >>sys.stderr, (
                "md5 validation of %s failed! (Possible download problem?)"
                % egg_name
            )
            sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    # If setuptools is already imported, an in-process upgrade is impossible.
    was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
    def do_download():
        # Fetch the egg, put it first on sys.path, and tell setuptools to
        # bootstrap-install from it.
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    try:
        import pkg_resources
    except ImportError:
        # No pkg_resources at all: nothing to check, just download.
        return do_download()
    try:
        pkg_resources.require("setuptools>="+version); return
    except pkg_resources.VersionConflict, e:
        if was_imported:
            # An older version is already loaded; we cannot replace it now.
            print >>sys.stderr, (
            "The required version of setuptools (>=%s) is not available, and\n"
            "can't be installed while this script is running. Please install\n"
            " a more recent version first, using 'easy_install -U setuptools'."
            "\n\n(Currently using %r)"
            ) % (version, e.args[0])
            sys.exit(2)
        else:
            # Unload the stale copy so the downloaded egg can take over.
            del pkg_resources, sys.modules['pkg_resources'] # reload ok
            return do_download()
    except pkg_resources.DistributionNotFound:
        return do_download()
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto): # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                version, download_base, delay, url
            ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            # Always release both handles, even when the download raised.
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall

    `argv` is passed through to easy_install; with no arguments the function
    only reports whether a suitable setuptools is already installed.
    """
    try:
        import setuptools
    except ImportError:
        # setuptools is missing entirely: bootstrap from a freshly downloaded
        # egg, then delete the egg -- easy_install has installed a real copy.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg])   # we're done here
        finally:
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        if setuptools.__version__ == '0.0.1':
            # broken ancient release that cannot be upgraded automatically
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed.  Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)
    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # an older setuptools is present: upgrade it in place
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            # nothing to do: the requirement is already satisfied
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry

    Hashes every file in `filenames` (read as binary), merges the digests
    into the module-level ``md5_data`` dict, and rewrites the ``md5_data``
    literal inside this script's own source file.
    """
    import re
    # hashlib supersedes the md5 module, which is deprecated since Python 2.5
    try:
        from hashlib import md5
    except ImportError:
        from md5 import md5
    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()
    data = [" %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)
    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    src = src[:match.start(1)] + repl + src[match.end(1):]
    # Write back in *binary* mode: the source was read as binary, and a
    # text-mode write on Windows would translate newlines and corrupt the
    # script (and change its own md5).
    f = open(srcfile,'wb')
    f.write(src)
    f.close()
if __name__=='__main__':
    # "--md5update f1 f2 ..." refreshes the embedded md5 registry;
    # any other invocation installs or upgrades setuptools.
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
from finddata import find_package_data
# Package metadata for Feedjack.  Spelling fixed in the user-visible
# description fields ("Aggregator", "written", "excellent", "subscriber").
setup(
    name = 'Feedjack',
    version = '0.9.16',
    url = 'http://www.feedjack.org/',
    author = 'Gustavo Picón',
    author_email = 'gpicon@gmail.com',
    license = 'BSD',
    packages = find_packages(),
    package_data = find_package_data(where='feedjack', package='feedjack'),
    scripts = ['feedjack/bin/feedjack_update.py'],
    zip_safe = False,
    description = 'Multisite Feed Aggregator (Planet)',
    long_description = '''
Feedjack is a feed aggregator written in Python using the Django web development
framework.

Like the Planet feed aggregator:

 * It downloads feeds and aggregates their contents in a single site
 * The new aggregated site has a feed of its own (atom and rss)
 * It uses Mark Pilgrim’s excellent FeedParser
 * The subscriber list can be exported as OPML and FOAF

But FeedJack also has some advantages:

 * It handles historical data, you can read old posts
 * It parses a lot more info, including post categories
 * It generates pages with posts of a certain category
 * It generates pages with posts from a certain subscriber
 * It generates pages with posts of a certain category from a certain
   subscriber
 * A cloud tag/folksonomy (hype 2.0 compliant) for every page and every
   subscriber
 * It uses Django templates
 * The administration is done via web (using Django's kickass autogenerated
   and magical admin site), and can handle multiple planets
 * Extensive use of django’s internal cache engine. Most of the time you
   will have no database hits when serving pages.''',
)
| Python |
# Note: you may want to copy this into your setup.py file verbatim, as
# you can't import this from another package, when you don't know if
# that package is installed yet.
import os
import sys
from fnmatch import fnmatchcase
from distutils.util import convert_path
# Provided as an attribute, so you can append to these instead
# of replicating them:
# File patterns never treated as package data (.py files are code, not data).
standard_exclude = ('*.py', '*.pyc', '*~', '.*', '*.bak')
# Directory names/paths that are never descended into.
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
                                './dist', 'EGG-INFO', '*.egg-info')
def find_package_data(
    where='.', package='',
    exclude=standard_exclude,
    exclude_directories=standard_exclude_directories,
    only_in_packages=True,
    show_ignored=False):
    """
    Return a dictionary suitable for use in ``package_data``
    in a distutils ``setup.py`` file.

    The dictionary looks like::

        {'package': [files]}

    Where ``files`` is a list of all the files in that package that
    don't match anything in ``exclude``.

    If ``only_in_packages`` is true, then top-level directories that
    are not packages won't be included (but directories under packages
    will).

    Directories matching any pattern in ``exclude_directories`` will
    be ignored; by default directories with leading ``.``, ``CVS``,
    and ``_darcs`` will be ignored.

    If ``show_ignored`` is true, then all the files that aren't
    included in package data are shown on stderr (for debugging
    purposes).

    Note patterns use wildcards, or can be exact paths (including
    leading ``./``), and all searching is case-insensitive.
    """
    out = {}
    # Iterative directory walk: each stack entry is
    # (directory, path-prefix inside current package, package name,
    #  whether files here may only be collected if inside a package).
    stack = [(convert_path(where), '', package, only_in_packages)]
    while stack:
        where, prefix, package, only_in_packages = stack.pop(0)
        for name in os.listdir(where):
            fn = os.path.join(where, name)
            if os.path.isdir(fn):
                bad_name = False
                # a pattern may match either the bare name or the full path
                for pattern in exclude_directories:
                    if (fnmatchcase(name, pattern)
                        or fn.lower() == pattern.lower()):
                        bad_name = True
                        if show_ignored:
                            print >> sys.stderr, (
                                "Directory %s ignored by pattern %s"
                                % (fn, pattern))
                        break
                if bad_name:
                    continue
                if (os.path.isfile(os.path.join(fn, '__init__.py'))
                    and not prefix):
                    # this directory is itself a package: restart the
                    # prefix and collect into the dotted package name
                    if not package:
                        new_package = name
                    else:
                        new_package = package + '.' + name
                    stack.append((fn, '', new_package, False))
                else:
                    # plain data directory: extend the prefix instead
                    stack.append((fn, prefix + name + '/', package, only_in_packages))
            elif package or not only_in_packages:
                # is a file
                bad_name = False
                for pattern in exclude:
                    if (fnmatchcase(name, pattern)
                        or fn.lower() == pattern.lower()):
                        bad_name = True
                        if show_ignored:
                            print >> sys.stderr, (
                                "File %s ignored by pattern %s"
                                % (fn, pattern))
                        break
                if bad_name:
                    continue
                out.setdefault(package, []).append(prefix+name)
    return out
if __name__ == '__main__':
    # Debug aid: dump the package_data mapping for the current directory,
    # reporting every ignored file/directory on stderr.
    import pprint
    pprint.pprint(
        find_package_data(show_ignored=True))
| Python |
# -*- coding: utf-8 -*-
"""
feedjack
Gustavo Picón
fjcache.py
"""
import md5
from django.core.cache import cache
from django.conf import settings
# Cache-key namespaces understood by getkey().
T_HOST = 1  # the global hostname -> site_id mapping
T_ITEM = 2  # one cached object belonging to a site
T_META = 3  # the per-site index of item keys (used for bulk invalidation)
def str2md5(key):
    """ Returns the hexadecimal md5 digest of a string.

    The string is UTF-8 encoded before hashing, so unicode keys of any
    length can safely be folded into a fixed-size cache-key component.
    """
    # hashlib replaces the md5 module, which is deprecated since Python 2.5
    # (and removed in Python 3); fall back for very old interpreters.
    try:
        from hashlib import md5 as _md5
    except ImportError:
        from md5 import new as _md5
    return _md5(key.encode('utf-8')).hexdigest()
def getkey(stype, site_id=None, key=None):
    """ Build the cache key for the requested key type.

    T_HOST keys are global; T_ITEM keys name one cached object of a site
    (the object key is md5-hashed); T_META keys name the per-site key
    index.  An unknown type yields None.
    """
    prefix = '%s.feedjack' % (settings.CACHE_MIDDLEWARE_KEY_PREFIX)
    if stype == T_HOST:
        return '%s.hostcache' % prefix
    if stype == T_ITEM:
        return '%s.%d.item.%s' % (prefix, site_id, str2md5(key))
    if stype == T_META:
        return '%s.%d.meta' % (prefix, site_id)
    return None
def hostcache_get():
    """ Fetch the hostname -> site_id mapping from the cache (None on miss). """
    hostkey = getkey(T_HOST)
    return cache.get(hostkey)
def hostcache_set(value):
    """ Store the hostname -> site_id mapping in the cache. """
    hostkey = getkey(T_HOST)
    cache.set(hostkey, value)
def cache_get(site_id, key):
    """ Fetch one cached object belonging to a site (None on miss). """
    itemkey = getkey(T_ITEM, site_id, key)
    return cache.get(itemkey)
def cache_set(site, key, data):
    """ Sets cache data for a site.

    All keys related to a site are stored in a meta key. This key is per-site,
    so cache_delsite() can later delete every entry belonging to the site.
    """
    tkey = getkey(T_ITEM, site.id, key)
    mkey = getkey(T_META, site.id)
    tmp = cache.get(mkey)
    # The key index must outlive the items it tracks, so it is stored with a
    # one-year timeout instead of the site's configured cache_duration.
    longdur = 365*24*60*60
    if not tmp:
        tmp = [tkey]
        cache.set(mkey, [tkey], longdur)
    elif tkey not in tmp:
        tmp.append(tkey)
        cache.set(mkey, tmp, longdur)
    cache.set(tkey, data, site.cache_duration)
def cache_delsite(site_id):
    """ Drop every cached object of a site, then the site's key index. """
    mkey = getkey(T_META, site_id)
    itemkeys = cache.get(mkey)
    if not itemkeys:
        # nothing was ever cached for this site
        return
    for itemkey in itemkeys:
        cache.delete(itemkey)
    cache.delete(mkey)
| Python |
# -*- coding: utf-8 -*-
"""
feedjack
Gustavo Picón
fjcloud.py
"""
import math
from feedjack import fjlib
from feedjack import fjcache
def getsteps(levels, tagmax):
    """ Return [(level, max_count)] thresholds for the tagcloud levels.

    `levels` is clamped to a minimum of 2.  The final level's threshold is
    forced to tagmax+1 so the most popular tag always matches some level.
    """
    nlevels = max(2, levels)
    increment = int(math.ceil(tagmax * 1.0 / nlevels - 1))
    steps = []
    for level in range(nlevels):
        steps.append((level, 1 + level * increment))
    # just to be sure~  (top level must always cover the biggest count)
    steps[-1] = (steps[-1][0], tagmax + 1)
    return steps
def build(site, tagdata):
    """ Build the tag cloud for a list of (name, count) pairs.

    Returns a list of {'tagname', 'count', 'weight'} dicts ordered by tag
    name.  NOTE: `tagdata` is sorted in place.
    """
    tagdata.sort()
    # the most popular tag fixes the weight scale
    tagmax = 0
    for name, count in tagdata:
        tagmax = max(tagmax, count)
    steps = getsteps(site.tagcloud_levels, tagmax)
    cloud = []
    for name, count in tagdata:
        # first level whose (positive) threshold covers this tag's count
        matching = [level for level, limit in steps
            if limit >= count and limit > 0]
        cloud.append(
            {'tagname': name, 'count': count, 'weight': matching[0] + 1})
    return cloud
def cloudata(site):
    """ Returns a dictionary with all the tag clouds related to a site.

    Keys are feed ids; key 0 holds the aggregated site-wide cloud.
    site.id is an integer primary key, so the %d interpolation below is safe.
    """
    # per-feed tag counts for every feed subscribed to this site
    tagdata = fjlib.getquery("""
        SELECT feedjack_post.feed_id, feedjack_tag.name, COUNT(*)
        FROM feedjack_post, feedjack_subscriber, feedjack_tag,
          feedjack_post_tags
        WHERE feedjack_post.feed_id=feedjack_subscriber.feed_id AND
          feedjack_post_tags.tag_id=feedjack_tag.id AND
          feedjack_post_tags.post_id=feedjack_post.id AND
          feedjack_subscriber.site_id=%d
        GROUP BY feedjack_post.feed_id, feedjack_tag.name
        ORDER BY feedjack_post.feed_id, feedjack_tag.name""" % site.id)
    tagdict = {}     # feed_id -> [(tagname, count)]
    globaldict = {}  # tagname -> count summed over all feeds
    cloudict = {}    # feed_id -> rendered cloud (0 = whole site)
    for feed_id, tagname, tagcount in tagdata:
        if feed_id not in tagdict:
            tagdict[feed_id] = []
        tagdict[feed_id].append((tagname, tagcount))
        try:
            globaldict[tagname] += tagcount
        except KeyError:
            globaldict[tagname] = tagcount
    # feed id 0 is reserved for the aggregated site-wide cloud
    tagdict[0] = globaldict.items()
    for key, val in tagdict.items():
        cloudict[key] = build(site, val)
    return cloudict
def getcloud(site, feed_id=None):
    """ Return the tag cloud for a site, or for one of its subscribers.

    All clouds for the site are computed together and cached under the
    'tagclouds' key; entry 0 is the site-wide cloud.  An unknown feed_id
    yields an empty cloud.
    """
    cloudict = fjcache.cache_get(site.id, 'tagclouds')
    if not cloudict:
        # cache miss: rebuild every cloud for this site and store them
        cloudict = cloudata(site)
        fjcache.cache_set(site, 'tagclouds', cloudict)
    if not feed_id:
        # the site tagcloud has been requested
        return cloudict[0]
    # a subscriber's tag cloud has been requested
    feed_id = int(feed_id)
    if feed_id not in cloudict:
        return []
    return cloudict[feed_id]
#~
| Python |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0232, R0903, W0131
"""
feedjack
Gustavo Picón
models.py
"""
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode
from feedjack import fjcache
# Choices for Site.order_posts_by: 1 sorts by the date the feed reported,
# 2 by the date feedjack first stored the post.
SITE_ORDERBY_CHOICES = (
    (1, _('Date published.')),
    (2, _('Date the post was first obtained.'))
)
class Link(models.Model):
    # A named external link; sites display the links selected in Site.links.
    name = models.CharField(_('name'), max_length=100, unique=True)
    link = models.URLField(_('link'), verify_exists=True)

    class Meta:
        verbose_name = _('link')
        verbose_name_plural = _('links')

    class Admin:
        # old-style (pre-newforms-admin) registration marker
        pass

    def __unicode__(self):
        return u'%s (%s)' % (self.name, self.link)
class Site(models.Model):
    # One planet/aggregated site; a deployment can serve several of these,
    # selected by the request's Host header (see fjlib.getcurrentsite).
    name = models.CharField(_('name'), max_length=100)
    url = models.CharField(_('url'),
      max_length=100,
      unique=True,
      help_text=u'%s: %s, %s' % (smart_unicode(_('Example')),
        u'http://www.planetexample.com',
        u'http://www.planetexample.com:8000/foo'))
    title = models.CharField(_('title'), max_length=200)
    description = models.TextField(_('description'))
    welcome = models.TextField(_('welcome'), null=True, blank=True)
    greets = models.TextField(_('greets'), null=True, blank=True)

    # exactly one site is the default; enforced in save() below
    default_site = models.BooleanField(_('default site'), default=False)

    posts_per_page = models.IntegerField(_('posts per page'), default=20)
    order_posts_by = models.IntegerField(_('order posts by'), default=1,
      choices=SITE_ORDERBY_CHOICES)
    tagcloud_levels = models.IntegerField(_('tagcloud level'), default=5)
    show_tagcloud = models.BooleanField(_('show tagcloud'), default=True)

    use_internal_cache = models.BooleanField(_('use internal cache'), default=True)
    cache_duration = models.IntegerField(_('cache duration'), default=60*60*24,
      help_text=_('Duration in seconds of the cached pages and data.') )

    links = models.ManyToManyField(Link, verbose_name=_('links'),
      null=True, blank=True)
    template = models.CharField(_('template'), max_length=100, null=True,
      blank=True,
      help_text=_('This template must be a directory in your feedjack '
        'templates directory. Leave blank to use the default template.') )

    class Meta:
        verbose_name = _('site')
        verbose_name_plural = _('sites')
        ordering = ('name',)

    def __unicode__(self):
        return self.name

    def save(self):
        # NOTE(review): old-style override -- newer Django versions pass
        # extra arguments (force_insert, using, ...) to save(); confirm
        # against the Django version actually deployed.
        if not self.template:
            self.template = 'default'
        # there must be only ONE default site
        defs = Site.objects.filter(default_site=True)
        if not defs:
            # the first site ever saved becomes the default
            self.default_site = True
        elif self.default_site:
            # demote any other site that claims to be the default
            for tdef in defs:
                if tdef.id != self.id:
                    tdef.default_site = False
                    tdef.save()
        self.url = self.url.rstrip('/')
        # host -> site mapping may have changed: flush the host cache
        fjcache.hostcache_set({})
        super(Site, self).save()
class Feed(models.Model):
    # A remote feed; shared between sites through Subscriber entries.
    feed_url = models.URLField(_('feed url'), unique=True)
    name = models.CharField(_('name'), max_length=100)
    shortname = models.CharField(_('shortname'), max_length=50)
    is_active = models.BooleanField(_('is active'), default=True,
      help_text=_('If disabled, this feed will not be further updated.') )

    # The fields below are filled in by the update process, not by hand.
    title = models.CharField(_('title'), max_length=200, blank=True)
    tagline = models.TextField(_('tagline'), blank=True)
    link = models.URLField(_('link'), blank=True)

    # http://feedparser.org/docs/http-etag.html
    etag = models.CharField(_('etag'), max_length=50, blank=True)
    last_modified = models.DateTimeField(_('last modified'), null=True, blank=True)
    last_checked = models.DateTimeField(_('last checked'), null=True, blank=True)

    class Meta:
        verbose_name = _('feed')
        verbose_name_plural = _('feeds')
        ordering = ('name', 'feed_url',)

    def __unicode__(self):
        return u'%s (%s)' % (self.name, self.feed_url)

    def save(self):
        # no-op override kept for symmetry with the other models
        super(Feed, self).save()
class Tag(models.Model):
    # A post category/tag name, shared by all posts.
    name = models.CharField(_('name'), max_length=50, unique=True)

    class Meta:
        verbose_name = _('tag')
        verbose_name_plural = _('tags')
        ordering = ('name',)

    def __unicode__(self):
        return self.name

    def save(self):
        # no-op override kept for symmetry with the other models
        super(Tag, self).save()
class Post(models.Model):
    # A single entry fetched from a Feed; (feed, guid) identifies it.
    feed = models.ForeignKey(Feed, verbose_name=_('feed'), null=False, blank=False)
    title = models.CharField(_('title'), max_length=255)
    link = models.URLField(_('link'), )
    content = models.TextField(_('content'), blank=True)
    date_modified = models.DateTimeField(_('date modified'), null=True, blank=True)
    guid = models.CharField(_('guid'), max_length=200, db_index=True)
    author = models.CharField(_('author'), max_length=50, blank=True)
    author_email = models.EmailField(_('author email'), blank=True)
    comments = models.URLField(_('comments'), blank=True)
    tags = models.ManyToManyField(Tag, verbose_name=_('tags'))
    # when feedjack first stored the post (used by order_posts_by == 2)
    date_created = models.DateField(_('date created'), auto_now_add=True)

    class Meta:
        verbose_name = _('post')
        verbose_name_plural = _('posts')
        ordering = ('-date_modified',)
        unique_together = (('feed', 'guid'),)

    def __unicode__(self):
        return self.title

    def save(self):
        # no-op override kept for symmetry with the other models
        super(Post, self).save()

    def get_absolute_url(self):
        # a post's canonical URL is the original article, not a local page
        return self.link
class Subscriber(models.Model):
    # Membership of a Feed in a Site, with optional per-site display names.
    site = models.ForeignKey(Site, verbose_name=_('site') )
    feed = models.ForeignKey(Feed, verbose_name=_('feed') )
    name = models.CharField(_('name'), max_length=100, null=True, blank=True,
      help_text=_('Keep blank to use the Feed\'s original name.') )
    shortname = models.CharField(_('shortname'), max_length=50, null=True,
      blank=True,
      help_text=_('Keep blank to use the Feed\'s original shortname.') )
    is_active = models.BooleanField(_('is active'), default=True,
      help_text=_('If disabled, this subscriber will not appear in the site or '
        'in the site\'s feed.') )

    class Meta:
        verbose_name = _('subscriber')
        verbose_name_plural = _('subscribers')
        ordering = ('site', 'name', 'feed')
        unique_together = (('site', 'feed'),)

    def __unicode__(self):
        return u'%s in %s' % (self.feed, self.site)

    def get_cloud(self):
        # imported lazily to avoid a circular import at module load time
        from feedjack import fjcloud
        return fjcloud.getcloud(self.site, self.feed.id)

    def save(self):
        # default the display names to the underlying feed's names
        if not self.name:
            self.name = self.feed.name
        if not self.shortname:
            self.shortname = self.feed.shortname
        super(Subscriber, self).save()
#~
| Python |
# -*- coding: utf-8 -*-
"""
feedjack
Gustavo Picón
urls.py
"""
from django.conf.urls.defaults import patterns
from django.views.generic.simple import redirect_to
from feedjack import views
urlpatterns = patterns('',
    # legacy feed URLs, redirected to their canonical locations
    (r'^rss20.xml$', redirect_to,
      {'url':'/feed/rss/'}),
    (r'^feed/$', redirect_to,
      {'url':'/feed/atom/'}),
    # site-wide feeds
    (r'^feed/rss/$', views.rssfeed),
    (r'^feed/atom/$', views.atomfeed),
    # unqualified per-user / per-tag feeds default to atom
    (r'^feed/user/(?P<user>\d+)/tag/(?P<tag>.*)/$', redirect_to,
      {'url':'/feed/atom/user/%(user)s/tag/%(tag)s/'}),
    (r'^feed/user/(?P<user>\d+)/$', redirect_to,
      {'url':'/feed/atom/user/%(user)s/'}),
    (r'^feed/tag/(?P<tag>.*)/$', redirect_to,
      {'url':'/feed/atom/tag/%(tag)s/'}),
    # filtered feeds ("user" is a subscriber's feed id)
    (r'^feed/atom/user/(?P<user>\d+)/tag/(?P<tag>.*)/$', views.atomfeed),
    (r'^feed/atom/user/(?P<user>\d+)/$', views.atomfeed),
    (r'^feed/atom/tag/(?P<tag>.*)/$', views.atomfeed),
    (r'^feed/rss/user/(?P<user>\d+)/tag/(?P<tag>.*)/$', views.rssfeed),
    (r'^feed/rss/user/(?P<user>\d+)/$', views.rssfeed),
    (r'^feed/rss/tag/(?P<tag>.*)/$', views.rssfeed),
    # HTML pages, optionally filtered by subscriber and/or tag
    (r'^user/(?P<user>\d+)/tag/(?P<tag>.*)/$', views.mainview),
    (r'^user/(?P<user>\d+)/$', views.mainview),
    (r'^tag/(?P<tag>.*)/$', views.mainview),
    # blogrolls and the front page
    (r'^opml/$', views.opml),
    (r'^foaf/$', views.foaf),
    (r'^$', views.mainview),
)
#~
| Python |
# -*- coding: utf-8 -*-
"""
feedjack
Gustavo Picón
admin.py
"""
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from feedjack import models
class LinkAdmin(admin.ModelAdmin):
    # default admin behavior is sufficient for Link
    pass
class SiteAdmin(admin.ModelAdmin):
    # admin options for Site: list by url/name, pick links in a wide widget
    list_display = ('url', 'name')
    filter_vertical = ('links',)
class FeedAdmin(admin.ModelAdmin):
    list_display = ('name', 'feed_url', 'title', 'last_modified', \
        'is_active')
    # the second fieldset holds fields the updater maintains; collapsed so
    # admins don't edit them by accident
    fieldsets = (
        (None,
          {'fields':('feed_url', 'name', 'shortname', 'is_active')}),
        (_('Fields updated automatically by Feedjack'),
          {'classes':('collapse',),
           'fields':('title', 'tagline', 'link', 'etag', 'last_modified',
             'last_checked'),
          })
    )
    search_fields = ['feed_url', 'name', 'title']
class PostAdmin(admin.ModelAdmin):
    # admin options for Post: browse by modification date, search by link/title
    list_display = ('title', 'link', 'author', 'date_modified')
    search_fields = ['link', 'title']
    date_hierarchy = 'date_modified'
    filter_vertical = ('tags',)
class SubscriberAdmin(admin.ModelAdmin):
    # admin options for Subscriber: filterable per site
    list_display = ('name', 'site', 'feed')
    list_filter = ('site',)
# Register every feedjack model with its admin options.
admin.site.register(models.Link, LinkAdmin)
admin.site.register(models.Site, SiteAdmin)
admin.site.register(models.Feed, FeedAdmin)
admin.site.register(models.Post, PostAdmin)
admin.site.register(models.Subscriber, SubscriberAdmin)
#~
| Python |
# -*- coding: utf-8 -*-
"""
feedjack
Gustavo Picón
views.py
"""
from django.utils import feedgenerator
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.utils.cache import patch_vary_headers
from django.template import Context, loader
from feedjack import models
from feedjack import fjlib
from feedjack import fjcache
def initview(request):
    """ Retrieves the basic data needed by all feeds (host, feeds, etc)

    Returns a tuple of:
    1. A valid cached response or None
    2. The current site object
    3. The cache key
    4. The subscribers for the site (objects)
    5. The feeds for the site (ids)
    """
    # REQUEST_URI is only provided by some servers; fall back to PATH_INFO
    site_id, cachekey = fjlib.getcurrentsite(request.META['HTTP_HOST'], \
        request.META.get('REQUEST_URI', request.META.get('PATH_INFO', '/')), \
        request.META['QUERY_STRING'])
    response = fjcache.cache_get(site_id, cachekey)
    if response:
        # cache hit: callers return the response as-is, so the site and
        # subscriber data is not loaded at all
        return response, None, cachekey, [], []
    site = models.Site.objects.get(pk=site_id)
    sfeeds_obj = fjlib.sitefeeds(site)
    sfeeds_ids = [subscriber.feed.id for subscriber in sfeeds_obj]
    return None, site, cachekey, sfeeds_obj, sfeeds_ids
def blogroll(request, btype):
    """ View that handles the generation of blogrolls.

    `btype` selects the template: 'foaf' or 'opml'.  The rendered XML is
    cached per-site and served with a Vary: Host header.
    """
    response, site, cachekey, sfeeds_obj, sfeeds_ids = initview(request)
    if response:
        return response
    # for some reason this isn't working:
    #
    #response = render_to_response('feedjack/%s.xml' % btype, \
    #  fjlib.get_extra_content(site, sfeeds_ids))
    #response.mimetype = 'text/xml; charset=utf-8'
    #
    # so we must use this:
    template = loader.get_template('feedjack/%s.xml' % btype)
    ctx = {}
    # get_extra_content fills `ctx` in place (feeds, site, media_url, ...)
    fjlib.get_extra_content(site, sfeeds_ids, ctx)
    ctx = Context(ctx)
    response = HttpResponse(template.render(ctx) , \
        mimetype='text/xml; charset=utf-8')
    # per host caching: the same URL serves different sites per Host header
    patch_vary_headers(response, ['Host'])
    fjcache.cache_set(site, cachekey, response)
    return response
def foaf(request):
    """ Render the FOAF blogroll for the current site. """
    btype = 'foaf'
    return blogroll(request, btype)
def opml(request):
    """ Render the OPML blogroll for the current site. """
    btype = 'opml'
    return blogroll(request, btype)
def buildfeed(request, feedclass, tag=None, user=None):
    """ View that handles the feeds.

    `feedclass` is a django feedgenerator class (RSS or Atom); `tag` and
    `user` optionally restrict the feed to one tag / one subscriber feed id.
    """
    response, site, cachekey, sfeeds_obj, sfeeds_ids = initview(request)
    if response:
        return response
    # page=0 yields the newest posts_per_page posts
    object_list = fjlib.get_paginator(site, sfeeds_ids, page=0, tag=tag, \
        user=user)[1]
    # NOTE(review): '%s/%s' with a '/'-prefixed second part produces a
    # double slash, and the rss path is advertised even for atom feeds --
    # confirm whether the published feed_url is intentional.
    feed = feedclass(\
        title=site.title,
        link=site.url,
        description=site.description,
        feed_url='%s/%s' % (site.url, '/feed/rss/'))
    for post in object_list:
        feed.add_item( \
            title = '%s: %s' % (post.feed.name, post.title), \
            link = post.link, \
            description = post.content, \
            author_email = post.author_email, \
            author_name = post.author, \
            pubdate = post.date_modified, \
            unique_id = post.link, \
            categories = [tag.name for tag in post.tags.all()])
    response = HttpResponse(mimetype=feed.mime_type)
    # per host caching
    patch_vary_headers(response, ['Host'])
    feed.write(response, 'utf-8')
    if site.use_internal_cache:
        fjcache.cache_set(site, cachekey, response)
    return response
def rssfeed(request, tag=None, user=None):
    """ Generate the RSS 2.01 feed via buildfeed(). """
    return buildfeed(request, feedgenerator.Rss201rev2Feed, tag=tag, user=user)
def atomfeed(request, tag=None, user=None):
    """ Generate the Atom 1.0 feed via buildfeed(). """
    return buildfeed(request, feedgenerator.Atom1Feed, tag=tag, user=user)
def mainview(request, tag=None, user=None):
    """ View that handles all page requests.

    `tag` and `user` optionally restrict the page to one tag / one
    subscriber feed id; the rendered page is cached per site.
    """
    response, site, cachekey, sfeeds_obj, sfeeds_ids = initview(request)
    if response:
        return response
    ctx = fjlib.page_context(request, site, tag, user, (sfeeds_obj, \
        sfeeds_ids))
    # the template directory is configurable per site (Site.template)
    response = render_to_response('feedjack/%s/post_list.html' % \
        (site.template), ctx)
    # per host caching, in case the cache middleware is enabled
    patch_vary_headers(response, ['Host'])
    if site.use_internal_cache:
        fjcache.cache_set(site, cachekey, response)
    return response
#~
| Python |
# -*- coding: utf-8 -*-
"""
feedjack
Gustavo Picón
__init__.py
"""
| Python |
# -*- coding: utf-8 -*-
"""
feedjack
Gustavo Picón
fjlib.py
"""
from django.conf import settings
from django.db import connection
from django.core.paginator import Paginator, InvalidPage
from django.http import Http404
from django.utils.encoding import smart_unicode
from feedjack import models
from feedjack import fjcache
# this is taken from django, it was removed in r8191
class ObjectPaginator(Paginator):
    """
    Legacy ObjectPaginator class, for backwards compatibility.

    Note that each method on this class that takes page_number expects a
    zero-based page number, whereas the new API (Paginator/Page) uses one-based
    page numbers.
    """
    def __init__(self, query_set, num_per_page, orphans=0):
        Paginator.__init__(self, query_set, num_per_page, orphans)
        import warnings
        warnings.warn("The ObjectPaginator is deprecated. Use django.core.paginator.Paginator instead.", DeprecationWarning)
        # Keep these attributes around for backwards compatibility.
        self.query_set = query_set
        self.num_per_page = num_per_page
        self._hits = self._pages = None

    def validate_page_number(self, page_number):
        """Convert a zero-based page number to one-based and validate it."""
        try:
            page_number = int(page_number) + 1
        except ValueError:
            # BUGFIX: PageNotAnInteger was raised here but never imported
            # (NameError at runtime).  InvalidPage is its base class and is
            # what callers of this module actually catch.
            raise InvalidPage('That page number is not an integer')
        return self.validate_number(page_number)

    def get_page(self, page_number):
        """Return the object list of a zero-based page number."""
        try:
            page_number = int(page_number) + 1
        except ValueError:
            # BUGFIX: same NameError as in validate_page_number above.
            raise InvalidPage('That page number is not an integer')
        return self.page(page_number).object_list

    def has_next_page(self, page_number):
        "Does page_number have a page after it?"
        return page_number < self.pages - 1

    def has_previous_page(self, page_number):
        "Does page_number have a page before it?"
        return page_number > 0

    def first_on_page(self, page_number):
        """
        Returns the 1-based index of the first object on the given page,
        relative to total objects found (hits).
        """
        page_number = self.validate_page_number(page_number)
        return (self.num_per_page * (page_number - 1)) + 1

    def last_on_page(self, page_number):
        """
        Returns the 1-based index of the last object on the given page,
        relative to total objects found (hits).
        """
        page_number = self.validate_page_number(page_number)
        if page_number == self.num_pages:
            return self.count
        return page_number * self.num_per_page

    # The old API called it "hits" instead of "count".
    hits = Paginator.count

    # The old API called it "pages" instead of "num_pages".
    pages = Paginator.num_pages
def sitefeeds(siteobj):
    """ Returns the active feeds of a site.

    The result is a queryset of Subscriber objects (not Feed objects) with
    their related rows pre-fetched.
    """
    return siteobj.subscriber_set.filter(is_active=True).select_related()
    #return [subscriber['feed'] \
    #  for subscriber \
    #  in siteobj.subscriber_set.filter(is_active=True).values('feed')]
def getquery(query):
    """ Performs a raw SQL query and returns all result rows.

    Any database error yields an empty list, so callers can treat a failed
    query like an empty result set.
    """
    cursor = None
    try:
        cursor = connection.cursor()
        cursor.execute(query)
        data = cursor.fetchall()
    except Exception:
        # best effort, as before -- but unlike the old bare "except:" this
        # no longer swallows KeyboardInterrupt/SystemExit
        data = []
    finally:
        # the old code leaked the cursor when execute() failed
        if cursor is not None:
            try:
                cursor.close()
            except Exception:
                pass
    return data
def get_extra_content(site, sfeeds_ids, ctx):
    """ Returns extra data useful to the templates.

    Fills `ctx` in place with 'feeds', 'last_modified', 'site' and
    'media_url'; returns nothing.
    """
    # get the subscribers' feeds
    if sfeeds_ids:
        basefeeds = models.Feed.objects.filter(id__in=sfeeds_ids)
        try:
            ctx['feeds'] = basefeeds.order_by('name').select_related()
        except:
            # NOTE(review): deliberately best-effort, but the bare except
            # hides real errors -- consider narrowing it
            ctx['feeds'] = []
        # get the last_checked time of the most recently checked feed
        try:
            ctx['last_modified'] = basefeeds.filter(\
                last_checked__isnull=False).order_by(\
                '-last_checked').select_related()[0].last_checked.ctime()
        except:
            # no feed has ever been checked (IndexError) or the query failed
            ctx['last_modified'] = '??'
    else:
        ctx['feeds'] = []
        ctx['last_modified'] = '??'
    ctx['site'] = site
    ctx['media_url'] = '%s/feedjack/%s' % (settings.MEDIA_URL, site.template)
def get_posts_tags(object_list, sfeeds_obj, user_id, tag_name):
    """ Adds a qtags property in every post object in a page.

    Use "qtags" instead of "tags" in templates to avoid innecesary DB hits:
    all tags of all listed posts are fetched with one query instead of one
    query per post.  Also resolves and returns (user_obj, tag_obj): the
    Subscriber matching `user_id` and the Tag matching `tag_name`, either of
    which may be None.
    """
    tagd = {}       # post_id -> [Tag, ...]
    user_obj = None
    tag_obj = None
    # one query joining feedjack_tag with the post/tag m2m table; each
    # returned Tag carries an extra post_id attribute
    tags = models.Tag.objects.extra(\
        select={'post_id':'%s.%s' % (\
            connection.ops.quote_name('feedjack_post_tags'), \
            connection.ops.quote_name('post_id'))}, \
        tables=['feedjack_post_tags'], \
        where=[\
            '%s.%s=%s.%s' % (\
                connection.ops.quote_name('feedjack_tag'), \
                connection.ops.quote_name('id'), \
                connection.ops.quote_name('feedjack_post_tags'), \
                connection.ops.quote_name('tag_id')), \
            '%s.%s IN (%s)' % (\
                connection.ops.quote_name('feedjack_post_tags'), \
                connection.ops.quote_name('post_id'), \
                ', '.join([str(post.id) for post in object_list]))])
    for tag in tags:
        if tag.post_id not in tagd:
            tagd[tag.post_id] = []
        tagd[tag.post_id].append(tag)
        if tag_name and tag.name == tag_name:
            tag_obj = tag
    # map feed id -> Subscriber so each post can carry its subscriber
    subd = {}
    for sub in sfeeds_obj:
        subd[sub.feed.id] = sub
    for post in object_list:
        if post.id in tagd:
            post.qtags = tagd[post.id]
        else:
            post.qtags = []
        post.subscriber = subd[post.feed.id]
        if user_id and int(user_id) == post.feed.id:
            user_obj = post.subscriber
    return user_obj, tag_obj
def getcurrentsite(http_post, path_info, query_string):
    """ Returns the site id and the page cache key based on the request.

    `http_post` is the HTTP Host header value (the parameter name is a
    historical misnomer).  The host -> site mapping is memoized in the
    host cache; an unknown host falls back to the default site, creating
    one if no Site exists at all.
    """
    url = u'http://%s/%s' % (smart_unicode(http_post.rstrip('/')), \
        smart_unicode(path_info.lstrip('/')))
    pagecachekey = '%s?%s' % (smart_unicode(path_info), \
        smart_unicode(query_string))
    hostdict = fjcache.hostcache_get()
    if not hostdict:
        hostdict = {}
    if url not in hostdict:
        default, ret = None, None
        for site in models.Site.objects.all():
            # first site whose configured url is a prefix of the request url
            if url.startswith(site.url):
                ret = site
                break
            # remember a fallback: the flagged default site, else the first
            if not default or site.default_site:
                default = site
        if not ret:
            if default:
                ret = default
            else:
                # Somebody is requesting something, but the user didn't create
                # a site yet. Creating a default one...
                ret = models.Site(name='Default Feedjack Site/Planet', \
                    url='www.feedjack.org', \
                    title='Feedjack Site Title', \
                    description='Feedjack Site Description. ' \
                        'Please change this in the admin interface.')
                ret.save()
        hostdict[url] = ret.id
        fjcache.hostcache_set(hostdict)
    return hostdict[url], pagecachekey
def get_paginator(site, sfeeds_ids, page=0, tag=None, user=None):
    """ Returns a paginator object and a requested page from it.

    `page` is zero-based (ObjectPaginator convention).  `tag` filters by
    tag name, `user` by feed id; an unknown tag or bad user filter raises
    Http404.  Page 0 degrades to an empty list instead of a 404 so an
    empty site still renders.
    """
    if tag:
        try:
            localposts = models.Tag.objects.get(name=tag).post_set.filter(\
                feed__in=sfeeds_ids)
        except:
            # tag does not exist (Tag.DoesNotExist)
            raise Http404
    else:
        localposts = models.Post.objects.filter(feed__in=sfeeds_ids)

    if user:
        try:
            localposts = localposts.filter(feed=user)
        except:
            raise Http404

    # Site.order_posts_by == 2 means "date feedjack obtained the post"
    if site.order_posts_by == 2:
        localposts = localposts.order_by('-date_created', '-date_modified')
    else:
        localposts = localposts.order_by('-date_modified')

    paginator = ObjectPaginator(localposts.select_related(), \
        site.posts_per_page)
    try:
        object_list = paginator.get_page(page)
    except InvalidPage:
        if page == 0:
            object_list = []
        else:
            raise Http404
    return (paginator, object_list)
def page_context(request, site, tag=None, user_id=None, sfeeds=None):
    """ Returns the context dictionary for a page view.

    `sfeeds` is the (subscriber objects, feed ids) pair from initview().
    The context mirrors Django's old object_list pagination variables,
    plus feedjack-specific entries (tagcloud, subscribers, user, tag...).
    """
    sfeeds_obj, sfeeds_ids = sfeeds
    try:
        page = int(request.GET.get('page', 0))
    except ValueError:
        page = 0
    paginator, object_list = get_paginator(site, sfeeds_ids, \
        page=page, tag=tag, user=user_id)
    if object_list:
        # This will hit the DB once per page instead of once for every post in
        # a page. To take advantage of this the template designer must call
        # the qtags property in every item, instead of the default tags
        # property.
        user_obj, tag_obj = get_posts_tags(object_list, sfeeds_obj, \
            user_id, tag)
    else:
        user_obj, tag_obj = None, None
    # pagination values use the template-facing one-based convention
    ctx = {
        'object_list': object_list,
        'is_paginated': paginator.pages > 1,
        'results_per_page': site.posts_per_page,
        'has_next': paginator.has_next_page(page),
        'has_previous': paginator.has_previous_page(page),
        'page': page + 1,
        'next': page + 1,
        'previous': page - 1,
        'pages': paginator.pages,
        'hits' : paginator.hits,
    }
    get_extra_content(site, sfeeds_ids, ctx)
    # imported lazily to avoid a circular import at module load time
    from feedjack import fjcloud
    ctx['tagcloud'] = fjcloud.getcloud(site, user_id)
    ctx['user_id'] = user_id
    ctx['user'] = user_obj
    ctx['tag'] = tag_obj
    ctx['subscribers'] = sfeeds_obj
    return ctx
#~
| Python |
#!/usr/bin/env python
#
# -------------------------------
# projects/python/collatz/main.py
# Copyright (C) 2009
# Glenn P. Downing
# -------------------------------
# To run the program
# main.py < Collatz.in > Collatz.out
# To document the program
# pydoc -w main
# -------
# globals
# -------
i = 0        # input: first number of the pair; don't change after reading
j = 0        # input: second number of the pair; don't change after reading
v = 0        # output: max cycle length over the range [min(i,j), max(i,j)]
carray = []  # cache of cycle lengths indexed by number (0 = not yet known)
done = False # a boolean that indicates whether the cache is built or not
# -----------
# InputReader
# -----------
class InputReader (object) :
    # Thin wrapper over raw_input() so input can be stubbed out in tests.
    def read (self) :
        return raw_input()
# ------------
# OutputWriter
# ------------
class OutputWriter (object) :
    # Writes its arguments space-separated on one line (Python 2 print).
    def write (self, *a) :
        for w in a :
            print w,
        print
# -------
# my_read
# -------
def my_read (r) :
    """
    reads a pair of ints from reader r into the globals i and j
    returns True on success, False on end of input or a malformed line
    """
    global i
    global j
    try :
        s = r.read()
    except EOFError :
        return False
    l = s.split()
    # BUGFIX: a blank or one-number line used to raise IndexError below;
    # treat it as end of input instead
    if len(l) < 2 :
        return False
    i = int(l[0])
    j = int(l[1])
    return True
# --------
# calc_cycle
# --------
def calc_cycle (m) :
    """
    Return the Collatz cycle length of m (count of terms from m down to 1).

    Odd steps are fused: m -> (3m+1)/2 counts as two steps.  Consults the
    module-level cache `carray` for values below 500000; a cache hit ends
    the walk immediately.  Does NOT write to the cache (my_eval does that).

    The original had the even/odd step duplicated verbatim in the
    cached-range and uncached-range branches; the branches are merged here.
    """
    assert m >= 1
    c = 1
    while m != 1: # walk the sequence until it reaches 1
        if m < 500000 and carray[m] != 0:
            # cache hit: carray[m] counts the terms from m down to 1,
            # and m itself is already counted in c, hence the -1
            c = c + carray[m] - 1
            m = 1
        elif m % 2 == 0: # even: halve
            m = m >> 1
            c += 1
        else: # odd: fused step (3m+1)/2, counts as two steps
            m = m + (m >> 1) + 1
            c += 2
    assert m == 1
    assert c > 1
    return c
#--------
# make_list
#--------
def make_list(size):
    """Return a list of `size` zeros, used to initialize the cycle cache.

    `size` must be positive.
    """
    assert size > 0
    return [0] * size
# -------
# my_eval
# -------
def my_eval () :
    """
    Compute the max Collatz cycle length over [min(i,j), max(i,j)]
    and store it in the global v.  Builds the cycle-length cache
    `carray` on first call and memoizes new values as they are found.
    """
    global v
    global i
    global j
    global carray
    global done
    v = 1
    m = 0
    small = 0
    big = 0
    # i and j may come in either order; normalize to small..big
    if i > j:
        big = i
        small = j
    else:
        big = j
        small = i
    assert big >= small
    x = small
    if done == False:
        # one-time cache setup: powers of two have known cycle length
        # (2**k has length k+1), so pre-seed those entries
        carray = make_list(500000)
        num = 1
        count = 1
        while num < 500000 :
            carray[num] = count
            num += num # next power of two
            count += 1
        done = True
    while x <= big: # iterate from small up to big
        assert x <= big
        m = x
        cycle = 0
        if m < 500000 :
            if carray[m] == 0 :
                # not cached yet: compute and memoize
                cycle = calc_cycle(m)
                assert cycle >= 1
                carray[m] = cycle
            else:
                assert carray[m] >= 1
                cycle = carray[m]
        else:
            # out of cache range: compute without memoizing
            cycle = calc_cycle(m)
        if cycle > v: # keep the running maximum
            v = cycle
        x += 1
# --------
# my_print
# --------
def my_print (w) :
    """
    Write the globals i, j, and v (in that order) via writer w.

    w must provide a write(*args) method (OutputWriter or a test stub).
    """
    w.write(i, j, v)
# ----
# main file
# ----
def main () :
    """
    Program driver: read pairs of ints from stdin until EOF; for each
    pair print "i j v" where v is the max Collatz cycle length over the
    pair's range.  The cycle-length cache is built lazily by my_eval,
    so no setup is needed here (the old commented-out eager-build code
    and the unused `global carray` declaration were removed).
    """
    while my_read(InputReader()) :
        my_eval()
        my_print(OutputWriter())

if __name__ == "__main__" :
    main()
| Python |
#!/usr/bin/env python
# --------------------------------------
# projects/python/collatz/TestCollatz.py
# Copyright (C) 2009
# Glenn P. Downing
# --------------------------------------
# To run the tests
# TestCollatz.py
# To document the tests
# pydoc -w TestCollatz
import main
import unittest
# ----------
# TestReader
# ----------
class TestReader (object) :
    """Test stub for InputReader: read() returns a canned string."""
    def __init__ (self, s) :
        # s: the line that read() will hand back (e.g. '1 10\n')
        self.s = s
    def read (self) :
        return self.s
# ----------
# TestWriter
# ----------
class TestWriter (object) :
    """Test stub for OutputWriter: captures the written line in a string."""
    def str (self) :
        # return the line recorded by the last write() call
        return self.s
    def write (self, *a) :
        # Join all arguments with single spaces and terminate with a
        # newline, mirroring the Python 2 `print a, b, c` format.
        # (Indexing a[0] keeps the original IndexError on an empty call.)
        pieces = [str(a[0])]
        for extra in a[1:] :
            pieces.append(str(extra))
        self.s = ' '.join(pieces) + '\n'
# -----------
# TestCollatz
# -----------
class TestCollatz (unittest.TestCase) :
    """Unit tests for the collatz solver in the `main` module.

    self.assert_(a == b) was replaced with self.assertEqual(a, b):
    assert_ is a deprecated alias (removed in Python 3.12), and
    assertEqual reports both values when a test fails.
    """
    # ----
    # read
    # ----
    def test_read (self) :
        reader = TestReader('1 10\n')
        main.my_read(reader)
        self.assertEqual(main.i, 1)
        self.assertEqual(main.j, 10)
    def test_read2 (self):
        reader = TestReader('2000 1111\n')
        main.my_read(reader)
        self.assertEqual(main.i, 2000)
        self.assertEqual(main.j, 1111)
    # ----
    # eval
    # ----
    def test_eval1 (self) :
        main.i = 1
        main.j = 10
        main.v = 0
        main.my_eval()
        self.assertEqual(main.v, 20)
    def test_eval2 (self) :
        main.i = 100
        main.j = 200
        main.v = 0
        main.my_eval()
        self.assertEqual(main.v, 125)
    def test_eval3 (self) :
        main.i = 201
        main.j = 210
        main.v = 0
        main.my_eval()
        self.assertEqual(main.v, 89)
    def test_eval4 (self) :
        main.i = 900
        main.j = 1000
        main.v = 0
        main.my_eval()
        self.assertEqual(main.v, 174)
    def test_eval5 (self) :
        # i > j: bounds may come in either order
        main.i = 200
        main.j = 78
        main.v = 0
        main.my_eval()
        self.assertEqual(main.v, 125)
    def test_eval6 (self) :
        main.i = 9997
        main.j = 10000
        main.v = 0
        main.my_eval()
        self.assertEqual(main.v, 180)
    def test_eval7 (self) :
        # large range
        main.i = 234590
        main.j = 457991
        main.v = 0
        main.my_eval()
        self.assertEqual(main.v, 449)
    def test_eval8 (self) :
        # full problem range
        main.i = 1
        main.j = 999999
        main.v = 0
        main.my_eval()
        self.assertEqual(main.v, 525)
    # -----
    # print
    # -----
    def test_print1 (self) :
        main.i = 1
        main.j = 10
        main.v = 20
        writer = TestWriter()
        main.my_print(writer)
        self.assertEqual(writer.str(), '1 10 20\n')
    def test_print2 (self) :
        main.i = 1000
        main.j = 350
        main.v = 179
        writer = TestWriter()
        main.my_print(writer)
        self.assertEqual(writer.str(), '1000 350 179\n')

if __name__ == "__main__" :
    unittest.main()
| Python |
#!/usr/bin/env python
#
# -------------------------------
# projects/python/collatz/main.py
# Copyright (C) 2009
# Glenn P. Downing
# -------------------------------
# To run the program
# main.py < Collatz.in > Collatz.out
# To document the program
# pydoc -w main
# -------
# globals
# -------
# Module-level state shared by my_read / my_eval / my_print.
i = 0 # input: first number of the pair; don't change after reading
j = 0 # input: second number of the pair; don't change after reading
v = 0 # output: max cycle length over [min(i,j), max(i,j)]
carray = [] # cache of Collatz cycle lengths indexed by number (0 = not yet computed)
done = False # True once my_eval has initialized the cache
# -----------
# InputReader
# -----------
class InputReader (object) :
    """Production input source: reads one line at a time from stdin."""
    def read (self) :
        # Python 2 raw_input(); raises EOFError at end of input.
        return raw_input()
# ------------
# OutputWriter
# ------------
class OutputWriter (object) :
    """Production output sink: writes values to stdout."""
    def write (self, *a) :
        # Print all arguments space-separated on one line (Python 2 print).
        for w in a :
            print w,
        print
# -------
# my_read
# -------
def my_read (r) :
    """
    Read two ints from reader r into the globals i and j.

    r must provide a read() method that returns one line of text.
    Return True on success, False on end of input or a malformed
    (blank / single-token / non-numeric) line.
    """
    global i
    global j
    try :
        s = r.read()
        l = s.split()
        i = int(l[0])
        j = int(l[1])
    except (EOFError, IndexError, ValueError) :
        # EOFError: raw_input() hit end of input.  IndexError/ValueError:
        # fewer than two tokens or non-integer tokens -- the old code let
        # these escape and crash; treat them as end of input instead.
        return False
    return True
# --------
# calc_cycle
# --------
def calc_cycle (m) :
    """
    Return the Collatz cycle length of m (count of terms from m down to 1).

    Odd steps are fused: m -> (3m+1)/2 counts as two steps.  Consults the
    module-level cache `carray` for values below 500000; a cache hit ends
    the walk immediately.  Does NOT write to the cache (my_eval does that).

    The original had the even/odd step duplicated verbatim in the
    cached-range and uncached-range branches; the branches are merged here.
    """
    assert m >= 1
    c = 1
    while m != 1: # walk the sequence until it reaches 1
        if m < 500000 and carray[m] != 0:
            # cache hit: carray[m] counts the terms from m down to 1,
            # and m itself is already counted in c, hence the -1
            c = c + carray[m] - 1
            m = 1
        elif m % 2 == 0: # even: halve
            m = m >> 1
            c += 1
        else: # odd: fused step (3m+1)/2, counts as two steps
            m = m + (m >> 1) + 1
            c += 2
    assert m == 1
    assert c > 1
    return c
#--------
# make_list
#--------
def make_list(size):
    """Return a list of `size` zeros, used to initialize the cycle cache.

    `size` must be positive.
    """
    assert size > 0
    return [0] * size
# -------
# my_eval
# -------
def my_eval () :
    """
    Compute the max Collatz cycle length over [min(i,j), max(i,j)]
    and store it in the global v.  Builds the cycle-length cache
    `carray` on first call and memoizes new values as they are found.
    """
    global v
    global i
    global j
    global carray
    global done
    v = 1
    m = 0
    small = 0
    big = 0
    # i and j may come in either order; normalize to small..big
    if i > j:
        big = i
        small = j
    else:
        big = j
        small = i
    assert big >= small
    x = small
    if done == False:
        # one-time cache setup: powers of two have known cycle length
        # (2**k has length k+1), so pre-seed those entries
        carray = make_list(500000)
        num = 1
        count = 1
        while num < 500000 :
            carray[num] = count
            num += num # next power of two
            count += 1
        done = True
    while x <= big: # iterate from small up to big
        assert x <= big
        m = x
        cycle = 0
        if m < 500000 :
            if carray[m] == 0 :
                # not cached yet: compute and memoize
                cycle = calc_cycle(m)
                assert cycle >= 1
                carray[m] = cycle
            else:
                assert carray[m] >= 1
                cycle = carray[m]
        else:
            # out of cache range: compute without memoizing
            cycle = calc_cycle(m)
        if cycle > v: # keep the running maximum
            v = cycle
        x += 1
# --------
# my_print
# --------
def my_print (w) :
    """
    Write the globals i, j, and v (in that order) via writer w.

    w must provide a write(*args) method (OutputWriter or a test stub).
    """
    w.write(i, j, v)
# ----
# main file
# ----
def main () :
    """
    Program driver: read pairs of ints from stdin until EOF; for each
    pair print "i j v" where v is the max Collatz cycle length over the
    pair's range.  The cycle-length cache is built lazily by my_eval,
    so no setup is needed here (the old commented-out eager-build code
    and the unused `global carray` declaration were removed).
    """
    while my_read(InputReader()) :
        my_eval()
        my_print(OutputWriter())

if __name__ == "__main__" :
    main()
| Python |
#!/usr/bin/env python
# --------------------------------------
# projects/python/collatz/TestCollatz.py
# Copyright (C) 2009
# Glenn P. Downing
# --------------------------------------
# To run the tests
# TestCollatz.py
# To document the tests
# pydoc -w TestCollatz
import main
import unittest
# ----------
# TestReader
# ----------
class TestReader (object) :
    """Test stub for InputReader: read() returns a canned string."""
    def __init__ (self, s) :
        # s: the line that read() will hand back (e.g. '1 10\n')
        self.s = s
    def read (self) :
        return self.s
# ----------
# TestWriter
# ----------
class TestWriter (object) :
    """Test stub for OutputWriter: captures the written line in a string."""
    def str (self) :
        # return the line recorded by the last write() call
        return self.s
    def write (self, *a) :
        # Join all arguments with single spaces and terminate with a
        # newline, mirroring the Python 2 `print a, b, c` format.
        # (Indexing a[0] keeps the original IndexError on an empty call.)
        pieces = [str(a[0])]
        for extra in a[1:] :
            pieces.append(str(extra))
        self.s = ' '.join(pieces) + '\n'
# -----------
# TestCollatz
# -----------
class TestCollatz (unittest.TestCase) :
    """Unit tests for the collatz solver in the `main` module.

    self.assert_(a == b) was replaced with self.assertEqual(a, b):
    assert_ is a deprecated alias (removed in Python 3.12), and
    assertEqual reports both values when a test fails.
    """
    # ----
    # read
    # ----
    def test_read (self) :
        reader = TestReader('1 10\n')
        main.my_read(reader)
        self.assertEqual(main.i, 1)
        self.assertEqual(main.j, 10)
    def test_read2 (self):
        reader = TestReader('2000 1111\n')
        main.my_read(reader)
        self.assertEqual(main.i, 2000)
        self.assertEqual(main.j, 1111)
    # ----
    # eval
    # ----
    def test_eval1 (self) :
        main.i = 1
        main.j = 10
        main.v = 0
        main.my_eval()
        self.assertEqual(main.v, 20)
    def test_eval2 (self) :
        main.i = 100
        main.j = 200
        main.v = 0
        main.my_eval()
        self.assertEqual(main.v, 125)
    def test_eval3 (self) :
        main.i = 201
        main.j = 210
        main.v = 0
        main.my_eval()
        self.assertEqual(main.v, 89)
    def test_eval4 (self) :
        main.i = 900
        main.j = 1000
        main.v = 0
        main.my_eval()
        self.assertEqual(main.v, 174)
    def test_eval5 (self) :
        # i > j: bounds may come in either order
        main.i = 200
        main.j = 78
        main.v = 0
        main.my_eval()
        self.assertEqual(main.v, 125)
    def test_eval6 (self) :
        main.i = 9997
        main.j = 10000
        main.v = 0
        main.my_eval()
        self.assertEqual(main.v, 180)
    def test_eval7 (self) :
        # large range
        main.i = 234590
        main.j = 457991
        main.v = 0
        main.my_eval()
        self.assertEqual(main.v, 449)
    def test_eval8 (self) :
        # full problem range
        main.i = 1
        main.j = 999999
        main.v = 0
        main.my_eval()
        self.assertEqual(main.v, 525)
    # -----
    # print
    # -----
    def test_print1 (self) :
        main.i = 1
        main.j = 10
        main.v = 20
        writer = TestWriter()
        main.my_print(writer)
        self.assertEqual(writer.str(), '1 10 20\n')
    def test_print2 (self) :
        main.i = 1000
        main.j = 350
        main.v = 179
        writer = TestWriter()
        main.my_print(writer)
        self.assertEqual(writer.str(), '1000 350 179\n')

if __name__ == "__main__" :
    unittest.main()
| Python |
#!/usr/bin/env pypy
import os, sys, logging, re
import argparse
import fnmatch
# The two build flavours of the app; selected with --configuration.
configurations = {'lite', 'pro'}
# Java package source directory for each configuration; the directory is
# renamed into place when switching (see fix_package_dirs).
package_dirs = {
    'lite': ('src/cx/hell/android/pdfview',),
    'pro': ('src/cx/hell/android/pdfviewpro',)
}
# Per-configuration marker strings; entries at the same index are
# substituted for each other by replace_in_files when switching.
file_replaces = {
    'lite': (
        'cx.hell.android.pdfview.',
        '"cx.hell.android.pdfview"',
        'package cx.hell.android.pdfview;',
        'android:icon="@drawable/pdfviewer"',
    ),
    'pro': (
        'cx.hell.android.pdfviewpro.',
        '"cx.hell.android.pdfviewpro"',
        'package cx.hell.android.pdfviewpro;',
        'android:icon="@drawable/apvpro_icon"',
    ),
}
def make_comment(file_type, line):
    """Comment out `line` using the comment syntax of `file_type`.

    Lines that are already commented out are returned unchanged.
    Raises Exception for an unknown file type.
    """
    if file_type in ('java', 'c'):
        if line.startswith('//'):
            return line
        return '// ' + line
    elif file_type in ('html', 'xml'):
        stripped = line.strip()
        if stripped.startswith('<!--'):
            return line
        # note: strips the original indentation by design
        return '<!-- ' + stripped + ' -->\n'
    else:
        raise Exception("unknown file type: %s" % file_type)
def remove_comment(file_type, line):
    """Undo make_comment: strip the comment marker from `line` if present.

    Uncommented lines are returned unchanged.  Raises Exception for an
    unknown file type.
    """
    if file_type in ('java', 'c'):
        return line[3:] if line.startswith('// ') else line
    elif file_type in ('html', 'xml'):
        stripped = line.strip()
        if stripped.startswith('<!-- ') and stripped.endswith(' -->'):
            return stripped[5:-4] + '\n'
        return line
    else:
        raise Exception("unknown file type: %s" % file_type)
def handle_comments(conf, file_type, lines, filename):
    """
    Comment/uncomment the lines between `// #ifdef <name>` ... `// #endif`
    markers (or their `<!-- -->` forms) so that only blocks guarded by
    `conf` are active.  Returns a new list of lines of the same length;
    `filename` is used for log messages only.
    """
    new_lines = []
    # marker syntax: a java or xml comment opener followed by #ifdef NAME / #endif
    re_cmd_starts = re.compile(r'(?:(//|<!--))\s+#ifdef\s+(?P<def>[a-zA-Z]+)')
    re_cmd_ends = re.compile(r'(?:(//|<!--))\s+#endif')
    required_defs = []  # stack of currently-open #ifdef names
    for i, line in enumerate(lines):
        m = re_cmd_starts.search(line)
        if m:
            required_def = m.group('def')
            logging.debug("line %s:%d %s matches as start of %s" % (filename, i+1, line.strip(), required_def))
            required_defs.append(required_def)
            new_lines.append(line)  # marker lines are kept verbatim
            continue
        m = re_cmd_ends.search(line)
        if m:
            logging.debug("line %s:%d %s matches as endif" % (filename, i+1, line.strip()))
            # NOTE(review): an unmatched #endif raises IndexError here
            required_defs.pop()
            new_lines.append(line)
            continue
        if len(required_defs) == 0:
            # outside any #ifdef: copy through unchanged
            new_lines.append(line)
        elif len(required_defs) == 1 and required_defs[0] == conf:
            # guarded by exactly the active configuration: activate it
            new_line = remove_comment(file_type, line)
            new_lines.append(new_line)
        else:
            # guarded by another configuration (or nested): deactivate it
            new_line = make_comment(file_type, line)
            new_lines.append(new_line)
    assert len(new_lines) == len(lines)
    return new_lines
def find_files(dirname, name):
    """Return the paths of all files under `dirname` (recursively)
    whose basename matches the glob pattern `name`."""
    return [os.path.join(root, filename)
            for root, _dirs, filenames in os.walk(dirname)
            for filename in fnmatch.filter(filenames, name)]
def fix_package_dirs(conf):
    """
    Ensure the package source directories for `conf` exist, renaming a
    directory belonging to another configuration into place if needed.
    Raises if several candidate directories exist or none is found.
    """
    for i, dirname in enumerate(package_dirs[conf]):
        logging.debug("trying to restore %s" % dirname)
        if os.path.exists(dirname):
            if os.path.isdir(dirname):
                logging.debug(" already exists")
                continue
            else:
                # a plain file is squatting on the target name; leave it alone
                logging.error(" %s already exists, but is not dir" % dirname)
                continue
        # find the directory currently named after some other configuration
        found_dirname = None
        for other_conf, other_dirnames in package_dirs.items():
            other_dirname = other_dirnames[i]
            if other_conf == conf: continue # skip this conf when looking for other conf
            if os.path.isdir(other_dirname):
                if found_dirname is None:
                    found_dirname = other_dirname
                else:
                    # source dir already found :/
                    raise Exception("too many possible dirs for this package: %s, %s" % (found_dirname, other_dirname))
        if found_dirname is None:
            raise Exception("didn't find %s" % dirname)
        # now rename found_dirname to dirname
        os.rename(found_dirname, dirname)
        logging.debug("renamed %s to %s" % (found_dirname, dirname))
def handle_comments_in_files(conf, file_type, filenames):
    """
    Run handle_comments over every file in `filenames`, rewriting a file
    only when its contents actually changed.

    Fix: files are opened via `with`, so the read handle is closed
    deterministically (the old code leaked it).
    """
    for filename in filenames:
        with open(filename) as f:
            lines = f.readlines()
        new_lines = handle_comments(conf, file_type, lines, filename)
        if lines != new_lines:
            logging.debug("file %s comments changed" % filename)
            with open(filename, 'w') as f:
                f.write(''.join(new_lines))
def replace_in_files(conf, filenames):
    """
    Rewrite every file in `filenames`, replacing the marker strings of
    all other configurations with the ones for `conf` (entries at equal
    indexes in file_replaces correspond to each other).  A file is only
    rewritten when something actually changed.

    Fix: files are opened via `with` so the read handle is closed
    deterministically; stale commented-out debug lines removed.
    """
    other_confs = [other_conf for other_conf in file_replaces.keys() if other_conf != conf]
    for filename in filenames:
        with open(filename) as f:
            lines = f.readlines()
        new_lines = []
        for line in lines:
            new_line = line
            for i, target_string in enumerate(file_replaces[conf]):
                for other_conf in other_confs:
                    source_string = file_replaces[other_conf][i]
                    new_line = new_line.replace(source_string, target_string)
            new_lines.append(new_line)
        if new_lines != lines:
            logging.debug("file %s changed, writing..." % filename)
            with open(filename, 'w') as f:
                f.write(''.join(new_lines))
        else:
            logging.debug("file %s didn't change, no need to rewrite" % filename)
def fix_java_files(conf):
    """Switch all java sources under src/ to configuration `conf`."""
    filenames = find_files('src', name='*.java')
    replace_in_files(conf, filenames)
    handle_comments_in_files(conf, 'java', filenames)
def fix_xml_files(conf):
    """Switch all xml files in the whole project tree to configuration `conf`."""
    filenames = find_files('.', name='*.xml')
    replace_in_files(conf, filenames)
    handle_comments_in_files(conf, 'xml', filenames)
def fix_html_files(conf):
    """Switch all html files under res/ to configuration `conf`."""
    filenames = find_files('res', name='*.html')
    replace_in_files(conf, filenames)
    handle_comments_in_files(conf, 'html', filenames)
def fix_c_files(conf):
    """Switch native sources (.c and .h) under jni/pdfview2 to configuration `conf`."""
    filenames = find_files('jni/pdfview2', name='*.c')
    replace_in_files(conf, filenames)
    handle_comments_in_files(conf, 'c', filenames)
    # header files get the same treatment as the .c files
    filenames = find_files('jni/pdfview2', name='*.h')
    replace_in_files(conf, filenames)
    handle_comments_in_files(conf, 'c', filenames)
def fix_resources(conf):
    """Placeholder: resources currently need no per-configuration fixes."""
    pass
def main():
    """Parse --configuration from the command line and rewrite the
    project tree (package dirs, java/xml/html/c sources) for it."""
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
    parser = argparse.ArgumentParser(description='Switch project configurations')
    parser.add_argument('--configuration', dest='configuration', default='lite')
    args = parser.parse_args()
    # sanity check: must be run from the project root
    if not os.path.exists('AndroidManifest.xml'):
        raise Exception('android manifest not found, please run this script from main project directory')
    conf = args.configuration
    if conf not in configurations:
        raise Exception("invalid configuration: %s" % conf)
    # order matters: rename package dirs first, then rewrite file contents
    fix_package_dirs(conf)
    fix_java_files(conf)
    fix_xml_files(conf)
    fix_html_files(conf)
    fix_c_files(conf)
    fix_resources(conf)

if __name__ == '__main__':
    main()
| Python |
#!/usr/local/bin/python
"""
This script converts a subset of SVG into an HTML imagemap
Note *subset*. It only handles <path> elements, for which it only pays
attention to the M and L commands. Futher, it only notices the "translate"
transform.
It was written to generate the examples in the documentation for maphilight,
and thus is very squarely aimed at handling several SVG maps from wikipedia.
It *assumes* that all the <path>s it will need are inside a <g>. Any <path>
outside of a <g> will be ignored.
It takes several possible arguments, in the form:
$ svn2imagemap.py FILENAME [x y [group1 group2 ... groupN]]
FILENAME must be the name of an SVG file. All other arguments are optional.
x and y, if present, are the dimensions of the image you'll be creating from
the SVG. If not present, it assumes the values of the width and height
attributes in the SVG file.
group1 through groupN are group ids. If you only want particular groups used,
enter their ids here and all others will be ignored.
"""
import os
import re
import sys
import xml.dom.minidom
import parse_path
if len(sys.argv) == 1:
sys.exit("svn2imagemap.py FILENAME [x y [group1 group2 ... groupN]]")
if not os.path.exists(sys.argv[1]):
sys.exit("Input file does not exist")
x, y, groups = None, None, None
if len(sys.argv) >= 3:
x = float(sys.argv[2])
y = float(sys.argv[3])
if len(sys.argv) > 3:
groups = sys.argv[4:]
svg_file = xml.dom.minidom.parse(sys.argv[1])
svg = svg_file.getElementsByTagName('svg')[0]
raw_width = float(svg.getAttribute('width'))
raw_height = float(svg.getAttribute('height'))
width_ratio = x and (x / raw_width) or 1
height_ratio = y and (y / raw_height) or 1
if groups:
elements = [g for g in svg.getElementsByTagName('g') if (g.hasAttribute('id') and g.getAttribute('id') in groups)]
elements.extend([p for p in svg.getElementsByTagName('path') if (p.hasAttribute('id') and p.getAttribute('id') in groups)])
else:
elements = svg.getElementsByTagName('g')
parsed_groups = {}
for e in elements:
paths = []
if e.nodeName == 'g':
for path in e.getElementsByTagName('path'):
points = parse_path.get_points(path.getAttribute('d'))
for pointset in points:
paths.append([path.getAttribute('id'), pointset])
else:
points = parse_path.get_points(e.getAttribute('d'))
for pointset in points:
paths.append([e.getAttribute('id'), pointset])
if e.hasAttribute('transform'):
print e.getAttribute('id'), e.getAttribute('transform')
for transform in re.findall(r'(\w+)\((-?\d+.?\d*),(-?\d+.?\d*)\)', e.getAttribute('transform')):
if transform[0] == 'translate':
x_shift = float(transform[1])
y_shift = float(transform[2])
for path in paths:
path[1] = [(p[0] + x_shift, p[1] + y_shift) for p in path[1]]
parsed_groups[e.getAttribute('id')] = paths
out = []
for g in parsed_groups:
for path in parsed_groups[g]:
out.append('<area href="#" title="%s" shape="poly" coords="%s"></area>' %
(path[0], ', '.join([("%d,%d" % (p[0]*width_ratio, p[1]*height_ratio)) for p in path[1]])))
outfile = open(sys.argv[1].replace('.svg', '.html'), 'w')
outfile.write('\n'.join(out)) | Python |
#!/usr/local/bin/python
"""
Based on: http://wxpsvg.googlecode.com/svn/trunk/svg/pathdata.py
According to that project, this file is licensed under the LGPL
"""
try:
from pyparsing import (ParserElement, Literal, Word, CaselessLiteral,
Optional, Combine, Forward, ZeroOrMore, nums, oneOf, Group, ParseException, OneOrMore)
except ImportError:
import sys
sys.exit("pyparsing is required")
#ParserElement.enablePackrat()
def Command(char):
    """ Case insensitive but case preserving match for a path command letter."""
    return CaselessPreservingLiteral(char)
def Arguments(token):
    # Wrap a command's argument tokens in a pyparsing Group.
    return Group(token)
class CaselessPreservingLiteral(CaselessLiteral):
    """ Like CaselessLiteral, but returns the match as found
        instead of as defined.
    """
    def __init__( self, matchString ):
        super(CaselessPreservingLiteral,self).__init__( matchString.upper() )
        self.name = "'%s'" % matchString
        self.errmsg = "Expected " + self.name
        # NOTE(review): relies on the old pyparsing API (self.myException);
        # modern pyparsing no longer exposes this attribute.
        self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        test = instring[ loc:loc+self.matchLen ]
        if test.upper() == self.match:
            # return the text as it appeared in the input,
            # not the upper-cased form it was defined with
            return loc+self.matchLen, test
        #~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
def Sequence(token):
    """ One or more repetitions of the token, each optionally followed by a comma."""
    return OneOrMore(token+maybeComma)
# basic lexical pieces of SVG path numbers
digit_sequence = Word(nums)
sign = oneOf("+ -")
def convertToFloat(s, loc, toks):
    """Parse action: convert the matched token to a float.

    Raises ParseException when the text is not a valid float.
    """
    try:
        return float(toks[0])
    except ValueError:
        # BUGFIX: ParseException's signature is (pstr, loc, msg); the old
        # code passed (loc, msg), producing a garbled error.  The bare
        # `except:` was also narrowed to ValueError so unrelated errors
        # are no longer swallowed.
        raise ParseException(s, loc, "invalid float format %s" % toks[0])
exponent = CaselessLiteral("e")+Optional(sign)+Word(nums)
#note that almost all these fields are optional,
#and this can match almost anything. We rely on Pythons built-in
#float() function to clear out invalid values - loosely matching like this
#speeds up parsing quite a lot
floatingPointConstant = Combine(
    Optional(sign) +
    Optional(Word(nums)) +
    Optional(Literal(".") + Optional(Word(nums)))+
    Optional(exponent)
)
floatingPointConstant.setParseAction(convertToFloat)
number = floatingPointConstant
#same as FP constant but don't allow a - sign
nonnegativeNumber = Combine(
    Optional(Word(nums)) +
    Optional(Literal(".") + Optional(Word(nums)))+
    Optional(exponent)
)
nonnegativeNumber.setParseAction(convertToFloat)
coordinate = number
#comma or whitespace can separate values all over the place in SVG
maybeComma = Optional(Literal(',')).suppress()
coordinateSequence = Sequence(coordinate)
# a coordinate pair is collapsed to a python (x, y) tuple by its parse action
coordinatePair = (coordinate + maybeComma + coordinate).setParseAction(lambda t: tuple(t))
coordinatePairSequence = Sequence(coordinatePair)
coordinatePairPair = coordinatePair + maybeComma + coordinatePair
coordinatePairPairSequence = Sequence(Group(coordinatePairPair))
coordinatePairTriple = coordinatePair + maybeComma + coordinatePair + maybeComma + coordinatePair
coordinatePairTripleSequence = Sequence(Group(coordinatePairTriple))
#commands -- each parses to a Group of [letter, arguments]
lineTo = Group(Command("L") + Arguments(coordinatePairSequence))
# NOTE(review): C is parsed as a plain pair sequence (see the commented-out
# triple-sequence variant below), so control points are not distinguished
# from endpoints.
curve = Group(Command("C") + Arguments(coordinatePairSequence))
moveTo = Group(Command("M") + Arguments(coordinatePairSequence))
closePath = Group(Command("Z")).setParseAction(lambda t: ('Z', (None,)))
flag = oneOf("1 0").setParseAction(lambda t: bool(int((t[0]))))
arcRadius = (
    nonnegativeNumber + maybeComma + #rx
    nonnegativeNumber #ry
).setParseAction(lambda t: tuple(t))
arcFlags = (flag + maybeComma + flag).setParseAction(lambda t: tuple(t))
ellipticalArcArgument = Group(
    arcRadius + maybeComma + #rx, ry
    number + maybeComma +#rotation
    arcFlags + #large-arc-flag, sweep-flag
    coordinatePair #(x,y)
)
ellipticalArc = Group(Command("A") + Arguments(Sequence(ellipticalArcArgument)))
smoothQuadraticBezierCurveto = Group(Command("T") + Arguments(coordinatePairSequence))
quadraticBezierCurveto = Group(Command("Q") + Arguments(coordinatePairPairSequence))
smoothCurve = Group(Command("S") + Arguments(coordinatePairPairSequence))
#curve = Group(Command("C") + Arguments(coordinatePairTripleSequence))
horizontalLine = Group(Command("H") + Arguments(coordinateSequence))
verticalLine = Group(Command("V") + Arguments(coordinateSequence))
drawToCommand = (
    lineTo | moveTo | closePath | ellipticalArc | smoothQuadraticBezierCurveto |
    quadraticBezierCurveto | smoothCurve | curve | horizontalLine | verticalLine
)
#~ number.debug = True
# a path is any number of moveto-drawto command groups
moveToDrawToCommands = moveTo + ZeroOrMore(drawToCommand)
path = ZeroOrMore(moveToDrawToCommands)
path.keepTabs = True
def get_points(d):
    """Parse SVG path data `d` and return a list of point lists,
    one list per M (moveto) subpath.

    Only M, L and C commands contribute points; all other commands
    are ignored.
    """
    point_sets = []
    current = None
    for command in path.parseString(d):
        letter = command[0]
        if letter == 'M':
            # start a new subpath at the last coordinate of the moveto
            current = [command[1][-1]]
            point_sets.append(current)
        elif letter in ('L', 'C'):
            current.extend(command[1])
    return point_sets
if __name__ == "__main__":
print path.parseString("M 242.96145,653.59282 L 244.83646,650.1553 L 247.02397,649.8428 L 247.33647,650.62405 L 245.30521,653.59282 L 242.96145,653.59282 z M 252.80525,649.99905 L 258.74278,652.49906 L 260.77404,652.18656 L 262.33654,648.43654 L 261.71154,645.15528 L 257.64902,644.68653 L 253.74275,646.40528 L 252.80525,649.99905 z M 282.49289,659.6866 L 286.08665,664.99912 L 288.43041,664.68662 L 289.52417,664.21787 L 290.93042,665.46787 L 294.52419,665.31162 L 295.4617,663.90537 L 292.64918,662.18661 L 290.77417,658.59284 L 288.74291,655.15533 L 283.11789,657.96784 L 282.49289,659.6866 z M 302.02423,668.28039 L 303.27423,666.40538 L 307.8055,667.34288 L 308.43051,666.87413 L 314.36803,667.49913 L 314.05553,668.74914 L 311.55552,670.15539 L 307.33675,669.84289 L 302.02423,668.28039 z M 307.1805,673.28041 L 309.05551,677.03043 L 312.02427,675.93667 L 312.33677,674.37416 L 310.77427,672.3429 L 307.1805,672.0304 L 307.1805,673.28041 z M 313.89928,672.18665 L 316.08679,669.37414 L 320.61806,671.7179 L 324.83683,672.81166 L 329.0556,675.46792 L 329.0556,677.34293 L 325.61809,679.06169 L 320.93056,679.99919 L 318.5868,678.59293 L 313.89928,672.18665 z M 329.99311,687.18672 L 331.55561,685.93672 L 334.83688,687.49923 L 342.18066,690.93674 L 345.46193,692.968 L 347.02443,695.31176 L 348.89944,699.53053 L 352.80571,702.03054 L 352.49321,703.28055 L 348.74319,706.40556 L 344.68067,707.81182 L 343.27442,707.18682 L 340.30565,708.90557 L 337.96189,712.03059 L 335.77438,714.8431 L 334.05562,714.68685 L 330.61811,712.18684 L 330.30561,707.81182 L 330.93061,705.46806 L 329.3681,699.99928 L 327.33684,698.28052 L 327.18059,695.78051 L 329.3681,694.84301 L 331.39936,691.87425 L 331.86811,690.93674 L 330.30561,689.21798 L 329.99311,687.18672 z ") | Python |
#!/usr/local/bin/python
"""
Based on: http://wxpsvg.googlecode.com/svn/trunk/svg/pathdata.py
According to that project, this file is licensed under the LGPL
"""
try:
from pyparsing import (ParserElement, Literal, Word, CaselessLiteral,
Optional, Combine, Forward, ZeroOrMore, nums, oneOf, Group, ParseException, OneOrMore)
except ImportError:
import sys
sys.exit("pyparsing is required")
#ParserElement.enablePackrat()
def Command(char):
    """ Case insensitive but case preserving match for a path command letter."""
    return CaselessPreservingLiteral(char)
def Arguments(token):
    # Wrap a command's argument tokens in a pyparsing Group.
    return Group(token)
class CaselessPreservingLiteral(CaselessLiteral):
    """ Like CaselessLiteral, but returns the match as found
        instead of as defined.
    """
    def __init__( self, matchString ):
        super(CaselessPreservingLiteral,self).__init__( matchString.upper() )
        self.name = "'%s'" % matchString
        self.errmsg = "Expected " + self.name
        # NOTE(review): relies on the old pyparsing API (self.myException);
        # modern pyparsing no longer exposes this attribute.
        self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        test = instring[ loc:loc+self.matchLen ]
        if test.upper() == self.match:
            # return the text as it appeared in the input,
            # not the upper-cased form it was defined with
            return loc+self.matchLen, test
        #~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
def Sequence(token):
    """ One or more repetitions of the token, each optionally followed by a comma."""
    return OneOrMore(token+maybeComma)
# basic lexical pieces of SVG path numbers
digit_sequence = Word(nums)
sign = oneOf("+ -")
def convertToFloat(s, loc, toks):
    """Parse action: convert the matched token to a float.

    Raises ParseException when the text is not a valid float.
    """
    try:
        return float(toks[0])
    except ValueError:
        # BUGFIX: ParseException's signature is (pstr, loc, msg); the old
        # code passed (loc, msg), producing a garbled error.  The bare
        # `except:` was also narrowed to ValueError so unrelated errors
        # are no longer swallowed.
        raise ParseException(s, loc, "invalid float format %s" % toks[0])
exponent = CaselessLiteral("e")+Optional(sign)+Word(nums)
#note that almost all these fields are optional,
#and this can match almost anything. We rely on Pythons built-in
#float() function to clear out invalid values - loosely matching like this
#speeds up parsing quite a lot
floatingPointConstant = Combine(
    Optional(sign) +
    Optional(Word(nums)) +
    Optional(Literal(".") + Optional(Word(nums)))+
    Optional(exponent)
)
floatingPointConstant.setParseAction(convertToFloat)
number = floatingPointConstant
#same as FP constant but don't allow a - sign
nonnegativeNumber = Combine(
    Optional(Word(nums)) +
    Optional(Literal(".") + Optional(Word(nums)))+
    Optional(exponent)
)
nonnegativeNumber.setParseAction(convertToFloat)
coordinate = number
#comma or whitespace can separate values all over the place in SVG
maybeComma = Optional(Literal(',')).suppress()
coordinateSequence = Sequence(coordinate)
# a coordinate pair is collapsed to a python (x, y) tuple by its parse action
coordinatePair = (coordinate + maybeComma + coordinate).setParseAction(lambda t: tuple(t))
coordinatePairSequence = Sequence(coordinatePair)
coordinatePairPair = coordinatePair + maybeComma + coordinatePair
coordinatePairPairSequence = Sequence(Group(coordinatePairPair))
coordinatePairTriple = coordinatePair + maybeComma + coordinatePair + maybeComma + coordinatePair
coordinatePairTripleSequence = Sequence(Group(coordinatePairTriple))
#commands -- each parses to a Group of [letter, arguments]
lineTo = Group(Command("L") + Arguments(coordinatePairSequence))
# NOTE(review): C is parsed as a plain pair sequence (see the commented-out
# triple-sequence variant below), so control points are not distinguished
# from endpoints.
curve = Group(Command("C") + Arguments(coordinatePairSequence))
moveTo = Group(Command("M") + Arguments(coordinatePairSequence))
closePath = Group(Command("Z")).setParseAction(lambda t: ('Z', (None,)))
flag = oneOf("1 0").setParseAction(lambda t: bool(int((t[0]))))
arcRadius = (
    nonnegativeNumber + maybeComma + #rx
    nonnegativeNumber #ry
).setParseAction(lambda t: tuple(t))
arcFlags = (flag + maybeComma + flag).setParseAction(lambda t: tuple(t))
ellipticalArcArgument = Group(
    arcRadius + maybeComma + #rx, ry
    number + maybeComma +#rotation
    arcFlags + #large-arc-flag, sweep-flag
    coordinatePair #(x,y)
)
ellipticalArc = Group(Command("A") + Arguments(Sequence(ellipticalArcArgument)))
smoothQuadraticBezierCurveto = Group(Command("T") + Arguments(coordinatePairSequence))
quadraticBezierCurveto = Group(Command("Q") + Arguments(coordinatePairPairSequence))
smoothCurve = Group(Command("S") + Arguments(coordinatePairPairSequence))
#curve = Group(Command("C") + Arguments(coordinatePairTripleSequence))
horizontalLine = Group(Command("H") + Arguments(coordinateSequence))
verticalLine = Group(Command("V") + Arguments(coordinateSequence))
drawToCommand = (
    lineTo | moveTo | closePath | ellipticalArc | smoothQuadraticBezierCurveto |
    quadraticBezierCurveto | smoothCurve | curve | horizontalLine | verticalLine
)
#~ number.debug = True
# a path is any number of moveto-drawto command groups
moveToDrawToCommands = moveTo + ZeroOrMore(drawToCommand)
path = ZeroOrMore(moveToDrawToCommands)
path.keepTabs = True
def get_points(d):
    """Parse SVG path data `d` and return a list of point lists,
    one list per M (moveto) subpath.

    Only M, L and C commands contribute points; all other commands
    are ignored.
    """
    point_sets = []
    current = None
    for command in path.parseString(d):
        letter = command[0]
        if letter == 'M':
            # start a new subpath at the last coordinate of the moveto
            current = [command[1][-1]]
            point_sets.append(current)
        elif letter in ('L', 'C'):
            current.extend(command[1])
    return point_sets
if __name__ == "__main__":
print path.parseString("M 242.96145,653.59282 L 244.83646,650.1553 L 247.02397,649.8428 L 247.33647,650.62405 L 245.30521,653.59282 L 242.96145,653.59282 z M 252.80525,649.99905 L 258.74278,652.49906 L 260.77404,652.18656 L 262.33654,648.43654 L 261.71154,645.15528 L 257.64902,644.68653 L 253.74275,646.40528 L 252.80525,649.99905 z M 282.49289,659.6866 L 286.08665,664.99912 L 288.43041,664.68662 L 289.52417,664.21787 L 290.93042,665.46787 L 294.52419,665.31162 L 295.4617,663.90537 L 292.64918,662.18661 L 290.77417,658.59284 L 288.74291,655.15533 L 283.11789,657.96784 L 282.49289,659.6866 z M 302.02423,668.28039 L 303.27423,666.40538 L 307.8055,667.34288 L 308.43051,666.87413 L 314.36803,667.49913 L 314.05553,668.74914 L 311.55552,670.15539 L 307.33675,669.84289 L 302.02423,668.28039 z M 307.1805,673.28041 L 309.05551,677.03043 L 312.02427,675.93667 L 312.33677,674.37416 L 310.77427,672.3429 L 307.1805,672.0304 L 307.1805,673.28041 z M 313.89928,672.18665 L 316.08679,669.37414 L 320.61806,671.7179 L 324.83683,672.81166 L 329.0556,675.46792 L 329.0556,677.34293 L 325.61809,679.06169 L 320.93056,679.99919 L 318.5868,678.59293 L 313.89928,672.18665 z M 329.99311,687.18672 L 331.55561,685.93672 L 334.83688,687.49923 L 342.18066,690.93674 L 345.46193,692.968 L 347.02443,695.31176 L 348.89944,699.53053 L 352.80571,702.03054 L 352.49321,703.28055 L 348.74319,706.40556 L 344.68067,707.81182 L 343.27442,707.18682 L 340.30565,708.90557 L 337.96189,712.03059 L 335.77438,714.8431 L 334.05562,714.68685 L 330.61811,712.18684 L 330.30561,707.81182 L 330.93061,705.46806 L 329.3681,699.99928 L 327.33684,698.28052 L 327.18059,695.78051 L 329.3681,694.84301 L 331.39936,691.87425 L 331.86811,690.93674 L 330.30561,689.21798 L 329.99311,687.18672 z ") | Python |
#!/usr/local/bin/python
"""
This script converts a subset of SVG into an HTML imagemap
Note *subset*. It only handles <path> elements, for which it only pays
attention to the M and L commands. Further, it only notices the "translate"
transform.
It was written to generate the examples in the documentation for maphilight,
and thus is very squarely aimed at handling several SVG maps from wikipedia.
It *assumes* that all the <path>s it will need are inside a <g>. Any <path>
outside of a <g> will be ignored.
It takes several possible arguments, in the form:
$ svn2imagemap.py FILENAME [x y [group1 group2 ... groupN]]
FILENAME must be the name of an SVG file. All other arguments are optional.
x and y, if present, are the dimensions of the image you'll be creating from
the SVG. If not present, it assumes the values of the width and height
attributes in the SVG file.
group1 through groupN are group ids. If you only want particular groups used,
enter their ids here and all others will be ignored.
"""
import os
import re
import sys
import xml.dom.minidom
import parse_path
if len(sys.argv) == 1:
sys.exit("svn2imagemap.py FILENAME [x y [group1 group2 ... groupN]]")
if not os.path.exists(sys.argv[1]):
sys.exit("Input file does not exist")
x, y, groups = None, None, None
if len(sys.argv) >= 3:
x = float(sys.argv[2])
y = float(sys.argv[3])
if len(sys.argv) > 3:
groups = sys.argv[4:]
svg_file = xml.dom.minidom.parse(sys.argv[1])
svg = svg_file.getElementsByTagName('svg')[0]
raw_width = float(svg.getAttribute('width'))
raw_height = float(svg.getAttribute('height'))
width_ratio = x and (x / raw_width) or 1
height_ratio = y and (y / raw_height) or 1
if groups:
elements = [g for g in svg.getElementsByTagName('g') if (g.hasAttribute('id') and g.getAttribute('id') in groups)]
elements.extend([p for p in svg.getElementsByTagName('path') if (p.hasAttribute('id') and p.getAttribute('id') in groups)])
else:
elements = svg.getElementsByTagName('g')
parsed_groups = {}
for e in elements:
paths = []
if e.nodeName == 'g':
for path in e.getElementsByTagName('path'):
points = parse_path.get_points(path.getAttribute('d'))
for pointset in points:
paths.append([path.getAttribute('id'), pointset])
else:
points = parse_path.get_points(e.getAttribute('d'))
for pointset in points:
paths.append([e.getAttribute('id'), pointset])
if e.hasAttribute('transform'):
print e.getAttribute('id'), e.getAttribute('transform')
for transform in re.findall(r'(\w+)\((-?\d+.?\d*),(-?\d+.?\d*)\)', e.getAttribute('transform')):
if transform[0] == 'translate':
x_shift = float(transform[1])
y_shift = float(transform[2])
for path in paths:
path[1] = [(p[0] + x_shift, p[1] + y_shift) for p in path[1]]
parsed_groups[e.getAttribute('id')] = paths
out = []
for g in parsed_groups:
for path in parsed_groups[g]:
out.append('<area href="#" title="%s" shape="poly" coords="%s"></area>' %
(path[0], ', '.join([("%d,%d" % (p[0]*width_ratio, p[1]*height_ratio)) for p in path[1]])))
outfile = open(sys.argv[1].replace('.svg', '.html'), 'w')
outfile.write('\n'.join(out)) | Python |
#!/usr/bin/env python
import calendar
import logging
import os
import stat
import time
from threading import Thread, Lock
from crypto import CryptoHelper
from S3 import Connection
from SQLiteHelper import SQLiteHelper as SQL
import constants as C
class SafeDepositBox(Thread):
    """Daemon thread that watches a local directory tree and queues
    new/updated files for upload to S3 via self.S3Conn."""
    def __init__(self):
        Thread.__init__(self)
        # Admin state (config DB, staging area) lives under ~/.safedepositbox.
        self.admin_directory = os.path.expanduser("~/.safedepositbox")
        self.db = SQL(os.path.expanduser("~/.safedepositbox"))
        # config: dict.
        config = self.db.get_config()
        # Should be apart of init process..
        self.sdb_directory = os.path.expanduser(config['sdb_directory'])
        if not os.path.exists(self.sdb_directory):
            os.mkdir(self.sdb_directory)
        elif not os.path.isdir(self.sdb_directory):
            # NOTE(review): silently deletes a regular file occupying the
            # configured path and replaces it with a directory.
            os.remove(self.sdb_directory)
            os.mkdir(self.sdb_directory)
        # file -> [updated?, file's mtime]  (see constants STATUS/MTIME/LOCK)
        self.known_files = dict()
        self.known_files_lock = Lock()
        self.known_files_locks = dict()  # NOTE(review): appears unused here
        self.crypto_helper = CryptoHelper(os.path.expanduser('~/.safedepositbox/keys'))
        config['staging_directory'] = os.path.join(self.admin_directory, 'staging')
        config['bucket_name'] = 'safe-deposit-box'
        self.S3Conn = Connection(config, prefix='/data')
    def reset_known_files(self):
        # Mark everything unvisited before a sweep so deletions can be
        # detected afterwards (anything still NOT_VISITED is gone locally).
        for filename in self.known_files:
            self.known_files[filename][C.STATUS] = C.NOT_VISITED
    def _bad_file(self, filename):
        # Skip editor artifacts: vim swap files and filenames containing '#'
        # (emacs backup/lock files).
        extension = filename.split('.')[-1]
        if ('swp' == extension):
            return True
        if '#' in filename:
            return True
        return False
    def walktree(self, top, callback):
        '''recursively descend the directory tree rooted at top,
        calling the callback function for each regular file'''
        top = os.path.abspath(top)
        for filename in os.listdir(top):
            if self._bad_file(filename):
                #self.sdb_logger.debug("Badfile: %s" % filename)
                logging.debug("Badfile: %s" % filename)
                continue
            pathname = os.path.join(top, filename)
            try:
                mode = os.stat(pathname)[stat.ST_MODE]
            except OSError, e:
                # means the file isn't there anymore
                # mark file for deletion
                print e
                continue
            if stat.S_ISDIR(mode):
                # It's a directory, recurse into it
                self.walktree(pathname, callback)
            elif stat.S_ISREG(mode):
                # It's a file, call the callback function
                callback(pathname)
            else:
                # Unknown file type, print a message
                print 'Skipping %s' % pathname
    def _lm_to_epoch(self, last_modified_time):
        # Convert an S3 ISO-8601 UTC 'last modified' string (trailing 'Z')
        # into a POSIX timestamp.
        return calendar.timegm(time.strptime(last_modified_time.replace("Z",''), u"%Y-%m-%dT%H:%M:%S.000"))
    def monitor_local_file(self, filename):
        # Check for local file changes (make some queue of these results)
        filename_mtime = os.stat(filename).st_mtime
        if filename in self.known_files:
            self.known_files[filename][C.LOCK].acquire()
            if (self.known_files[filename][C.MTIME] < filename_mtime):
                # mtime moved forward: record it and queue an upload.
                self.known_files[filename][C.STATUS] = C.UPDATED
                self.known_files[filename][C.MTIME] = filename_mtime
                self.S3Conn.enqueue(filename, C.UPDATED)
            else:
                self.known_files[filename][C.STATUS] = C.UNCHANGED
            self.known_files[filename][C.LOCK].release()
        else: # don't have this file information stored in memory
            self.known_files[filename] = [C.PNEW, filename_mtime, Lock()]
            self.S3Conn.enqueue(filename, C.PNEW)
            # print "Check if file is already uploaded as current version", \
            #     self.known_files[filename]
    def monitor_cloud_files(self):
        keys = self.S3Conn.get_all_keys()
        # make sure that we update our known_files table view of the
        # file time so that we don't continue to update
        for key in keys:
            print "CLOUD:", self.sdb_directory, key.name, self._lm_to_epoch(key.last_modified)
    def run(self):
        # Main daemon loop: sweep the local tree, dump state, poll S3, sleep.
        while True:
            # figure out who's new and who's updated
            self.walktree(self.sdb_directory, self.monitor_local_file)
            # see if anyone needs removing
            print time.time()
            for f in self.known_files:
                print "  ", f, self.known_files.get(f)
            self.monitor_cloud_files()
            # uploaded_updated_files()
            # self.delete_not_visited_files()
            # self.reset_known_files()
            time.sleep(C.IDLE_WINDOW)
| Python |
#!/usr/bin/env python
import readline
import cmd
import logging
import sys
class LockboxCommands(cmd.Cmd):
    """Interactive admin shell for Lockbox.

    fix: cmd.Cmd dispatches every typed command as do_<name>(arg); the
    previous zero-argument stubs raised TypeError the moment any command
    was entered.  Every handler now accepts the argument line.  Most
    commands are still unimplemented stubs.
    """
    def __init__(self):
        cmd.Cmd.__init__(self)
        self.prompt = 'Lockbox > '
    def do_viewlog(self, arg): pass
    # local credentials
    def do_updateawsaccesskey(self, arg): pass
    def do_updateawssecretkey(self, arg): pass
    # editors
    def do_addeditor(self, arg):
        pass
    def do_deleditor(self, arg):
        pass
    def do_modeditor(self, arg):
        pass
    def do_searcheditors(self, arg):
        # see address book
        pass
    # viewer
    def do_addviewer(self, arg): pass
    def do_delviewer(self, arg): pass
    def do_modviewer(self, arg): pass
    def do_searchviewers(self, arg): pass
    # keys
    def do_genkey(self, arg):
        pass
    def do_addkey(self, arg):
        """
        TODO username + location == key
        """
        pass
    def do_modkey(self, arg):
        # change username or location associated with key
        pass
    def do_delkey(self, arg):
        pass
    def do_lskeys(self, arg):
        pass
    # Adjust association between editor and file
    def do_addeditortofile(self, arg):
        # username
        pass
    def do_deleditorfromfile(self, arg):
        pass
    def do_lsfileeditors(self, filename):
        pass
    # file manipulation
    def do_lsfiles(self, arg):
        pass
    # top directory manipulation
    def do_lstopdirs(self, arg):
        pass
    def do_addtopdir(self, arg):
        pass
    def do_rmtopdir(self, arg):
        pass
    # web service setup
    def do_cost(self, arg):
        pass
    # Lockbox System testing
    def do_upgradecheck(self, arg):
        pass
    def do_upgradestart(self, arg):
        pass
    def do_quit(self, arg):
        # Exit the shell with a nonzero status.
        sys.exit(1)
    def help_quit(self):
        # Single-argument print is valid on both Python 2 and 3 and emits
        # the same line the old two-statement form did.
        print("syntax: quit -- terminates the application")
    # shortcuts
    do_q = do_quit
| Python |
#!/usr/bin/env python
import os
from util import log, init_dir
import M2Crypto
# M2Crypto.EVP.Cipher `op` values: 0 decrypts, 1 encrypts.
DECODE = 0
ENCODE = 1
def _filter_cipher(input, output, cipher):
while 1:
data = input.read(32000)
if len(data) == 0:
break
enc = cipher.update(data)
output.write(enc)
output.write(cipher.final())
output.flush()
class CryptoHelper(object):
    """Manages the local RSA key pair and AES encryption of file data.

    The RSA pair lives in `key_dir` as PEM files ('sdb.private' /
    'sdb.public'); per-file AES keys are wrapped with the RSA public key
    via encrypt_aes_key/decrypt_aes_key.
    """
    def __init__(self, key_dir, use_default_location=True):
        self.key_dir = key_dir
        self._initialize_keys()
    def _remove_priv_pem_headers(self, priv):
        """Strip the PEM armor lines from a private key string."""
        priv = priv.replace("-----BEGIN RSA PRIVATE KEY-----\n","")
        priv = priv.replace("-----END RSA PRIVATE KEY-----\n","")
        return priv
    def _add_priv_pem_headers(self, priv):
        """Re-wrap a bare base64 private key body in PEM armor."""
        priv = "-----BEGIN RSA PRIVATE KEY-----\n" + priv
        priv = priv + "-----END RSA PRIVATE KEY-----\n"
        return priv
    def _initialize_keys(self):
        """Load the RSA key pair from disk, regenerating it if unreadable."""
        init_dir(self.key_dir)
        priv = os.path.join(self.key_dir, 'sdb.private')
        pub = os.path.join(self.key_dir, 'sdb.public')
        try:
            self.priv_key = M2Crypto.RSA.load_key(priv)
            self.pub_key = M2Crypto.RSA.load_pub_key(pub)
        except Exception:  # narrowed from a bare except
            log.warn('Failed to load keys. Regenerating...')
            # fix: _generate_pki_keys now persists both halves and returns
            # the RSA key object.  It used to return a PEM *string* and
            # never set self.pub_key, so every later encrypt/decrypt call
            # failed after a regeneration.
            self.priv_key = self._generate_pki_keys(priv, pub)
            self.pub_key = M2Crypto.RSA.load_pub_key(pub)
        # fix: no longer dumps the private key to stdout.
    def _generate_pki_keys(self, privfile, pubfile):
        """Generate a 2048-bit RSA pair, write both PEM files, return the key."""
        k = M2Crypto.RSA.gen_key(2048, 11)
        k.save_key(privfile, cipher=None)  # private half, unencrypted PEM
        k.save_pub_key(pubfile)
        return k
    def generate_aes_key(self):
        '''Generate and return new random AES key.'''
        return M2Crypto.Rand.rand_bytes(32)
    def encrypt_aes_key(self, aes):
        """Wrap an AES key with our RSA public key (PKCS#1 padding)."""
        log.info("Encrypting AES key")
        return self.pub_key.public_encrypt(aes, M2Crypto.RSA.pkcs1_padding)
    def decrypt_aes_key(self, aes_enc):
        """Unwrap an RSA-encrypted AES key with our private key."""
        log.info("Decrypting AES key")
        return self.priv_key.private_decrypt(aes_enc, M2Crypto.RSA.pkcs1_padding)
    def encrypt(self, fpin, fpout, aes_key=None):
        '''
        Encrypt a file object using a aes key.
        fpin - file object containing the data to encrypt
        fpout - file object to write encrypted data to
        If no key is specified, a random key will be generated.
        returns: aes_key, salt used for encryption.
        '''
        log.info("Encrypting file...")
        if not aes_key:
            aes_key = M2Crypto.Rand.rand_bytes(32)
        salt = M2Crypto.Rand.rand_bytes(8)
        iv = '\0' * 32
        cipher = M2Crypto.EVP.Cipher(alg='aes_256_cbc', key=aes_key, iv=iv, op=ENCODE, salt=salt)
        _filter_cipher(fpin, fpout, cipher)
        log.info("Done.")
        return aes_key, salt
    def decrypt(self, fpin, fpout, aes_key, salt):
        '''
        Decrypt a fileobject using the given AES key and salt.
        '''
        log.info("Decrypting file...")
        iv = '\0' * 32
        cipher = M2Crypto.EVP.Cipher(alg='aes_256_cbc', key=aes_key, iv=iv, op=DECODE, salt=salt)
        _filter_cipher(fpin, fpout, cipher)
        log.info("Done")
def test_crypto():
    """Round-trip an in-memory payload through encrypt/decrypt."""
    import cStringIO
    helper = CryptoHelper(os.path.expanduser('~/.safedepositbox/keys'))
    payload = '0123456789' * 100
    source = cStringIO.StringIO(payload)
    ciphertext = cStringIO.StringIO()
    plaintext = cStringIO.StringIO()
    aes, salt = helper.encrypt(source, ciphertext)
    wrapped = helper.encrypt_aes_key(aes)
    helper.decrypt_aes_key(wrapped)
    helper.decrypt(cStringIO.StringIO(ciphertext.getvalue()), plaintext, aes, salt)
    assert payload == plaintext.getvalue()
| Python |
#!/usr/bin/env python
import os, re, subprocess, sys
import logging
import atexit
from logging import DEBUG, INFO, WARN, ERROR, FATAL
from time import time
from collections import defaultdict
import tempfile as tf
# Module-wide logging: single-letter level plus file:line on every record.
logging.basicConfig(stream=sys.stderr, level=logging.INFO,
                    format="%(levelname).1s %(filename)s:%(lineno)s -- %(message)s ")
def tempfile(conf):
    """Open an anonymous TemporaryFile inside the configured staging dir."""
    staging = conf['staging_directory']
    return tf.TemporaryFile(dir=staging)
def execute(cmd):
    """Run cmd in a shell, wait for it, and return (stdout, stderr).

    fix: the captured output used to be thrown away; it is now returned.
    Existing callers that ignore the return value are unaffected.
    NOTE: shell=True means cmd is interpreted by the shell -- never pass
    untrusted input here.
    """
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    return proc.communicate()
def init_dir(directory):
    """Create directory (and any missing parents) if it does not exist.

    fix: the exists-then-create sequence raced with concurrent creators;
    we now attempt creation and only swallow the error when the directory
    really is present afterwards.
    """
    try:
        os.makedirs(directory)
    except OSError:
        if not os.path.isdir(directory):
            raise
def remove_file(f):
    """Delete f if present; a missing file is not an error.

    fix: the exists-then-remove sequence raced with concurrent deleters;
    we now attempt removal and only swallow the error when the file is
    really gone (other failures, e.g. permissions, still raise).
    """
    try:
        os.remove(f)
    except OSError:
        if os.path.exists(f):
            raise
def currentFrame():
    # Walk 6 frames up the stack: past the Logger facade and the logging
    # machinery, back to the original call site.  Fragile: the depth must
    # match the Logger -> logging call chain exactly -- TODO confirm.
    return sys._getframe(6)
def findCaller():
    # Replacement for logging's caller lookup so emitted records show the
    # real call site (file, line, function) instead of this module.
    f = currentFrame()
    code = f.f_code
    return (code.co_filename, f.f_lineno, code.co_name)
# Monkey-patch the root logger to use our deeper stack walk.
logging.root.findCaller = findCaller
class Logger():
    """Static facade over the root logger with lazy %-formatting.

    Each method applies `fmt % args` only when the level is enabled.
    NOTE(review): a fmt containing a literal '%' with no args will raise
    during formatting -- callers must escape '%%' themselves.
    """
    @staticmethod
    def debug(fmt, *args, **kw):
        if logging.root.isEnabledFor(DEBUG):
            logging.debug(fmt % args, **kw)
    @staticmethod
    def info(fmt, *args, **kw):
        if logging.root.isEnabledFor(INFO):
            logging.info(fmt % args, **kw)
    @staticmethod
    def warn(fmt, *args, **kw):
        if logging.root.isEnabledFor(WARN):
            logging.warn(fmt % args, **kw)
    @staticmethod
    def error(fmt, *args, **kw):
        if logging.root.isEnabledFor(ERROR):
            logging.error(fmt % args, **kw)
    @staticmethod
    def fatal(fmt, *args, **kw):
        if logging.root.isEnabledFor(FATAL):
            logging.fatal(fmt % args, **kw)
# Module-wide logger instance used throughout this package.
log = Logger()
class TimeUtil():
    """Wall-clock stopwatch, plus a static helper that times one call."""
    @staticmethod
    def time(f, *args):
        """Run f(*args), log its result and runtime, return the result."""
        begin = time()
        result = f(*args)
        finished = time()
        log.info('%s: result=%s, runtime=%4.2f', f.__name__, result, finished - begin)
        return result
    def __init__(self):
        self.start()
    def start(self):
        """(Re)start the stopwatch."""
        self.start_time = time()
    def reset(self, msg):
        """Log elapsed time under msg (when msg is truthy), then restart."""
        if msg:
            self.finish(msg)
        self.start()
    def finish(self, msg):
        """Log the time elapsed since the last start() under msg."""
        elapsed = time() - self.start_time
        log.info('%s: runtime: %4.2f', msg, elapsed)
class Stat():
    """Accumulates total elapsed time and call count for one label.

    start()/finish() pairs may nest; only the outermost pair contributes
    to the accumulated time.
    """
    def __init__(self):
        self.time = 0
        self.count = 0
        self._t = []  # stack of start timestamps for nested calls
    def start(self):
        """Begin timing one (possibly nested) invocation."""
        self.count += 1
        self._t.append(time())
    def finish(self):
        """End the most recent start(); adds time only when fully unwound."""
        started = self._t.pop()
        if len(self._t) == 0:
            self.time += time() - started
    def __repr__(self):
        return '(%.2f %d)' % (self.time, self.count)
STATS = defaultdict(Stat)
class Flags():
    """argparse wrapper that copies parsed options onto itself as attributes."""
    def __init__(self):
        from argparse import ArgumentParser
        self.parser = ArgumentParser()
        self.add_default_options()
    def parse(self, argv=None):
        # Parse argv (default: sys.argv), mirror each option onto self,
        # apply --log_level to the root logger, and truncate sys.argv to
        # just the program name so downstream code does not re-parse flags.
        if not argv:
            argv = sys.argv
        cmd = argv[0]
        options = self.parser.parse_args(argv[1:])
        for k in dir(options):
            if not k.startswith('_'):
                setattr(self, k, getattr(options, k))
        logging.getLogger().level = getattr(logging, self.log_level.upper())
        sys.argv = [cmd]
        if self.stats:
            # Dump the accumulated STATS table when the interpreter exits.
            def dump_stats():
                for k, v in STATS.items():
                    print >>sys.stderr, '%40s :: %s' % (k, v)
            atexit.register(dump_stats)
    def add_default_options(self):
        self.parser.add_argument("--profile", action="store_true", default=False, help="Capture profiling data.")
        self.parser.add_argument("--log_level", action="store", default="info")
        self.parser.add_argument("--stats", action="store_true", default=False, help="Dump statistics on pass timing.")
    def add_argument(self, *args, **kw):
        # Pass-through to the underlying ArgumentParser.
        self.parser.add_argument(*args, **kw)
#flags = Flags()
class NamedList(object):
    '''List with values accessible by both key and by index.

    Keys are fixed at construction time: items can be reassigned but
    never added or removed.
    '''
    def __init__(self, keys, values):
        """Bind `keys` positionally to `values`; raises ValueError when
        the two lists differ in length."""
        self.list = values
        self.keys = keys
        if len(keys) != len(values):
            raise ValueError('Mismatched lists to NamedList!')
        self.mapping = dict((k, i) for i, k in enumerate(keys))
    def __setitem__(self, k, v):
        """Assign by key or by integer index; unknown keys and
        out-of-range indices raise KeyError."""
        if not isinstance(k, int):
            if k not in self.mapping:
                raise KeyError('New values cannot be added to NamedList.')
            k = self.mapping[k]
        if k >= len(self.list):
            raise KeyError('New values cannot be added to NamedList.')
        self.list[k] = v
    def __getitem__(self, k):
        """Look up by integer index or by key."""
        if isinstance(k, int):
            return self.list[k]
        return self.list[self.mapping[k]]
    def __len__(self):
        return len(self.list)
    def __repr__(self):
        pairs = ['(%s, %s)' % (repr(k), repr(v)) for k, v in zip(self.keys, self.list)]
        return '[' + ', '.join(pairs) + ']'
    def __eq__(self, other):
        # fix: comparing against a non-NamedList used to raise
        # AttributeError; defer to the other operand instead.
        if not isinstance(other, NamedList):
            return NotImplemented
        return self.list == other.list
    def __ne__(self, other):
        # Python 2 does not derive != from ==, so define it explicitly.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
    def __contains__(self, k):
        # Membership is by KEY, not by value.
        return k in self.keys
def shell(cmd):
    """Run cmd through os.popen and return everything it wrote to stdout."""
    pipe = os.popen(cmd)
    try:
        return pipe.read()
    finally:
        pipe.close()
def hashable(v):
    """Return True when v can serve as a dict key / set member.

    fix: narrowed the bare `except:` (which also swallowed
    KeyboardInterrupt and the like) to TypeError, the only exception
    hash() raises for unhashable types.
    """
    try:
        hash(v)
        return True
    except TypeError:
        return False
def clean_string(v):
    """Lower-cased alphanumeric/underscore rendering of v, max 30 chars.

    Objects with a __name__ (functions, classes) are rendered by name.
    """
    name = getattr(v, '__name__', None)
    if name is not None:
        v = name
    stripped = re.sub('[^a-zA-Z0-9_]', '', str(v))
    return stripped.lower()[:30]
| Python |
import hashlib
import json
import os
import base64
from util import log, tempfile
# Raised when an expected S3 object is missing.
class FileNotFound(Exception): pass
# Raised when the current user has no AES key entry for a bundle.
class PermissionDenied(Exception): pass
# Names of the per-bundle objects stored inside the S3 'directory'.
AESKEY_FILE = 'aes.key'
CONTENT_FILE = 'contents'
def _hash_path(filepath):
return hashlib.md5(filepath).hexdigest()
def _hash_flatten_filepath(filepath):
    """Return '<md5-of-path>.<basename>' so nested paths flatten to one level."""
    basename = os.path.split(filepath)[1]
    return ".".join([_hash_path(filepath), basename])
class AWSFileBundle(object):
    '''Stores the state for a single file.
    Files are encrypted and versioned; delta compression is applied based on
    previous versions to reduce the amount of data stored.
    '''
    # S3 object (inside the bundle directory) holding the base64 salt used
    # to encrypt CONTENT_FILE.
    SALT_FILE = 'salt'
    def __init__(self, conf, bucket, file_name, crypto_helper):
        self.bucket = bucket
        self.crypto = crypto_helper
        # fix: file_name was never stored, so the PermissionDenied path in
        # load_key_file raised AttributeError instead of the real error.
        self.file_name = file_name
        self.dir = self.bucket.create_dir(_hash_path(file_name))
        self.conf = conf
        # userid -> base64(RSA-wrapped AES key); kept base64-encoded so it
        # round-trips through json in flush_key_file.
        self.enc_aes_keys = dict()
        try:
            self.load_key_file()
        except FileNotFound:
            log.warn("Key files not present. Creating new ones.")
            # Create a new key for this bucket, and upload.
            self.aes_key = self.crypto.generate_aes_key()
            log.info("New AES key (base64): %s" % base64.encodestring(self.aes_key))
            self.enc_aes_keys[self.conf['email_address']] = base64.encodestring(self.crypto.encrypt_aes_key(self.aes_key))
            self.flush_key_file()
    def load_key_file(self):
        '''Attempt to load the encrypted AES key for the given folder.'''
        # expects that AESKEY_FILE is a json'd dictionary (i.e., argument
        # for loads is a str)
        key_file_str = self.dir.read(AESKEY_FILE)
        if not key_file_str:
            raise FileNotFound
        # fix: values stay base64-encoded in enc_aes_keys (they were being
        # decoded in place, which broke a later flush_key_file re-dump).
        self.enc_aes_keys = json.loads(key_file_str)
        if not self.conf['email_address'] in self.enc_aes_keys:
            raise PermissionDenied('Current user cannot decrypt file %s' % self.file_name)
        wrapped = base64.decodestring(self.enc_aes_keys[self.conf['email_address']])
        self.aes_key = self.crypto.decrypt_aes_key(wrapped)
    def flush_key_file(self):
        '''Write a new keyfile containing encrypted aes keys.'''
        self.dir.write(AESKEY_FILE, json.dumps(self.enc_aes_keys))
    def add_key(self, userid, pubkey):
        """Grant `userid` access by wrapping our AES key with their RSA key."""
        # fix: store base64 like load/flush expect.  1 == RSA_PKCS1_PADDING
        # (literal used to avoid importing M2Crypto here) -- TODO confirm
        # against crypto.encrypt_aes_key, which uses the same padding.
        wrapped = pubkey.public_encrypt(self.aes_key, 1)
        self.enc_aes_keys[userid] = base64.encodestring(wrapped)
        self.flush_key_file()
    def add_content(self, input, md5=None):
        '''Write a new content entry, encrypted using the current AES key.'''
        tf = tempfile(self.conf)
        # fix: encrypt with the bundle's AES key (a fresh random key was
        # generated and then discarded, making the content undecryptable)
        # and persist the salt so get_content can decrypt later.
        aes_key, salt = self.crypto.encrypt(input, tf, self.aes_key)
        self.dir.write(self.SALT_FILE, base64.encodestring(salt))
        tf.seek(0)
        self.dir.write(CONTENT_FILE, tf.read(), md5)
        del tf
    def get_content(self, fp):
        '''Return a file object representing the latest content for this bundle.'''
        enc = tempfile(self.conf)
        dec = tempfile(self.conf)
        self.dir.read_to_file(CONTENT_FILE, enc)
        enc.seek(0)
        # fix: decrypt requires the AES key and the stored salt; the old
        # two-argument call raised TypeError.
        salt = base64.decodestring(self.dir.read(self.SALT_FILE))
        self.crypto.decrypt(enc, dec, self.aes_key, salt)
        dec.seek(0)
        del enc
        return dec
| Python |
import os
import sqlite3
import constants as C
# Bootstrap schema script, resolved relative to the current working directory.
SQL_SCHEMA = 'SDB.sql'
class SQLiteHelper:
    """Thin wrapper around the per-user admin SQLite database."""
    def __init__(self, admin_directory, reset=False):
        """Open the admin DB under admin_directory, creating the schema on
        first use.  reset=True deletes any existing database file first."""
        self.db_path = os.path.join(admin_directory, C.SDB_DB_NAME)
        if reset:
            if os.path.exists(self.db_path):
                os.remove(self.db_path)
        self._initialize_db()
    def _initialize_db(self):
        # Bootstrap the schema only when the DB file does not exist yet.
        if not os.path.exists(self.db_path):
            self._create_new_admin_db()
    def _get_init_script(self):
        """Return the bootstrap SQL schema as a single string."""
        # fix: read inside a with-block instead of leaking the file handle
        # via "".join(open(...).readlines()).
        with open(SQL_SCHEMA) as schema_file:
            return schema_file.read()
    def _create_new_admin_db(self):
        """Create the database file and run the schema script."""
        conn = sqlite3.connect(self.db_path)
        cur = conn.cursor()
        cur.executescript(self._get_init_script())
        conn.commit()
        conn.close()
    def get_config(self):
        """Return all known configuration keys as a dict (missing -> None)."""
        ret_dict = dict()
        for conf_item in ['staging_directory',
                          'aws_access_key',
                          'aws_secret_key',
                          'sdb_directory',
                          'email_address',
                          'computer_name',
                          'public_key',
                          'private_key']:
            ret_dict[conf_item] = self.config_get(conf_item)
        return ret_dict
    def config_get(self, key):
        """Return the value stored for `key`, or None when unset."""
        conn = sqlite3.connect(self.db_path)
        ret = None
        with conn:
            rows = conn.execute("SELECT value FROM config WHERE key = ?", (key,))
            r = rows.fetchone()
            if r: ret = r[0]
        conn.close()
        return ret
    def config_set(self, key, value):
        """Insert or overwrite a single configuration key."""
        conn = sqlite3.connect(self.db_path)
        with conn:
            conn.execute("REPLACE INTO config (key, value) VALUES (?,?)", (key, value))
            conn.commit()
        conn.close()
    def set_priv_key_pem(self, priv):
        """Expects PEM (base64 string) version of a private key"""
        self.config_set('private_key', priv)
    def get_priv_key_pem(self):
        """Returns PEM version of a private key"""
        return self.config_get('private_key')
    def create_file(self, **kwargs):
        # TODO: record a new file in file_journal.
        pass
    def update_file(self, **kwargs):
        # TODO: update an existing file_journal row.
        pass
    def insert_user_loc_key(self, email, location, public_key):
        """Register (or reuse) a user row and attach a public key for one
        of their machines."""
        conn = sqlite3.connect(self.db_path)
        with conn:
            conn.execute("INSERT OR IGNORE INTO user (email_address) VALUES (?)", (email,))
            conn.execute("""INSERT INTO public_keys (user_id, location, public_key) VALUES
            ((SELECT id FROM user WHERE email_address = ?), ?, ?)""", (email, location, public_key))
            conn.commit()
        conn.close()
    def share_file(self, server_path, email):
        """Grant `email` permission level 3 on the file at server_path."""
        conn = sqlite3.connect(self.db_path)
        with conn:
            conn.execute("""INSERT INTO file_permission (user_id, file_id, permission) VALUES
            ((SELECT id FROM user WHERE email_address = ?),
            (SELECT id FROM file_journal WHERE server_path = ?),
            3)""", (email, server_path))
            conn.commit()
        conn.close()
    def unshare_file(self, server_path, email):
        """Remove `email`'s permission row for the file at server_path."""
        conn = sqlite3.connect(self.db_path)
        with conn:
            conn.execute("""DELETE FROM file_permission WHERE
            user_id = (SELECT id FROM user WHERE email_address = ?) AND
            file_id = (SELECT id FROM file_journal WHERE server_path = ?)""", (email, server_path))
            conn.commit()
        conn.close()
def test_reset():
    """reset=True must wipe previously stored config values."""
    # fix: referenced nonexistent SDBSQLiteHelper; the class is SQLiteHelper.
    s = SQLiteHelper(os.path.expanduser("~/.safedepositbox"), reset=True)
    p = s.config_get('password')
    assert (p == None)
def test_set_and_get():
    """A stored config value must read back unchanged."""
    # fix: referenced nonexistent SDBSQLiteHelper; the class is SQLiteHelper.
    s = SQLiteHelper(os.path.expanduser("~/.safedepositbox"), reset=True)
    key = 'keypassword'
    value = 'valpassword'
    s.config_set(key, value)
    p = s.config_get(key)
    assert (p == value)
def test_set_and_get_priv_pem():
    """The private-key PEM round-trips through the config table."""
    # fix: referenced nonexistent SDBSQLiteHelper; the class is SQLiteHelper.
    s = SQLiteHelper(os.path.expanduser("~/.safedepositbox"), reset=True)
    priv = 'somelongstringrepresentingapemversionofanrsaprivatekey'
    s.set_priv_key_pem(priv)
    p = s.get_priv_key_pem()
    assert (p == priv)
def test_insert_user_loc_key_insert_only():
    """Smoke test: inserting a (user, location, key) row must not raise."""
    # fix: referenced nonexistent SDBSQLiteHelper; the class is SQLiteHelper.
    s = SQLiteHelper(os.path.expanduser("~/.safedepositbox"), reset=True)
    s.insert_user_loc_key("tierney@cs.nyu.edu", "Macbook Pro", "somethingthatissupposedtorepresentapublickey")
| Python |
#!/usr/bin/env python
# Indices into the known_files values: filename -> [STATUS, MTIME, LOCK].
STATUS = 0
MTIME = 1
LOCK = 2
# File states stored at known_files[filename][STATUS].
NOT_VISITED = 'not visted'  # NOTE(review): value has a typo ('visted'); it is only compared via this constant, so behavior is unaffected
UNCHANGED = 'unchanged'
UPDATED = 'updated'
PNEW = 'poss. new'
IDLE_WINDOW = 2 # sec -- sleep between monitor sweeps in SafeDepositBox.run
SDB_DB_NAME = "safedepositbox.db"  # admin SQLite database filename
| Python |
#!/usr/bin/env python
# Public submodules of the package.
__all__ = ['bundle',
           'constants',
           'crypto',
           'S3',
           'util',
           'SQLiteHelper',
           'rsync',
           'setup',
           ]
# enable access to external libraries.
import extern
| Python |
import os
import random
import re
import string
import time
import ConfigParser
import Queue
import boto.s3
import calendar
import constants as C
import socket
from bundle import AWSFileBundle as bundler
from util import log
# Length budget for the random suffix appended to bucket names (used by
# Connection.create_bucket to avoid global S3 name collisions).
BUCKET_NAME_PADDING_LEN = 20
# S3 metadata key holding the md5 of the original (pre-encryption) file.
METADATA_TAG_MD5 = 'orig_file_md5'
class Policy(object):
    """Namespace for S3 naming-policy helpers.

    fix: this was declared with `def` instead of `class`, so
    Policy.string_to_dns never existed as an attribute.
    """
    @staticmethod
    def string_to_dns(string):
        """Coerce `string` into a DNS-safe bucket label, or None if impossible.

        Disallowed characters become '-', the result is lower-cased and
        truncated to 63 chars; names shorter than 3 chars or that parse as
        IPv4 addresses are rejected (returns None).
        """
        # Reasonable replacements (don't know if users will hate us for this)
        # fix: re.sub was missing its input string argument.
        string = re.sub(r'[^\w.-]', '-', string).strip()
        # Check length of the string
        string = string.lower()
        string = string[:63]
        if len(string) < 3:
            return None
        # Make sure we do not have an IP address
        try:
            socket.inet_aton(string)
            # we have a legal ip address (so bad!)
            return None
        except socket.error:
            # we have an invalid ip addr, so we might be okay
            pass
        return string
class Connection(object):
    """Wraps a boto S3 connection plus a work queue of pending
    upload/delete operations (see enqueue/proc_queue)."""
    def __init__(self, conf, prefix):
        self.conf = conf
        self.prefix = prefix
        self.bucket_name = conf.get("bucket_name")
        self.staging_directory = conf.get("staging_directory")
        self.aws_access_key_id = conf.get("aws_access_key")
        self.aws_secret_access_key = conf.get("aws_secret_key")
        self.email_address = conf.get("email_address")
        self.computer_name = conf.get("computer_name")
        self.queue = Queue.Queue()
        self._connect()
        # should check if the bucket exists in S3.
        self._set_bucket(self.bucket_name)
    class Directory(object):
        """
        This class provides access to the 'directory' on S3. The idea
        is that every file we care about in our system would have a
        unique file path that it corresponds to.
        """
        def __init__(self, connection, bucket, dirpath):
            self.conn = connection
            self.bucket = bucket
            self.dir = dirpath
        def list(self):
            # All keys that live under this directory prefix.
            return self.bucket.get_all_keys(prefix=self.dir)
        def read(self, file):
            # Return the object body as a string, or None when absent.
            keypath = os.path.join(self.dir, file)
            key = self.bucket.get_key(keypath)
            if key:
                return key.get_contents_as_string()
            return None
        def write(self, file, contentfp, md5=None):
            # NOTE(review): despite the name, contentfp is passed to
            # set_contents_from_string -- callers hand in a string.
            keyname = os.path.join(self.dir, file)
            log.info("keyname: %s" % keyname)
            key = boto.s3.key.Key(self.bucket, keyname)
            if md5: key.set_metadata(METADATA_TAG_MD5, md5)
            key.set_contents_from_string(contentfp)
    def create_dir(self, hashed_path_to_filename):
        # Factory for Directory handles rooted at the hashed file path.
        return self.Directory(self.conn, self.bucket, hashed_path_to_filename)
    def _connect(self):
        self.conn = boto.connect_s3(self.aws_access_key_id,
                                    self.aws_secret_access_key)
    def _set_bucket(self, bucket_name):
        # Binds a Bucket object locally, without a round-trip to S3.
        self.bucket = boto.s3.bucket.Bucket(self.conn, bucket_name)
    def _create_bucket(self, bucket_name):
        self.conn.create_bucket(bucket_name)
        self.bucket = self.conn.get_bucket(bucket_name)
        self.bucket.configure_versioning(True)
        self.bucket.make_public()
        pass
    def create_bucket(self):
        # Need to make creating a public bucket and admin bucket easy.
        #
        # store the bucket_name in our configuration
        # NOTE(review): only builds and RETURNS a candidate name -- the
        # bucket is never actually created (the _create_bucket call below
        # is commented out), and range(1, N) yields N-1 random characters.
        prefix = Policy.string_to_dns(self.prefix)
        s = "".join([random.choice(string.lowercase + string.digits)
                     for x in range(1, BUCKET_NAME_PADDING_LEN)])
        bucket_name = prefix + '.' + s
        return bucket_name
        #self._create_bucket(s)
    def get_all_buckets(self):
        return self.conn.get_all_buckets()
    def get_all_keys(self):
        # check if bucket exists?
        return self.bucket.get_all_keys()
    def send_filename(self, s3key, filename_src, file_md5):
        # Upload a local file, tagging it with the original file's md5.
        key = boto.s3.key.Key(self.bucket, s3key)
        key.set_metadata(METADATA_TAG_MD5, file_md5)
        key.set_contents_from_filename(filename_src)
    def get_metadata(self, s3key, metadata):
        # NOTE(review): key.md5 is dereferenced without a None check -- a
        # missing key raises AttributeError here.
        key = self.bucket.get_key(s3key)
        print key.md5
        return key.get_metadata(metadata)
    def get_filename(self, s3key, filename_dest):
        # could add a progress meter here.
        # NOTE(review): key.last_modified is read BEFORE the `if key`
        # guard, so a missing key raises AttributeError.
        key = self.bucket.get_key(s3key)
        print key.last_modified
        if key:
            key.get_contents_to_filename(filename_dest)
            return filename_dest
        return None
    def enqueue(self, filename, state):
        # Queue a (filename, state) work item for proc_queue.
        self.queue.put([filename, state])
    def proc_queue(self, prefix_to_ignore, crypto_helper):
        # Worker loop: drain the queue forever, uploading new/updated files
        # and deleting S3 keys for files no longer seen locally.
        while True:
            filename, state = self.queue.get()
            relative_filepath = filename.replace(prefix_to_ignore, '')
            print 'relative_filepath:', relative_filepath
            key_filename = '.'.join([relative_filepath,
                                     self.email_address,
                                     self.computer_name])
            if C.PNEW == state:
                self.pnew_key = self.bucket.get_key(key_filename)
                with open(filename) as fp:
                    file_md5 = boto.s3.key.Key().compute_md5(fp)[0]
                if not self.pnew_key: # New file when we started up
                    bundle_helper = bundler(self.conf, self, filename, crypto_helper)
                    with open(filename) as fp:
                        bundle_helper.add_content(fp, file_md5)
                    print "Exact file we expect to send:", filename
                    # val_filename = os.path.join(self.staging_directory, enc_filepath)
                    # self.send_filename(bundle_helper, filename, file_md5)
                else: # Existing file. Checking if stale.
                    with open(filename) as fp:
                        md5, md5b64 = self.pnew_key.compute_md5(fp)
                    if self.pnew_key.get_metadata(METADATA_TAG_MD5) != md5:
                        # NOTE(review): crypto_helper.bundle() is not defined
                        # on CryptoHelper -- this path raises AttributeError.
                        enc_filepath = crypto_helper.bundle(filename)
                        val_filename = os.path.join(self.staging_directory, enc_filepath)
                        self.send_filename(key_filename, val_filename, file_md5)
            if C.UPDATED == state:
                with open(filename) as fp:
                    md5, md5b64 = boto.s3.key.Key().compute_md5(fp)
                # NOTE(review): same undefined crypto_helper.bundle() as above.
                enc_filepath = crypto_helper.bundle(filename)
                val_filename = os.path.join(self.staging_directory, enc_filepath)
                self.send_filename(key_filename, val_filename, md5)
            if C.NOT_VISITED == state:
                # delete file(s)...
                relative_filepath = filename.replace(prefix_to_ignore, '')
                keys = self.bucket.get_all_keys(prefix=relative_filepath)
                for key in keys:
                    self.bucket.delete_key(key)
            self.queue.task_done()
def main():
    # Smoke-test driver: connect with credentials read from the user's
    # local config file and exercise the bucket operations.
    # User must setup an AWS account
    cp = ConfigParser.ConfigParser()
    cp.read(os.path.expanduser('~/.safe-deposit-box/test.cfg'))
    from config import Config
    conf = Config(user_id='test@test.com',
                  access_key=cp.get('aws', 'access_key_id'),
                  secret_key=cp.get('aws', 'secret_access_key'),
                  staging_dir='/tmp',
                  bucket='safe-deposit-box')
    b = Connection(conf, prefix='/data')
    print b.get_all_buckets()
    for k in b.get_all_keys():
        mtime = k.last_modified
        print mtime
        print time.strptime(mtime.replace("Z", ''), u"%Y-%m-%dT%H:%M:%S.000")
        print calendar.timegm(time.strptime(mtime.replace("Z", ''), u"%Y-%m-%dT%H:%M:%S.000"))
        print "  ", k, mtime
    b.create_bucket()
    # NOTE(review): `md5` is not defined anywhere in this module; this line
    # raises NameError if reached.  TODO fix before use.
    b.send_filename('DESIGN', 'DESIGN', md5)
def test_string_to_dns():
    # Eyeball-check driver for Policy.string_to_dns: prints results for a
    # range of inputs, makes no assertions.
    print Policy.string_to_dns("he")
    print Policy.string_to_dns("he ")
    print Policy.string_to_dns("hello worlds")
    print Policy.string_to_dns("hello worlds!")
    print Policy.string_to_dns("hello worlds-")
    print Policy.string_to_dns("hello's worlds-")
    print Policy.string_to_dns("hello's worlds---")
    print Policy.string_to_dns("hello\"s worlds---")
    print Policy.string_to_dns("Matt Tierney's Bronx iMac " * 10)
    print Policy.string_to_dns("140.247.61.26")
    print Policy.string_to_dns("277.247.61.26")
    print Policy.string_to_dns("I-.-.-like--.three.dots")
    print Policy.string_to_dns("I.like.three.dots")
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
| Python |
#!/usr/bin/env python
import os
import boto
from boto.exception import SDBResponseError
# fix: AWS credentials were hard-coded in source (a committed secret leak);
# read them from the environment instead.  The old key pair must be
# considered compromised and rotated.
conn = boto.connect_sdb(os.environ['AWS_ACCESS_KEY_ID'],
                        os.environ['AWS_SECRET_ACCESS_KEY'])
try:
    domain = conn.get_domain('sdb')
except SDBResponseError:
    # fix: SDBResponseError was referenced without being imported, which
    # raised NameError instead of creating the missing domain.
    domain = conn.create_domain('sdb')
# Sample items exercising single- and multi-valued attributes.
data = {}
data['some-item'] = {'color':'blue','price':10,'size':'small'}
data['some-other-item'] = {'color':'red','price':15,'size':'medium'}
data['bicolored'] = {'color':['blue','green'],'price':15,'size':'small'}
data['tricolored'] = {'color':['red','blue','green'],'price':20,'size':'medium'}
data['nocolor'] = {'price':10,'size':'small'}
data['another-color'] = {'color':'purple','price':5,'size':'tiny',
                         'comment':'This one is really small'}
# Insert the items
for name, d in data.items():
    item = domain.new_item(name)
    for k, v in d.items():
        item[k] = v
    item.save()
# inode numbers are unique per filesystem. bundle the computer name
# and the filesystem (computer-name, inode) represents equivalent
# across different machines
#
# checkpoints
# deltas
# rsync checksums, hashes
#
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a pure Python implementation of the [rsync algorithm](TM96).
[TM96] Andrew Tridgell and Paul Mackerras. The rsync algorithm.
Technical Report TR-CS-96-05, Canberra 0200 ACT, Australia, 1996.
http://samba.anu.edu.au/rsync/.
### Example Use Case: ###
# On the system containing the file that needs to be patched
>>> unpatched = open("unpatched.file", "rb")
>>> hashes = blockchecksums(unpatched)
# On the remote system after having received `hashes`
>>> patchedfile = open("patched.file", "rb")
>>> delta = rsyncdelta(patchedfile, hashes)
# System with the unpatched file after receiving `delta`
>>> unpatched.seek(0)
>>> save_to = open("locally-patched.file", "wb")
>>> patchstream(unpatched, save_to, delta)
"""
import collections
import hashlib
if not(hasattr(__builtins__, "bytes")) or str is bytes:
    # Python 2.x compatibility
    # On py2 (where str IS bytes) shadow bytes() with a converter: an
    # iterable of ints becomes a str, and a str becomes a list of ints --
    # mirroring how py3 bytes behave for the uses in this module.
    def bytes(var, *args):
        try:
            return ''.join(map(chr, var))
        except TypeError:
            return map(ord, var)
__all__ = ["rollingchecksum", "weakchecksum", "patchstream", "rsyncdelta",
"blockchecksums"]
def rsyncdelta(datastream, remotesignatures, blocksize=4096):
    """
    Generates a binary patch when supplied with the weak and strong
    hashes from an unpatched target and a readable stream for the
    up-to-date data. The blocksize must be the same as the value
    used to generate remotesignatures.

    Returns a list: element 0 is the blocksize; each later element is
    either an int (block index to copy from the unpatched file) or raw
    bytes to emit verbatim (see patchstream).
    """
    remote_weak, remote_strong = remotesignatures
    match = True
    matchblock = -1
    deltaqueue = collections.deque()
    while True:
        if match and datastream is not None:
            # Whenever there is a match or the loop is running for the first
            # time, populate the window using weakchecksum instead of rolling
            # through every single byte which takes at least twice as long.
            window = collections.deque(bytes(datastream.read(blocksize)))
            checksum, a, b = weakchecksum(window)
        try:
            # If there are two identical weak checksums in a file, and the
            # matching strong hash does not occur at the first match, it will
            # be missed and the data sent over. May fix eventually, but this
            # problem arises very rarely.
            matchblock = remote_weak.index(checksum, matchblock + 1)
            stronghash = hashlib.md5(bytes(window)).hexdigest()
            matchblock = remote_strong.index(stronghash, matchblock)
            match = True
            deltaqueue.append(matchblock)
            if datastream.closed:
                break
            continue
        except ValueError:
            # The weakchecksum did not match
            match = False
        try:
            if datastream:
                # Get the next byte and affix to the window
                newbyte = ord(datastream.read(1))
                window.append(newbyte)
        except TypeError:
            # No more data from the file; the window will slowly shrink.
            # newbyte needs to be zero from here on to keep the checksum
            # correct.
            newbyte = 0
            tailsize = datastream.tell() % blocksize
            datastream = None
        if datastream is None and len(window) <= tailsize:
            # The likelihood that any blocks will match after this is
            # nearly nil so call it quits.
            deltaqueue.append(window)
            break
        # Yank off the extra byte and calculate the new window checksum
        oldbyte = window.popleft()
        checksum, a, b = rollingchecksum(oldbyte, newbyte, a, b, blocksize)
        # Add the old byte the file delta. This is data that was not found
        # inside of a matching block so it needs to be sent to the target.
        try:
            deltaqueue[-1].append(oldbyte)
        except (AttributeError, IndexError):
            deltaqueue.append([oldbyte])
    # Return a delta that starts with the blocksize and converts all iterables
    # to bytes.
    deltastructure = [blocksize]
    for element in deltaqueue:
        if isinstance(element, int):
            deltastructure.append(element)
        elif element:
            deltastructure.append(bytes(element))
    return deltastructure
def blockchecksums(instream, blocksize=4096):
    """
    Compute per-block (weak, strong) hash lists for a data stream.

    Reads ``instream`` in ``blocksize`` chunks until exhausted and
    returns a pair of parallel lists: weak rolling checksums and MD5
    hex digests, one entry per block.
    """
    weakhashes, stronghashes = [], []
    while True:
        block = instream.read(blocksize)
        if not block:
            break
        weakhashes.append(weakchecksum(bytes(block))[0])
        stronghashes.append(hashlib.md5(block).hexdigest())
    return weakhashes, stronghashes
def patchstream(instream, outstream, delta):
    """
    Apply ``delta`` to ``instream`` and write the patched data to
    ``outstream``.

    ``delta[0]`` is the block size.  Every later entry is either an
    int -- the index of an unchanged block to copy from ``instream`` --
    or raw data that replaces the corresponding block.
    """
    entries = iter(delta)
    blocksize = next(entries)
    for entry in entries:
        if isinstance(entry, int) and blocksize:
            # Unchanged block: pull it straight from the source stream.
            instream.seek(entry * blocksize)
            entry = instream.read(blocksize)
        outstream.write(entry)
def rollingchecksum(removed, new, a, b, blocksize=4096):
    """
    Roll the weak checksum one byte forward.

    Given the ``(a, b)`` state of the previous window, drop ``removed``
    and admit ``new``; returns the packed checksum plus the new state.
    """
    a = a - removed + new
    b = b + a - removed * blocksize
    return (b << 16) | a, a, b
def weakchecksum(data):
    """
    Compute the rsync weak checksum over an indexable sequence of byte
    values.

    Returns ``(checksum, a, b)`` where ``checksum`` packs ``b`` into
    the high 16 bits and ``a`` into the low 16 bits.
    """
    size = len(data)
    a = sum(data)
    b = sum((size - i) * data[i] for i in range(size))
    return (b << 16) | a, a, b
# def test_blockchecksums0():
# unpatched = open("/home/tierney/src/safe-deposit-box/src/SafeDepositBox/50MB.txt","rb")
# hashes = blockchecksums(unpatched, blocksize=4194304)
# return hashes
# def test_blockchecksums1():
# unpatched = open("/home/tierney/src/safe-deposit-box/src/SafeDepositBox/4MB.txt","rb")
# hashes = blockchecksums(unpatched, blocksize=4 * (2 ** 20))
# return hashes
# def test_patchedfile():
# unpatched = open("/home/tierney/src/safe-deposit-box/src/SafeDepositBox/4MB.txt","rb")
# hashes = blockchecksums(unpatched, blocksize=4194304)
# patchedfile = open("/home/tierney/src/safe-deposit-box/src/SafeDepositBox/4MBpatched.txt","rb")
# delta = rsyncdelta(patchedfile, hashes)
# print delta
# def test_patchedfile0():
# unpatched = open("export0.pdf","rb")
# blocksize=8 * (2 ** 10)
# hashes = blockchecksums(unpatched, blocksize=blocksize)
# import pprint
# pprint.pprint(hashes)
# patchedfile = open("export1.pdf","rb")
# delta = rsyncdelta(patchedfile, hashes, blocksize)
# with open("export1-new.pdf","w") as fh:
# patchstream(unpatched, fh, delta)
# print
# print len(delta)
# print
# print delta.__sizeof__()
# for delt in delta:
# if type(delt) == type(0):
# pass #print "INT:", delt
# else:
# print "list?:", len(delt), delt.__sizeof__()
# def test_patchedfile1():
# unpFile = "/home/tierney/src/safe-deposit-box/src/SafeDepositBox/4MB.txt"
# pFile = "/home/tierney/src/safe-deposit-box/src/SafeDepositBox/4MBpatched.txt"
# out = "4MBpatched-new.txt"
# # unpFile = "export0.pdf"
# # pFile = "export1.pdf"
# # out = "export-new.pdf"
# blocksize= 8 * (2 ** 10)
# unpatched = open(unpFile,"rb")
# hashes = blockchecksums(unpatched, blocksize=blocksize)
# import pprint
# # pprint.pprint(hashes[0])
# # print len(hashes[0]), hashes[0].__sizeof__() #len(hashes[1]), hashes[1].__sizeof__()
# # pprint.pprint(hashes[1])
# # print len(hashes[1]), hashes[1].__sizeof__() #len(hashes[1]), hashes[1].__sizeof__()
# print len(hashes[0]), len(hashes[1])
# patchedfile = open(pFile,"rb")
# delta = rsyncdelta(patchedfile, hashes, blocksize)
# print delta
# print len(delta)
# with open(out,"w") as fh:
# patchstream(unpatched, fh, delta)
# print
# print delta.__sizeof__()
# for delt in delta:
# if not isinstance(delt, int):
# print "list?:", len(delt), delt.__sizeof__()
def iosprint(fin):
    """Print every line of ``fin`` and rewind it to the start.

    Uses the print *function* form so this helper stays valid under
    both Python 2 and Python 3 (the rest of this file already tries
    for 2/3 compatibility via the StringIO fallback and
    ``xrange = range``); the old ``print fin.readlines()`` statement
    was a SyntaxError on Python 3.
    """
    print(fin.readlines())
    fin.seek(0)
if __name__ == "__main__":
    # Self-test: build random target data, shuffle some 2 KB chunks to
    # simulate an edited copy, then round-trip through
    # blockchecksums -> rsyncdelta -> patchstream and verify the merge.
    # BUGFIX: the py2-only `print "..."` statements below were converted
    # to the function form, which behaves identically on Python 2 for a
    # single argument and makes the script runnable on Python 3 (the
    # file already targets both via the StringIO fallback).
    # test_patchedfile1()
    # import sys
    # sys.exit(0)
    import random
    import time
    xrange = range
    try:
        from StringIO import StringIO
    except ImportError:
        from io import BytesIO as StringIO
    # Generates random data for the test
    datasize = 1<<16
    datasize = 4 * (2 ** 20)
    targetdata = ''.join([chr(random.randint(0, 127)) for n in range(datasize)])
    chunks = [targetdata[i:i+2048] for i in xrange(0, 1<<17, 2048)]
    for i in xrange(8):
        a, b = (
            random.randrange(0, len(chunks)), random.randrange(0, len(chunks)))
        chunks[a], chunks[b] = chunks[b], chunks[a]
    hostdata = ''.join(chunks)
    # targetstream: file to be patched (original)
    # hoststream: what the unpatched target needs to become (newer version)
    # mergedstream (patcheddata): output after patching
    # Python 3 bytes compatibility
    mergedstream = StringIO()
    if __builtins__.bytes == str:
        targetstream = StringIO(targetdata)
        hoststream = StringIO(hostdata)
    else:
        targetstream = StringIO(bytes(targetdata, "ascii"))
        hoststream = StringIO(bytes(hostdata, "ascii"))
    targetchecksums = blockchecksums(targetstream)
    binarypatch = rsyncdelta(hoststream, targetchecksums)
    patchstream(targetstream, mergedstream, binarypatch)
    mergedstream.seek(0)
    patcheddata = mergedstream.read()
    if __builtins__.bytes == str:
        print("assume bytes means str")
        assert patcheddata == hostdata
    else:
        print("not assuming bytes means str")
        assert str(patcheddata, 'ascii') == hostdata
    print("Test passed.")
| Python |
#!/usr/bin/env python
import M2Crypto
def ocb():
    # No-op progress callback: M2Crypto invokes this periodically while
    # searching for primes during key generation.
    pass
# Generate a 2048-bit RSA key with public exponent 11.
# NOTE(review): 11 is an unusual public exponent -- 65537 is the
# conventional choice; confirm this value is intentional.
k = M2Crypto.RSA.gen_key(2048,11, ocb)
| Python |
#!/usr/bin/env python
import os
import stat
# Walk the GoodReader sync folder, printing each directory's inode and
# mtime followed by the names of the files it contains (Python 2 print
# statements; diagnostic script).
for root, dirs, files in os.walk(os.path.expanduser("~/Dropbox/GoodReader")):
    st = os.stat(root)
    print os.path.isdir(root), st.st_ino, st.st_mtime, root
    for fi in files:
        print " ", fi
| Python |
#!/usr/bin/env python
import xdelta3
| Python |
#!/usr/bin/env python
import os
import base64
import hashlib
import zlib
import time
BLOCK_SIZE = 4194304 # bytes (4 MB)
def time_compression(data, lib, level):
    """Compress ``data`` with ``lib`` at ``level`` and report timing.

    Prints the library name, compression level, elapsed wall-clock
    seconds, and the compression ratio (compressed size / input size).
    ``lib`` is any module exposing ``compress(data, level)`` such as
    ``zlib`` or ``bz2``.
    """
    start = time.time()
    out = lib.compress(data, level)
    finish = time.time()
    print lib.__name__, level, (finish - start), float(len(out)) / len(data)
# Read one BLOCK_SIZE chunk from a sample file and compare: raw size,
# zlib level-1 compressed size, base64-expanded size, and MD5 digest.
# (Python 2 print statements; ad-hoc measurement script.)
f = open(os.path.expanduser("~/Dropbox/bigfile.txt"))
s = f.read(BLOCK_SIZE)
# for lib in [zlib, bz2]:
#     for level in range(1,10):
#         time_compression(s, lib, level)
zc = zlib.compress(s,1)
print zc
print len(zc)
#print len(bz2.compress(s,7))
print len(s)
print len(base64.encodestring(s))
print hashlib.md5(s).hexdigest()
| Python |
#
# SDBStatusBar.py
# SafeDepositBox
#
# Created by Matt Tierney on 2/28/11.
# Copyright (c) 2011 NYU. All rights reserved.
#
from Foundation import *
from AppKit import *
from SafeDepositBox import SafeDepositBox
from threading import Thread
start_time = NSDate.date()
class SDBStatusBar(NSObject):
    # Menu-bar controller for SafeDepositBox: owns the NSStatusItem,
    # its icon, and a 1-second timer that refreshes the menu.
    statusbar = None
    image = None
    def doDisplay(self, sender):
        # Start the sync backend and install the menu-bar status item.
        # NOTE(review): despite the selector-like name, callers invoke
        # this directly as a plain Python method.
        NSLog("doDisplay called")
        s = SafeDepositBox()
        # Run the S3 upload queue off the Cocoa main thread so the UI
        # stays responsive.
        Thread(target=s.s3bucket.proc_queue, args=(s.prefix_to_ignore,
                                                   s.enc_service)).start()
        s.start()
        self.counter = 0
        self.statusbar = NSStatusBar.systemStatusBar()
        # Create the statusbar item
        self.statusitem = self.statusbar.statusItemWithLength_(NSVariableStatusItemLength)
        # Load all images
        path = NSBundle.mainBundle().pathForImageResource_("safe3.png")
        self.image = NSImage.alloc().initWithContentsOfFile_(path)
        print self.image
        # Set initial image
        self.statusitem.setImage_(self.image)
        # Let it highlight upon clicking
        self.statusitem.setHighlightMode_(1)
        # Set a tooltip
        self.statusitem.setToolTip_('Safe Deposit Box 0.1\n(Yay! Values in Technology =)')
        self._build_menu(sender)
        # Fire the 'tick:' selector once a second (drives menu refresh).
        self.timer = NSTimer.alloc().initWithFireDate_interval_target_selector_userInfo_repeats_(
            start_time,
            1.0,
            self,
            'tick:',
            None,
            True)
        NSRunLoop.currentRunLoop().addTimer_forMode_(self.timer, NSDefaultRunLoopMode)
        self.timer.fire()
    def tick_(self, notification):
        # Timer callback ('tick:'): rebuild the menu with the counter so
        # the usage line updates.
        self.counter += 1
        self._build_menu(self.counter)
        print "We ticked!", self.counter
    def launchsite_(self, notification):
        # Menu action stub: open the product website.
        print "launch browser for our website"
    def preferences_(self, notification):
        # Menu action stub.
        print "preferences menu"
    def help_(self, notification):
        # Menu action stub.
        print "help menu"
    def _build_menu(self, notification):
        # Rebuild the status-item menu from scratch.  When called from
        # tick_, `notification` is an int (the counter) and the usage
        # line shows a synthesized figure; otherwise a placeholder.
        self.menu = NSMenu.alloc()
        self.menu.init()
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Open SafeDepositBox Folder', 'openfolder:', '')
        self.menu.addItem_(menuitem)
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Launch SafeDepositBox Website', 'launchsite:', '')
        self.menu.addItem_(menuitem)
        menuitem = NSMenuItem.separatorItem()
        self.menu.addItem_(menuitem)
        if type(1) == type(notification):
            menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('%s GB used on Amazon\'s S3' % str(3.14 + .01*float(notification)), '', '')
        else:
            menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Calculating usage...', '', '')
        self.menu.addItem_(menuitem)
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Estimated cost: $%s' % str("0.01"), '', '')
        self.menu.addItem_(menuitem)
        menuitem = NSMenuItem.separatorItem()
        self.menu.addItem_(menuitem)
        # Sync event is bound to sync_ method
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('All files up to date','','')
        self.menu.addItem_(menuitem)
        menuitem = NSMenuItem.separatorItem()
        self.menu.addItem_(menuitem)
        # Default event
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Quit', 'terminate:', '')
        self.menu.addItem_(menuitem)
        # Bind it to the status item
        self.statusitem.setMenu_(self.menu)
| Python |
#
# main.py
# SafeDepositBox
#
# Created by Matt Tierney on 2/27/11.
# Copyright NYU 2011. All rights reserved.
#
#import modules required by application
import objc
#import Foundation
#import AppKit
from Foundation import *
from AppKit import *
from PyObjCTools import AppHelper
# import modules containing classes required to start application and load MainMenu.nib
import SafeDepositBoxAppDelegate
import os
# bummer that this is causing me grief.
# PyObjC bootstrap: create the shared application object and install
# the delegate before the event loop starts, so the delegate receives
# applicationDidFinishLaunching_.
# bummer that this is causing me grief.
app = NSApplication.sharedApplication()
#admin_directory = os.path.join(os.environ["HOME"], ".safedepositbox")
#config_filepath = os.path.join(admin_directory, "safedepositbox.conf")
#if not os.path.exists(config_filepath):
#    NSBundle.loadNibNamed_owner_("MainMenu", NSApp)
#else:
delegate = SafeDepositBoxAppDelegate.SafeDepositBoxAppDelegate.alloc().init()
app.setDelegate_(delegate)
# pass control to AppKit
AppHelper.runEventLoop() | Python |
#
# SetupWindowController.py
# SafeDepositBox
#
# Created by Matt Tierney on 2/27/11.
# Copyright (c) 2011 NYU. All rights reserved.
#
from objc import YES, NO, IBAction, IBOutlet
from Foundation import *
from AppKit import *
import ConfigParser
import os
import SafeDepositBoxAppDelegate
from SDBStatusBar import SDBStatusBar
class SetupWindowController(NSWindowController):
    """
    First-run setup window: collects user details and AWS credentials,
    writes ~/.safedepositbox/safedepositbox.conf, then shows the
    status bar.

    Make sure that the nib/xib that contains our setup menu is NOT set to be
    visible when our application launches.
    """
    # Interface Builder outlets for the setup form fields.
    firstName = IBOutlet()
    lastName = IBOutlet()
    userEmailAddress = IBOutlet()
    userPassword = IBOutlet()
    verifyPassword = IBOutlet()
    awsAccessKey = IBOutlet()
    awsSecretKey = IBOutlet()
    computerName = IBOutlet()
    def awakeFromNib(self):
        # On first run (no config file yet) show the setup window;
        # otherwise skip straight to the status-bar UI.
        NSLog("Check if we already have a conf file.")
        admin_directory = os.path.join(os.environ["HOME"], ".safedepositbox")
        config_filepath = os.path.join(admin_directory, "safedepositbox.conf")
        # Check if configuration file exists and either create it with user data
        # or launch the statusbar.
        if not os.path.exists(config_filepath):
            wc = self.initWithWindowNibName_("MainMenu")
            wc.showWindow_(self)
        else:
            SDBStatusBar.alloc().doDisplay(self)
    @IBAction
    def saveClose_(self, sender):
        """Validate the form, persist the config file, and close.

        The window stays open (silently) when any field is empty or the
        two password fields disagree.
        """
        # Logic for controlling the setup menu.
        values = { 'firstName' : self.firstName.stringValue(),
                   'lastName': self.lastName.stringValue(),
                   'userEmailAddress' : self.userEmailAddress.stringValue(),
                   'userPassword' : self.userPassword.stringValue(),
                   'verifyPassword' : self.verifyPassword.stringValue(),
                   'awsAccessKey' : self.awsAccessKey.stringValue(),
                   'awsSecretKey' : self.awsSecretKey.stringValue(),
                   'computerName' : self.computerName.stringValue(),
                   'sdbDirectory' : os.path.join(os.environ["HOME"],
                                                 "SafeDepositBox"),
                   }
        rcp = ConfigParser.RawConfigParser()
        section = "sdb"
        rcp.add_section(section)
        bClose = True
        for key in values:
            val = values.get(key)
            if not val:
                # Any empty field blocks saving.
                bClose = False
            rcp.set(section, key, val)
        if (bClose and
            (values.get('userPassword') != values.get('verifyPassword'))):
            # Tell user that their passwords don't match (show highlighted box?)
            bClose = False
        if bClose:
            # Create Admin Directory
            admin_directory = os.path.join(os.environ["HOME"],
                                           ".safedepositbox")
            if not os.path.exists(admin_directory):
                os.mkdir(admin_directory)
            elif not os.path.isdir(admin_directory):
                # A plain file is squatting on the admin path: replace it.
                os.remove(admin_directory)
                # BUGFIX: was `os.mkdir(admIn_directory)` (capital-I typo),
                # which raised NameError whenever this branch ran.
                os.mkdir(admin_directory)
            config_filepath = os.path.join(admin_directory,
                                           "safedepositbox.conf")
            with open(config_filepath,'w') as fh:
                rcp.write(fh)
            NSLog("Wrote configuration file!")
            # Show status bar now that we have initialized everything.
            SDBStatusBar.alloc().doDisplay(self)
            self.close()
    @IBAction
    def updateField_(self, sender):
        # print-function form (identical output on Python 2 for one arg)
        # keeps this module importable under Python 3.
        print("Updating Field value: %s" % sender.stringValue())
| Python |
#
# SafeDepositBoxAppDelegate.py
# SafeDepositBox
#
# Created by Matt Tierney on 2/27/11.
# Copyright NYU 2011. All rights reserved.
#
from objc import YES, NO, IBAction, IBOutlet
from Foundation import *
from AppKit import *
from SDBStatusBar import SDBStatusBar
import SetupWindowController
import os
start_time = NSDate.date()
class SafeDepositBoxAppDelegate(NSObject):
    # Application delegate: once the app finishes launching, load the
    # main nib (its window controller takes over from there).
    def applicationDidFinishLaunching_(self, sender):
        NSLog("Application did finish launching.")
        NSBundle.loadNibNamed_owner_("MainMenu", NSApp)
| Python |
#!/usr/bin/env python
import cython
"""
This is a pure Python implementation of the [rsync algorithm](TM96).
[TM96] Andrew Tridgell and Paul Mackerras. The rsync algorithm.
Technical Report TR-CS-96-05, Canberra 0200 ACT, Australia, 1996.
http://samba.anu.edu.au/rsync/.
### Example Use Case: ###
# On the system containing the file that needs to be patched
>>> unpatched = open("unpatched.file", "rb")
>>> hashes = blockchecksums(unpatched)
# On the remote system after having received `hashes`
>>> patchedfile = open("patched.file", "rb")
>>> delta = rsyncdelta(patchedfile, hashes)
# System with the unpatched file after receiving `delta`
>>> unpatched.seek(0)
>>> save_to = open("locally-patched.file", "wb")
>>> patchstream(unpatched, save_to, delta)
"""
import collections
import hashlib
__all__ = ["rollingchecksum", "weakchecksum", "patchstream", "rsyncdelta",
"blockchecksums"]
def rsyncdelta(datastream, remotesignatures, blocksize=4096):
    """
    Generates a binary patch when supplied with the weak and strong
    hashes from an unpatched target and a readable stream for the
    up-to-date data. The blocksize must be the same as the value
    used to generate remotesignatures.

    Returns ``[blocksize, entry, entry, ...]`` where each entry is
    either an int (index of an unchanged remote block) or a bytes run
    of literal data that must be transmitted.
    """
    remote_weak, remote_strong = remotesignatures
    # match=True forces the first iteration to fill the window;
    # matchblock=-1 makes the first .index() search start at block 0.
    match = True
    matchblock = -1
    deltaqueue = collections.deque()
    while True:
        if match and datastream is not None:
            # Whenever there is a match or the loop is running for the first
            # time, populate the window using weakchecksum instead of rolling
            # through every single byte which takes at least twice as long.
            window = collections.deque(bytes(datastream.read(blocksize)))
            checksum, a, b = weakchecksum(window)
        try:
            # If there are two identical weak checksums in a file, and the
            # matching strong hash does not occur at the first match, it will
            # be missed and the data sent over. May fix eventually, but this
            # problem arises very rarely.
            matchblock = remote_weak.index(checksum, matchblock + 1)
            # Strong (MD5) hash is only computed after a weak hit.
            stronghash = hashlib.md5(bytes(window)).hexdigest()
            matchblock = remote_strong.index(stronghash, matchblock)
            match = True
            deltaqueue.append(matchblock)
            if datastream.closed:
                break
            continue
        except ValueError:
            # The weakchecksum did not match
            match = False
            try:
                if datastream:
                    # Get the next byte and affix to the window
                    newbyte = ord(datastream.read(1))
                    window.append(newbyte)
            except TypeError:
                # No more data from the file; the window will slowly shrink.
                # newbyte needs to be zero from here on to keep the checksum
                # correct.
                newbyte = 0
                tailsize = datastream.tell() % blocksize
                datastream = None
            if datastream is None and len(window) <= tailsize:
                # The likelihood that any blocks will match after this is
                # nearly nil so call it quits.
                deltaqueue.append(window)
                break
            # Yank off the extra byte and calculate the new window checksum
            oldbyte = window.popleft()
            checksum, a, b = rollingchecksum(oldbyte, newbyte, a, b, blocksize)
            # Add the old byte the file delta. This is data that was not found
            # inside of a matching block so it needs to be sent to the target.
            try:
                deltaqueue[-1].append(oldbyte)
            except (AttributeError, IndexError):
                deltaqueue.append([oldbyte])
    # Return a delta that starts with the blocksize and converts all iterables
    # to bytes.
    deltastructure = [blocksize]
    for element in deltaqueue:
        if isinstance(element, int):
            deltastructure.append(element)
        elif element:
            deltastructure.append(bytes(element))
    return deltastructure
def blockchecksums(instream, blocksize=4096):
    """
    Return parallel lists of weak and strong (MD5 hex) hashes, one pair
    per ``blocksize`` block of the given data stream, reading until the
    stream is exhausted.
    """
    weakhashes = []
    stronghashes = []
    while True:
        chunk = instream.read(blocksize)
        if not chunk:
            break
        weakhashes.append(weakchecksum(bytes(chunk))[0])
        stronghashes.append(hashlib.md5(chunk).hexdigest())
    return weakhashes, stronghashes
def patchstream(instream, outstream, delta):
    """
    Patches instream using the supplied delta and write the resultant
    data to outstream.

    ``delta[0]`` is the block size; every later entry is either an int
    (index of an unchanged block to copy from ``instream``) or literal
    data to write verbatim.  (Docstring typo "resultantant" fixed.)
    """
    blocksize = delta[0]
    for element in delta[1:]:
        if isinstance(element, int) and blocksize:
            # Unchanged block: copy it straight from the source stream.
            instream.seek(element * blocksize)
            element = instream.read(blocksize)
        outstream.write(element)
cdef rollingchecksum(int removed, int new, int a, int b, int blocksize=4096):
    """
    Generates a new weak checksum when supplied with the internal state
    of the checksum calculation for the previous window, the removed
    byte, and the added byte.
    """
    # NOTE(review): `cdef` makes this Cython-only -- the module cannot be
    # imported by plain CPython.  The typed C ints will wrap on overflow
    # for very large blocksize values; confirm that is acceptable.
    a -= removed - new
    b -= removed * blocksize - a
    return (b << 16) | a, a, b
def weakchecksum(data):
    """
    Compute the rsync weak checksum of an indexable sequence of byte
    values; returns ``(checksum, a, b)`` with ``b`` in the high 16 bits
    of ``checksum`` and ``a`` in the low 16 bits.
    """
    a = b = 0
    size = len(data)
    for index, value in enumerate(data):
        a += value
        b += (size - index) * value
    return (b << 16) | a, a, b
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# import cython
"""
This is a pure Python implementation of the [rsync algorithm](TM96).
[TM96] Andrew Tridgell and Paul Mackerras. The rsync algorithm.
Technical Report TR-CS-96-05, Canberra 0200 ACT, Australia, 1996.
http://samba.anu.edu.au/rsync/.
### Example Use Case: ###
# On the system containing the file that needs to be patched
>>> unpatched = open("unpatched.file", "rb")
>>> hashes = blockchecksums(unpatched)
# On the remote system after having received `hashes`
>>> patchedfile = open("patched.file", "rb")
>>> delta = rsyncdelta(patchedfile, hashes)
# System with the unpatched file after receiving `delta`
>>> unpatched.seek(0)
>>> save_to = open("locally-patched.file", "wb")
>>> patchstream(unpatched, save_to, delta)
"""
import collections
import hashlib
if not(hasattr(__builtins__, "bytes")) or str is bytes:
    # Python 2.x compatibility
    def bytes(var, *args):
        # Emulate Py3 bytes(iterable_of_ints) -> byte string on Py2;
        # if `var` is already a string (chr raises TypeError), return
        # its list of ordinals instead.
        try:
            return ''.join(map(chr, var))
        except TypeError:
            return map(ord, var)
__all__ = ["rollingchecksum", "weakchecksum", "patchstream", "rsyncdelta",
"blockchecksums"]
def rsyncdelta(datastream, remotesignatures, blocksize=4096):
    """
    Generates a binary patch when supplied with the weak and strong
    hashes from an unpatched target and a readable stream for the
    up-to-date data. The blocksize must be the same as the value
    used to generate remotesignatures.

    Returns ``[blocksize, entry, entry, ...]`` where each entry is
    either an int (index of an unchanged remote block) or a run of
    literal bytes that must be transmitted.
    """
    remote_weak, remote_strong = remotesignatures
    # match=True forces the first iteration to fill the window;
    # matchblock=-1 makes the first .index() search start at block 0.
    match = True
    matchblock = -1
    deltaqueue = collections.deque()
    while True:
        if match and datastream is not None:
            # Whenever there is a match or the loop is running for the first
            # time, populate the window using weakchecksum instead of rolling
            # through every single byte which takes at least twice as long.
            window = collections.deque(bytes(datastream.read(blocksize)))
            checksum, a, b = weakchecksum(window)
        try:
            # If there are two identical weak checksums in a file, and the
            # matching strong hash does not occur at the first match, it will
            # be missed and the data sent over. May fix eventually, but this
            # problem arises very rarely.
            matchblock = remote_weak.index(checksum, matchblock + 1)
            # Strong (MD5) hash is only computed after a weak hit.
            stronghash = hashlib.md5(bytes(window)).hexdigest()
            matchblock = remote_strong.index(stronghash, matchblock)
            match = True
            deltaqueue.append(matchblock)
            if datastream.closed:
                break
            continue
        except ValueError:
            # The weakchecksum did not match
            match = False
            try:
                if datastream:
                    # Get the next byte and affix to the window
                    newbyte = ord(datastream.read(1)) # tierney ord
                    window.append(newbyte)
            except TypeError:
                # No more data from the file; the window will slowly shrink.
                # newbyte needs to be zero from here on to keep the checksum
                # correct.
                newbyte = 0
                tailsize = datastream.tell() % blocksize
                datastream = None
            if datastream is None and len(window) <= tailsize:
                # The likelihood that any blocks will match after this is
                # nearly nil so call it quits.
                deltaqueue.append(window)
                break
            # Yank off the extra byte and calculate the new window checksum
            oldbyte = window.popleft()
            # print 'oldbyte:', oldbyte
            # print 'newbyte:', newbyte
            checksum, a, b = rollingchecksum(oldbyte, newbyte, a, b, blocksize)
            # Add the old byte the file delta. This is data that was not found
            # inside of a matching block so it needs to be sent to the target.
            try:
                deltaqueue[-1].append(oldbyte)
            except (AttributeError, IndexError):
                deltaqueue.append([oldbyte])
    # Return a delta that starts with the blocksize and converts all iterables
    # to bytes.
    deltastructure = [blocksize]
    for element in deltaqueue:
        if isinstance(element, int):
            deltastructure.append(element)
        elif element:
            deltastructure.append(bytes(element))
    return deltastructure
def blockchecksums(instream, blocksize=4096):
    """
    Hash a stream block by block.

    Consumes ``instream`` in ``blocksize`` chunks and returns two
    parallel lists: the weak rolling checksum and the MD5 hex digest
    of each block.
    """
    weakhashes, stronghashes = [], []
    while True:
        piece = instream.read(blocksize)
        if not piece:
            break
        weakhashes.append(weakchecksum(bytes(piece))[0])
        stronghashes.append(hashlib.md5(piece).hexdigest())
    return weakhashes, stronghashes
def patchstream(instream, outstream, delta):
    """
    Reconstruct the patched data.

    ``delta[0]`` is the block size.  Each following entry is either an
    int -- meaning that block of ``instream`` is unchanged and should be
    copied through -- or replacement data to be written as-is.
    """
    entries = iter(delta)
    blocksize = next(entries)
    for entry in entries:
        if isinstance(entry, int) and blocksize:
            # Unchanged block: fetch it from the source stream.
            instream.seek(entry * blocksize)
            entry = instream.read(blocksize)
        outstream.write(entry)
def rollingchecksum(removed, new, a, b, blocksize=4096):
    """
    Advance the weak checksum by one byte: drop ``removed``, admit
    ``new``, given the previous window's ``(a, b)`` state.  Returns the
    packed checksum and the updated state.
    """
    a = a - removed + new
    b = b + a - removed * blocksize
    return (b << 16) | a, a, b
def weakchecksum(data):
    """
    Weak rsync checksum of an indexable sequence of byte values:
    ``a`` is the plain sum, ``b`` weights each byte by its distance
    from the window's end; the packed result is ``(b << 16) | a``.
    """
    length = len(data)
    a = sum(data)
    b = sum((length - i) * data[i] for i in range(length))
    return (b << 16) | a, a, b
# def test_blockchecksums0():
# unpatched = open("/home/tierney/src/safe-deposit-box/src/SafeDepositBox/50MB.txt","rb")
# hashes = blockchecksums(unpatched, blocksize=4194304)
# return hashes
# def test_blockchecksums1():
# unpatched = open("/home/tierney/src/safe-deposit-box/src/SafeDepositBox/4MB.txt","rb")
# hashes = blockchecksums(unpatched, blocksize=4 * (2 ** 20))
# return hashes
# def test_patchedfile():
# unpatched = open("/home/tierney/src/safe-deposit-box/src/SafeDepositBox/4MB.txt","rb")
# hashes = blockchecksums(unpatched, blocksize=4194304)
# patchedfile = open("/home/tierney/src/safe-deposit-box/src/SafeDepositBox/4MBpatched.txt","rb")
# delta = rsyncdelta(patchedfile, hashes)
# print delta
# def test_patchedfile0():
# unpatched = open("export0.pdf","rb")
# blocksize=8 * (2 ** 10)
# hashes = blockchecksums(unpatched, blocksize=blocksize)
# import pprint
# pprint.pprint(hashes)
# patchedfile = open("export1.pdf","rb")
# delta = rsyncdelta(patchedfile, hashes, blocksize)
# with open("export1-new.pdf","w") as fh:
# patchstream(unpatched, fh, delta)
# print
# print len(delta)
# print
# print delta.__sizeof__()
# for delt in delta:
# if type(delt) == type(0):
# pass #print "INT:", delt
# else:
# print "list?:", len(delt), delt.__sizeof__()
# def test_patchedfile1():
# unpFile = "/home/tierney/src/safe-deposit-box/src/SafeDepositBox/4MB.txt"
# pFile = "/home/tierney/src/safe-deposit-box/src/SafeDepositBox/4MBpatched.txt"
# out = "4MBpatched-new.txt"
# # unpFile = "export0.pdf"
# # pFile = "export1.pdf"
# # out = "export-new.pdf"
# blocksize= 8 * (2 ** 10)
# unpatched = open(unpFile,"rb")
# hashes = blockchecksums(unpatched, blocksize=blocksize)
# import pprint
# # pprint.pprint(hashes[0])
# # print len(hashes[0]), hashes[0].__sizeof__() #len(hashes[1]), hashes[1].__sizeof__()
# # pprint.pprint(hashes[1])
# # print len(hashes[1]), hashes[1].__sizeof__() #len(hashes[1]), hashes[1].__sizeof__()
# print len(hashes[0]), len(hashes[1])
# patchedfile = open(pFile,"rb")
# delta = rsyncdelta(patchedfile, hashes, blocksize)
# print delta
# print len(delta)
# with open(out,"w") as fh:
# patchstream(unpatched, fh, delta)
# print
# print delta.__sizeof__()
# for delt in delta:
# if not isinstance(delt, int):
# print "list?:", len(delt), delt.__sizeof__()
def iosprint(fin):
    """Print every line of ``fin`` and rewind it to the start.

    Uses the print *function* form so the helper works under both
    Python 2 and Python 3; the old ``print fin.readlines()`` statement
    was a SyntaxError on Python 3 even though this file otherwise
    targets both (bytes shim, StringIO fallback).
    """
    print(fin.readlines())
    fin.seek(0)
if __name__ == "__main__":
    # Self-test: build 1 KB of random target data, shuffle 2 KB chunks
    # to simulate an edited copy, then round-trip through
    # blockchecksums -> rsyncdelta -> patchstream and verify the merge.
    # BUGFIX: the py2-only `print binarypatch` statement below was
    # converted to the function form (identical output for one arg on
    # Python 2), keeping the script runnable on Python 3 as the file's
    # bytes shim and StringIO fallback intend.
    # test_patchedfile1()
    # import sys
    # sys.exit(0)
    import random
    import time
    xrange = range
    try:
        from StringIO import StringIO
    except ImportError:
        from io import BytesIO as StringIO
    # Generates random data for the test
    datasize = 1<<16
    datasize = 1 * (2 ** 10)
    targetdata = ''.join([chr(random.randint(0, 127)) for n in range(datasize)])
    chunks = [targetdata[i:i+2048] for i in xrange(0, 1<<17, 2048)]
    for i in xrange(8):
        a, b = (
            random.randrange(0, len(chunks)), random.randrange(0, len(chunks)))
        chunks[a], chunks[b] = chunks[b], chunks[a]
    hostdata = ''.join(chunks)
    # targetstream: file to be patched
    # hoststream: what the unpatched target needs to become
    # mergedstream: output after patching
    # Python 3 bytes compatibility
    mergedstream = StringIO()
    if __builtins__.bytes == str:
        targetstream = StringIO(targetdata)
        hoststream = StringIO(hostdata)
    else:
        targetstream = StringIO(bytes(targetdata, "ascii"))
        hoststream = StringIO(bytes(hostdata, "ascii"))
    targetchecksums = blockchecksums(targetstream)
    binarypatch = rsyncdelta(hoststream, targetchecksums)
    print(binarypatch)
    patchstream(targetstream, mergedstream, binarypatch)
    mergedstream.seek(0)
    patcheddata = mergedstream.read()
    if __builtins__.bytes == str:
        # print "assume bytes means str"
        assert patcheddata == hostdata
    else:
        # print "not assuming bytes means str"
        assert str(patcheddata, 'ascii') == hostdata
    print("Test passed.")
| Python |
#!/usr/bin/env python
import objc
from Foundation import *
from AppKit import *
from PyObjCTools import NibClassBuilder, AppHelper
start_time = NSDate.date()
class Timer(NSObject):
    '''
    Application delegate: installs a menu-bar item whose title shows
    the seconds elapsed since module load, refreshed by a 1 s NSTimer.
    '''
    statusbar = None
    def applicationDidFinishLaunching_(self, notification):
        print 'timer launched'
        # Make the statusbar item
        statusbar = NSStatusBar.systemStatusBar()
        # if you use an icon, the length can be NSSquareStatusItemLength
        statusitem = statusbar.statusItemWithLength_(NSVariableStatusItemLength)
        self.statusitem = statusitem # Need to retain this for later
        # statusitem.setImage_(some_image)
        #statusitem.setMenu_(some_menu)
        statusitem.setToolTip_('Seconds since startup')
        statusitem.setAction_('terminate:') # must have some way to exit
        # Fire the 'display:' selector once a second starting now.
        self.timer = NSTimer.alloc().initWithFireDate_interval_target_selector_userInfo_repeats_(
            start_time,
            1.0,
            self,
            'display:',
            None,
            True
        )
        NSRunLoop.currentRunLoop().addTimer_forMode_(self.timer, NSDefaultRunLoopMode)
        self.timer.fire()
    def display_(self, notification):
        # Timer callback ('display:'): refresh the menu-bar title.
        print 'display:'
        self.statusitem.setTitle_(elapsed())
def elapsed():
    """Whole seconds elapsed since module start, formatted as a string."""
    seconds = NSDate.date().timeIntervalSinceDate_(start_time)
    return str(int(seconds))
if __name__ == "__main__":
    # Standard PyObjC bootstrap: shared app, install the delegate, then
    # hand control to the Cocoa event loop.
    app = NSApplication.sharedApplication()
    delegate = Timer.alloc().init()
    app.setDelegate_(delegate)
    AppHelper.runEventLoop()
| Python |
import objc, re, os
from Foundation import *
from AppKit import *
from PyObjCTools import NibClassBuilder, AppHelper
status_images = {'sdb':'safe3.png'}
# Create syncing images...
start_time = NSDate.date()
class StatusBar(NSObject):
    # name -> NSImage cache, populated in applicationDidFinishLaunching_.
    images = {}
    statusbar = None
    state = 'sdb'
    def openfolder_(self, notification):
        # Menu action stub: open the SafeDepositBox folder.
        print "open folder"
    def launchsite_(self, notification):
        # Menu action stub: open the product website.
        print "launch browser for our website"
    def preferences_(self, notification):
        # Menu action stub.
        print "preferences menu"
    def help_(self, notification):
        # Menu action stub.
        print "help menu"
    def _build_menu(self, notification):
        # Rebuild the status-item menu from scratch.  When called from
        # tick_, `notification` is the int counter and the usage line
        # shows a synthesized figure; otherwise a placeholder is shown.
        self.menu = NSMenu.alloc().init()
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Open SafeDepositBox Folder', 'openfolder:', '')
        self.menu.addItem_(menuitem)
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Launch SafeDepositBox Website', 'launchsite:', '')
        self.menu.addItem_(menuitem)
        menuitem = NSMenuItem.separatorItem()
        self.menu.addItem_(menuitem)
        if type(1) == type(notification):
            print type(notification)
            menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('%s GB used on Amazon\'s S3' % str(3.14 + .01*float(notification)), '', '')
        else:
            menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Calculating usage...', '', '')
        self.menu.addItem_(menuitem)
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Estimated cost: $%s' % str("0.01"), '', '')
        self.menu.addItem_(menuitem)
        menuitem = NSMenuItem.separatorItem()
        self.menu.addItem_(menuitem)
        # Sync event is bound to sync_ method
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('All files up to date','','')
        self.menu.addItem_(menuitem)
        menuitem = NSMenuItem.separatorItem()
        self.menu.addItem_(menuitem)
        # Default event
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Quit', 'terminate:', '')
        self.menu.addItem_(menuitem)
        # Bind it to the status item
        self.statusitem.setMenu_(self.menu)
    def applicationDidFinishLaunching_(self, notification):
        # App-delegate hook: build the status item, load icons, and start
        # the one-second tick timer.
        self.counter = 0
        statusbar = NSStatusBar.systemStatusBar()
        # Create the statusbar item
        self.statusitem = statusbar.statusItemWithLength_(NSVariableStatusItemLength)
        # Load all images
        for i in status_images.keys():
            self.images[i] = NSImage.alloc().initByReferencingFile_(status_images[i])
        # Set initial image
        self.statusitem.setImage_(self.images['sdb'])
        # Let it highlight upon clicking
        self.statusitem.setHighlightMode_(1)
        # Set a tooltip
        self.statusitem.setToolTip_('Safe Deposit Box 0.1\n(Yay! Values in Technology =)')
        # Build a very simple menu
        self._build_menu(notification)
        # Get the timer going
        self.timer = NSTimer.alloc().initWithFireDate_interval_target_selector_userInfo_repeats_(start_time, 1.0, self, 'tick:', None, True)
        NSRunLoop.currentRunLoop().addTimer_forMode_(self.timer, NSDefaultRunLoopMode)
        self.timer.fire()
    def sync_(self, notification):
        # Menu action stub.
        print "sync"
    def tick_(self, notification):
        # Timer callback ('tick:'): refresh the menu once a second.
        self.counter += 1
        self._build_menu(self.counter)
        print self.state
if __name__ == "__main__":
    # Standard PyObjC bootstrap: shared app, install the delegate, then
    # hand control to the Cocoa event loop.
    app = NSApplication.sharedApplication()
    delegate = StatusBar.alloc().init()
    app.setDelegate_(delegate)
    AppHelper.runEventLoop()
| Python |
'''
Minimal setup.py example, run with:
% python setup.py py2app
'''
from distutils.core import setup
import py2app
# Bundle identity constants used to build the Info.plist below.
NAME = 'SafeDepositBox'
SCRIPT = 'SafeDepositBox.py'
VERSION = '0.1'
ID = 'safedepositbox'
# Info.plist entries for the generated .app bundle.  LSUIElement = '1'
# hides the Dock icon so the app lives in the menu bar only.
plist = dict(
    CFBundleName = NAME,
    CFBundleShortVersionString = ' '.join([NAME, VERSION]),
    CFBundleGetInfoString = NAME,
    CFBundleExecutable = NAME,
    CFBundleIdentifier = 'com.trustycloudapps.%s' % ID,
    LSUIElement = '1'
)
app_data = dict(script=SCRIPT, plist=plist)
# py2app build: bundle the script with its icon and status-bar image.
setup(
    options = dict(py2app = dict(iconfile = '../../../bin/images/safe.icns')),
    app = [app_data],
    data_files = ['../../../bin/images/safe3.png']
)
| Python |
import objc, re, os
from Foundation import *
from AppKit import *
from PyObjCTools import NibClassBuilder, AppHelper
# Logical state name -> status-bar icon file (bundled resource).
status_images = {'sdb':'safe3.png'}
# Create syncing images...
# Fire date for the repeating NSTimer created in the app delegate.
start_time = NSDate.date()
class StatusBar(NSObject):
    """Cocoa app delegate owning the SafeDepositBox menu-bar status item.

    NOTE: images/statusbar/state are class-level attributes, shared by all
    instances (only one delegate instance is ever created here).
    """
    images = {}
    statusbar = None
    state = 'sdb'
    def openfolder_(self, notification):
        # Menu callback (selector 'openfolder:'); stub for now.
        print "open folder"
    def launchsite_(self, notification):
        # Menu callback (selector 'launchsite:'); stub for now.
        print "launch browser for our website"
    def preferences_(self, notification):
        # Menu callback; stub for now.
        print "preferences menu"
    def help_(self, notification):
        # Menu callback; stub for now.
        print "help menu"
    def _build_menu(self, notification):
        """(Re)create the status-item menu.

        *notification* is the launch NSNotification on first build, or the
        integer tick counter on timer ticks -- an int switches the usage
        line from 'Calculating...' to a fake usage figure.
        """
        self.menu = NSMenu.alloc().init()
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Open SafeDepositBox Folder', 'openfolder:', '')
        self.menu.addItem_(menuitem)
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Launch SafeDepositBox Website', 'launchsite:', '')
        self.menu.addItem_(menuitem)
        menuitem = NSMenuItem.separatorItem()
        self.menu.addItem_(menuitem)
        # Crude int check: True only when called from tick_ with the counter.
        if type(1) == type(notification):
            menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('%s GB used on Amazon\'s S3' % str(3.14 + .01*float(notification)), '', '')
        else:
            menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Calculating usage...', '', '')
        self.menu.addItem_(menuitem)
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Estimated cost: $%s' % str("0.01"), '', '')
        self.menu.addItem_(menuitem)
        menuitem = NSMenuItem.separatorItem()
        self.menu.addItem_(menuitem)
        # Sync event is bound to sync_ method
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('All files up to date','','')
        self.menu.addItem_(menuitem)
        menuitem = NSMenuItem.separatorItem()
        self.menu.addItem_(menuitem)
        # Default event
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Quit', 'terminate:', '')
        self.menu.addItem_(menuitem)
        # Bind it to the status item
        self.statusitem.setMenu_(self.menu)
    def applicationDidFinishLaunching_(self, notification):
        """NSApplicationDelegate hook: create the status item, menu, and the
        repeating 1 s timer that drives tick_."""
        self.counter = 0
        statusbar = NSStatusBar.systemStatusBar()
        # Create the statusbar item
        self.statusitem = statusbar.statusItemWithLength_(NSVariableStatusItemLength)
        # Load all images
        for i in status_images.keys():
            self.images[i] = NSImage.alloc().initByReferencingFile_(status_images[i])
        # Set initial image
        self.statusitem.setImage_(self.images['sdb'])
        # Let it highlight upon clicking
        self.statusitem.setHighlightMode_(1)
        # Set a tooltip
        self.statusitem.setToolTip_('Safe Deposit Box 0.1\n(Yay! Values in Technology =)')
        # Build a very simple menu
        self._build_menu(notification)
        # Get the timer going
        self.timer = NSTimer.alloc().initWithFireDate_interval_target_selector_userInfo_repeats_(start_time, 1.0, self, 'tick:', None, True)
        NSRunLoop.currentRunLoop().addTimer_forMode_(self.timer, NSDefaultRunLoopMode)
        self.timer.fire()
    def sync_(self, notification):
        # Selector 'sync:' callback; stub.
        print "sync"
    def tick_(self, notification):
        """1 s timer callback: rebuild the menu with the updated counter."""
        self.counter += 1
        self._build_menu(self.counter)
        print self.state
if __name__ == "__main__":
    from SafeDepositBox import SafeDepositBox
    from S3BucketPolicy import string_to_dns
    from threading import Thread
    # Start the sync engine and its S3 queue worker in the background,
    # then hand control to the Cocoa UI loop.
    s = SafeDepositBox()
    Thread(target=s.s3bucket.proc_queue, args=(s.prefix_to_ignore, s.enc_service)).start()
    s.start()
    app = NSApplication.sharedApplication()
    delegate = StatusBar.alloc().init()
    app.setDelegate_(delegate)
    AppHelper.runEventLoop()
| Python |
#!/usr/bin/env python
from Tkinter import *
class App:
def __init__(self, master):
frame = Frame(master)
frame.pack()
self.button = Button(frame, text="QUIT", fg="red", command=frame.quit)
self.button.pack(side=LEFT)
self.hi_there = Button(frame, text="Hello", command=self.say_hi)
self.hi_there.pack(side=LEFT)
def say_hi(self):
print "hi there, everyone!"
# Script entry point: create the root window and run the Tk event loop.
root = Tk()
app = App(root)
root.mainloop()
| Python |
from distutils.core import setup
import py2app

# Bundle metadata.
NAME = 'SafeDepositBox'
SCRIPT = 'SnowLeopard-SafeDepositBox.py'
VERSION = '0.1'
ID = 'safedepositbox'
# Info.plist; LSUIElement = '1' makes this a menu-bar-only app (no Dock icon).
plist = dict(
    CFBundleName = NAME,
    CFBundleShortVersionString = ' '.join([NAME, VERSION]),
    CFBundleGetInfoString = NAME,
    CFBundleExecutable = NAME,
    CFBundleIdentifier = 'com.trustycloudapps.%s' % ID,
    LSUIElement = '1'
)
app_data = dict(script=SCRIPT, plist=plist)
# Support modules and image resources copied into the bundle.
DATA_FILES = ['S3Interface.py',
              'S3BucketPolicy.py',
              'EncryptionService.py',
              'util.py',
              'constants.py',
              '../../bin/images/safe3.png']
OPTIONS = {'iconfile': '../../bin/images/safe.icns',
           'packages': ['email','boto'], # this is a hack. boto and py2app
                                         # don't like each other otherwise.
           'argv_emulation': True,
           }
setup(
    app = [app_data],
    data_files=DATA_FILES,
    options={'py2app': OPTIONS},
    setup_requires=['py2app'],
)
| Python |
#!/usr/bin/env python
import BaseHTTPServer
import urllib, urlparse, urllib2
import cgitb, mimetypes
import ConfigParser
import json
import getpass, os, platform, sys, socket, time
from string import Template
from os.path import basename, join, isfile, isdir, expanduser, split
# Wall-clock time after which the HTTP loop exits; -1 means keep serving.
shutdown_time = -1
import signal
# Restore default SIGINT handling so Ctrl-C kills the server immediately.
signal.signal(signal.SIGINT, signal.SIG_DFL)
# Directory holding the static web resources served by ConfigHandler.
RESOURCE_DIR = './res/web/'
class HTMLTemplate(Template):
    # string.Template subclass using %identifier placeholders instead of $.
    delimiter = '%'
class ConfigHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def write(self, ctype, data, code=200):
self.send_response(code)
self.send_header('Content-Type', ctype)
self.end_headers()
self.wfile.write(data)
def try_send_content(self, path):
path = './' + path[1:]
if not isfile(join(RESOURCE_DIR, path)):
return False
mtype = mimetypes.guess_type('file://' + path)
self.write(mtype[0], open(join(RESOURCE_DIR, path)).read())
return True
def tree_json(self, path, q):
d = expanduser(q['path'][0])
children = []
for f in sorted(os.listdir(d)):
if isdir(join(d, f)) and not f.startswith('.'):
children.append(dict(data = f, attr = {"path" : join(d, f)}, state = "closed", children = []))
if d != '~':
out = children
else:
out = [dict(data = split(d)[1], attr = {"path" : d}, state = 'open', children = children)]
self.write('text/javascript', json.dumps(out, indent=2))
def tmpl(self, path, q):
path = q['template'][0]
path = './' + path[1:]
mtype = mimetypes.guess_type('file://' + path)
ctmpl = HTMLTemplate(open(join(RESOURCE_DIR, path)).read())
content = ctmpl.substitute(dict(USER_EMAIL = '%s@%s' % (getpass.getuser(), socket.gethostname()),
COMPUTER_NAME = '%s' % platform.node()))
self.write(mtype[0], content)
def configure(self, path, q):
c = ConfigParser.ConfigParser()
c.add_section('sdb')
for key in ['name',
'userEmailAddress',
'userPassword',
'awsAccessKey',
'awsSecretKey',
'computerName',
'sdbDirectory']:
c.set('sdb', key, q[key][0])
admin_dir = os.path.join(os.environ["HOME"], ".safedepositbox")
try:
os.makedirs(admin_dir)
except OSError, e:
import errno
if e[0] != errno.EEXIST:
raise
c.write(open(os.path.join(admin_dir, 'safedepositbox.conf'), 'w'))
self.send_content('/success.html')
# let this request out, then shutdown
global shutdown_time
shutdown_time = time.time() + 3
def do_GET(self):
if self.path == '/':
self.path = '/tmpl?template=/configure.html'
url = urlparse.urlsplit(self.path, scheme='http')
path = url.path
q = urlparse.parse_qs(url.query)
if self.try_send_content(path):
return
f = getattr(self, path[1:], None)
if not f:
self.send_error(404, 'Missing resource "%s"' % path[1:])
else:
try:
f(path, q)
except:
import cStringIO
f = cStringIO.StringIO()
cgitb.Hook(file=f).handle()
self.write('text/html', f.getvalue(), code=500)
def configure():
    """Run the one-shot configuration web UI.

    Serves http://localhost:8080 on a daemon thread, opens the user's
    browser at it, and blocks until ConfigHandler schedules shutdown.
    """
    def start_httpd():
        global httpd
        httpd = BaseHTTPServer.HTTPServer(('localhost', 8080), ConfigHandler)
        # 1 s timeout lets the loop poll shutdown_time between requests.
        httpd.timeout = 1
        while 1:
            httpd.handle_request()
            if shutdown_time > 0:
                print 'Shutting down in: %.2f' % (shutdown_time - time.time())
                if time.time() > shutdown_time:
                    break
    import threading
    httpd_thread = threading.Thread(target = start_httpd)
    # Daemon thread: it cannot keep the process alive on its own.
    httpd_thread.setDaemon(True)
    httpd_thread.start()
    import webbrowser
    webbrowser.open_new('http://localhost:8080')
    httpd_thread.join()
# Script entry point: run the configuration wizard.
if __name__ == '__main__':
    configure()
#! /usr/bin/env python
# encoding: utf-8
# waf 1.6.10
VERSION='0.3.3'
import sys
APPNAME='p2t'
# waf source/build directories.
top = '.'
out = 'build'
# All C++ translation units: the poly2tri library plus the GL testbed.
CPP_SOURCES = ['poly2tri/common/shapes.cc',
               'poly2tri/sweep/cdt.cc',
               'poly2tri/sweep/advancing_front.cc',
               'poly2tri/sweep/sweep_context.cc',
               'poly2tri/sweep/sweep.cc',
               'testbed/main.cc']
from waflib.Tools.compiler_cxx import cxx_compiler
# Force g++ on Windows instead of letting waf probe for MSVC first.
cxx_compiler['win32'] = ['g++']
#Platform specific libs
if sys.platform == 'win32':
    # MS Windows
    sys_libs = ['glfw', 'opengl32']
elif sys.platform == 'darwin':
    # Apple OSX
    sys_libs = ['glfw', 'OpenGL']
else:
    # GNU/Linux, BSD, etc
    sys_libs = ['glfw', 'GL']
def options(opt):
    """waf hook: register command-line options (pull in C++ compiler ones)."""
    print(' set_options')
    opt.load('compiler_cxx')
def configure(conf):
    """waf hook: detect the C++ toolchain and set project flags and libs."""
    print(' calling the configuration')
    conf.load('compiler_cxx')
    conf.env.CXXFLAGS = ['-O3', '-ffast-math']
    conf.env.DEFINES_P2T = ['P2T']
    # Link libraries chosen per-platform at module import time (sys_libs).
    conf.env.LIB_P2T = sys_libs
def build(bld):
    """waf hook: compile the p2t sources plus the testbed into one program."""
    print(' building')
    bld.program(features = 'cxx cxxprogram', source=CPP_SOURCES, target = 'p2t', uselib = 'P2T')
| Python |
from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin

# Register every ModelAdmin found in the installed apps.
admin.autodiscover()

# Only the admin is routed at the moment.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'feedsaver.views.home', name='home'),
    # url(r'^feedsaver/', include('feedsaver.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
| Python |
# Django settings for feedsaver project.

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        # BUG FIX: the DATABASES dict (Django >= 1.2) requires the full
        # backend path; the bare 'sqlite3' short name only worked with the
        # legacy DATABASE_ENGINE setting and raises ImproperlyConfigured here.
        'ENGINE': 'django.db.backends.sqlite3', # Or '...postgresql_psycopg2', '...mysql', '...oracle'.
        'NAME': 'database3.db',                 # Or path to database file if using sqlite3.
        'USER': '',                             # Not used with sqlite3.
        'PASSWORD': '',                         # Not used with sqlite3.
        'HOST': '',                             # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',                             # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'

# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# NOTE(review): a committed SECRET_KEY should be rotated and loaded from
# the environment in production.
SECRET_KEY = 'sui2!(!ukaqd9et(j4l)85xd!x^lkwluu!ze#1@aiyqmpw*h80'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'feedsaver.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'saver',
    'django.contrib.admin',
    'south',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| Python |
# -*- coding:utf-8 -*-
from django.db import models
class News(models.Model):
    """A live news item pulled from an RSS feed (see the crawler command)."""
    SUBJECT_CHOICES = (
        (u'br', u'Brasil'),
        (u'mu', u'Mundo'),
        (u'ne', u'Negócios'),
        (u'ci', u'Ciência'),
        (u'te', u'Tecnologia'),
        (u'en', u'Entretenimento'),
        (u'es', u'Esportes'),
    )
    title = models.CharField(max_length=200)
    subject = models.CharField(max_length=200, choices=SUBJECT_CHOICES)
    # Original (summary) text of the feed entry.
    original = models.CharField(max_length=200)
    pub_date = models.DateTimeField()
    # FIX: verify_exists=True dropped -- it issued a blocking HTTP request on
    # every validation and was deprecated (then removed) by Django for
    # security/DoS reasons; True was the default at the time, so behavior
    # on this Django version is unchanged.
    url = models.URLField()
    # Feed-provided unique id used for de-duplication by the crawler.
    news_id = models.CharField(max_length=200)
class NewsArchive(models.Model):
    """A pre-cleaned archived article used as SVM training material."""
    SUBJECT_CHOICES = (
        (u'brasil', u'Brasil'),
        (u'mundo', u'Mundo'),
        (u'economia', u'Economia'),
        (u'ciencia', u'Ciência'),
        (u'tecnologia', u'Tecnologia'),
        (u'entretenimento', u'Entretenimento'),
        (u'esportes', u'Esportes'),
    )
    title = models.CharField(max_length=200,null=True,blank=True)
    subject = models.CharField(max_length=200, choices=SUBJECT_CHOICES)
    # Lower-cased, de-punctuated article body (see the scraper command).
    text = models.CharField(max_length=1000)
class VectorFeatures(models.Model):
    """A stored feature vector.

    The single row with type='principal' holds the global vocabulary
    (feature = comma-joined words, values = their corpus frequencies);
    per-subject rows hold one training sample each.
    """
    SUBJECT_CHOICES = (
        (u'brasil', u'Brasil'),
        (u'mundo', u'Mundo'),
        (u'economia', u'Economia'),
        (u'ciencia', u'Ciência'),
        (u'tecnologia', u'Tecnologia'),
        (u'entretenimento', u'Entretenimento'),
        (u'esportes', u'Esportes'),
    )
    # Subject name, or the sentinel 'principal' for the vocabulary row.
    type = models.CharField(max_length=200, choices=SUBJECT_CHOICES,null=True,blank=True)
    # Comma-separated word list (vocabulary row only).
    feature = models.TextField()
    # Comma-separated numeric values, aligned with the vocabulary.
    values = models.TextField()
| Python |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    # Default Django app-template smoke test; replace with real tests.
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        self.assertEqual(1 + 1, 2)
| Python |
# Create your views here.
| Python |
# -*- coding:utf8 -*-
#!/usr/bin/env python
import os
import feedparser
import copy
import rfc822
import time
import datetime
import HTMLParser
import urllib
import lxml.html
import chardet
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.utils.encoding import smart_str, smart_unicode
from feedsaver.saver.models import NewsArchive
from lxml import etree
def tf(limit, subject, self):
    """Placeholder for the feed-extraction entry point.

    NOTE(review): the original definition had no body at all, which makes
    this module fail to import with an IndentationError.  A no-op body is
    the minimal fix that preserves the (limit, subject, command) signature;
    confirm the intended implementation (the sibling crawler's rawr()?).
    """
    pass
class Command(BaseCommand):
    """Management command shell; the actual extraction is not wired up."""
    args = 'no args'
    help = 'Extract info from CPR'
    def handle(self, *args, **options):
        # NOTE(review): rawr() is neither defined nor imported in this
        # module (it lives in the sibling scraper command), so running this
        # raises NameError -- confirm the intended callee.
        rawr(1223,'brasil',self)
| Python |
# -*- coding:utf8 -*-
#!/usr/bin/env python
import nltk, re
import numpy as np
from django.utils.encoding import smart_str, smart_unicode
from scikits.learn.svm import SVC
from django.core.management.base import BaseCommand, CommandError
from feedsaver.saver.models import *
def mountY():
    """Return the class-label vector Y: one int label per training sample.

    Subject order mirrors mountX(); only the first half of each subject's
    stored vectors is used for training, hence count/2 labels per subject.
    """
    labels = []
    # NOTE(review): 'ciencia' shares label 4 with 'tecnologia' here, while
    # Command.handle scores 'ciencia' against prediction 5 -- confirm.
    for subject, label in (('brasil', 1), ('mundo', 2), ('economia', 3),
                           ('tecnologia', 4), ('ciencia', 4),
                           ('entretenimento', 5)):
        half = VectorFeatures.objects.filter(type=subject).count() / 2
        labels.extend([label] * half)
    return labels
def mountX():
    """Return the training matrix X: one feature-value list per sample.

    Takes the first half of each subject's stored vectors, in the same
    subject order used by mountY().
    """
    samples = []
    for subject in ('brasil', 'mundo', 'economia',
                    'tecnologia', 'ciencia', 'entretenimento'):
        vectors = VectorFeatures.objects.filter(type=subject)
        half = vectors.count() / 2
        for vector in vectors[:half]:
            samples.append(vector.values.split(','))
    return samples
class Command(BaseCommand):
    args = 'no args'
    help = 'Extract info from CPR'
    def handle(self, *args, **options):
        """Train an SVC on the first half of the stored feature vectors and
        print a naive accuracy count over the whole table's second half."""
        arrayY = np.array(mountY())
        arrayX = np.array(mountX())
        clf = SVC()
        clf.fit(arrayX, arrayY)
        vfs = VectorFeatures.objects.all()
        vfscount = VectorFeatures.objects.all().count()
        total = 0
        acerto = 0
        # Evaluate on the second half (the first half was the training set).
        for vf in vfs[vfscount/2:]:
            total = total + 1
            pred = clf.predict(vf.values.split(','))
            # NOTE(review): mountY labels 'ciencia' as class 4, but it is
            # scored here against prediction 5 -- confirm the mapping.
            if (int(pred[0]) == 1 and vf.type == 'brasil'):
                acerto = acerto+1
                self.stdout.write("acertou brasil\n")
            if (int(pred[0]) == 2 and vf.type == 'mundo'):
                acerto = acerto+1
                self.stdout.write("acertou mundo\n")
            if (int(pred[0]) == 3 and vf.type == 'economia'):
                acerto = acerto+1
                self.stdout.write("acertou economia\n")
            if (int(pred[0]) == 4 and vf.type == 'tecnologia'):
                acerto = acerto+1
                self.stdout.write("acertou tecnologia\n")
            if (int(pred[0]) == 5 and vf.type == 'ciencia'):
                acerto = acerto+1
                self.stdout.write("acertou ciencia\n")
            if (int(pred[0]) == 5 and vf.type == 'entretenimento'):
                acerto = acerto+1
                self.stdout.write("acertou entretenimento\n")
        self.stdout.write("Acerto: "+str(acerto)+"\n")
        self.stdout.write("Total: "+str(total)+"\n")
        #vf = VectorFeatures.objects.filter(type='brasil')[1]
        #self.stdout.write(str( clf.predict(vf.values.split(',') ) ))
        #self.stdout.write('\n')
        #self.stdout.write(vf.type)
        #self.stdout.write('\n')
        return
# -*- coding:utf8 -*-
#!/usr/bin/env python
from time import sleep
import nltk, re, math,threading
from django.utils.encoding import smart_str, smart_unicode
from django.core.management.base import BaseCommand, CommandError
from feedsaver.saver.models import *
def loop(self,subject):
    """Collect every word (length > 2, not a stopword) from all archived
    news of *subject*; also bumps the module-global processed counter."""
    global nglobal
    stopwords = nltk.corpus.stopwords.words('portuguese')
    #stemmer = nltk.stem.RSLPStemmer()
    ns = NewsArchive.objects.filter(subject=subject)
    words = []
    for n in ns:
        nglobal = nglobal+1
        #command.stdout.write("N de noticias: "+str(nglobal)+'\n')
        text = n.text
        text = text.split(' ')
        #Removing stopwords
        for word in text:
            # NOTE(review): the word is byte-encoded before being compared
            # to NLTK's stopword list -- accented stopwords may not match
            # if the list is unicode; confirm intended.
            word = smart_str(word, encoding='utf-8', strings_only=False, errors='strict')
            if (len(word) > 2):
                if word not in stopwords:
                    words.append(word)
    return words
class loop_brasil ( threading.Thread ):
    # Worker: gathers 'brasil' words into the module-global allwords1.
    def run ( self ):
        global allwords1
        allwords1 = loop(self,'brasil')
class loop_mundo ( threading.Thread ):
    # Worker: gathers 'mundo' words into the module-global allwords2.
    def run ( self ):
        global allwords2
        allwords2 = loop(self,'mundo')
class loop_ciencia ( threading.Thread ):
    # Worker: gathers 'ciencia' words into the module-global allwords3.
    def run ( self ):
        global allwords3
        allwords3 = loop(self,'ciencia')
class loop_tecnologia ( threading.Thread ):
    # Worker: gathers 'tecnologia' words into the module-global allwords4.
    def run ( self ):
        global allwords4
        allwords4 = loop(self,'tecnologia')
class loop_entretenimento( threading.Thread ):
    # Worker: gathers 'entretenimento' words into the module-global allwords5.
    def run ( self ):
        global allwords5
        allwords5 = loop(self,'entretenimento')
class loop_economia( threading.Thread ):
    # Worker: gathers 'economia' words into the module-global allwords6.
    def run ( self ):
        global allwords6
        allwords6 = loop(self,'economia')
def mcw(self):
    """Build the global feature vocabulary.

    Spawns one thread per subject to collect words, waits for all of them,
    then stores the 2000 most frequent words (plus their corpus counts,
    kept for the later IDF computation) as the single 'principal'
    VectorFeatures row.
    """
    global allwords1
    global allwords2
    global allwords3
    global allwords4
    global allwords5
    global allwords6
    # Build the feature vocabulary: the most common words across ALL texts.
    # One collector thread per subject; each writes its own global list.
    loop_brasil().start()
    loop_mundo().start()
    loop_ciencia().start()
    loop_tecnologia().start()
    loop_entretenimento().start()
    loop_economia().start()
    # Crude join: poll until only the main thread remains.
    while (threading.activeCount() > 1):
        sleep(1)
    allwords = allwords1 + allwords2 + allwords3 + allwords4 + allwords5 + allwords6
    # Drop any previously stored vectors before rebuilding.
    vfa = VectorFeatures.objects.all()
    if (vfa.count() != 0):
        for v in vfa:
            v.delete()
    vf = VectorFeatures()
    vf.type = 'principal'
    array = []
    array2 = []
    # Keep the 2000 most frequent words and their corpus frequencies
    # (frequencies feed the IDF computation in subject_features).
    fd = nltk.FreqDist(w for w in allwords)
    for word in fd.keys()[:2000]:
        array.append(word)
        array2.append(str(fd[word]))
        #self.stdout.write(word+' '+str(fd[word])+'\n')
    vf.feature = ','.join(array)
    vf.values = ','.join(array2)
    vf.save()
def subject_features(self,subject,vfeatures):
    """Build and store one word-count vector per news item of *subject*.

    Uses the first half of the subject's archived news as the training
    set; each saved VectorFeatures row holds, per vocabulary word, the
    number of occurrences in that item.
    """
    global n2global
    ns = NewsArchive.objects.filter(subject=subject)
    nscount = NewsArchive.objects.filter(subject=subject).count()
    totalnews = NewsArchive.objects.all().count()
    tmps = VectorFeatures.objects.filter(type=subject)
    principal = VectorFeatures.objects.get(type='principal').values
    principal = principal.split(',')
    # Drop any previously stored vectors for this subject.
    for t in tmps:
        t.delete()
    # Training vectors built from half of the news items.
    for n in ns[:(nscount/2)]:
        n2global = n2global + 1
        self.stdout.write(str(n2global)+'\n')
        lista = []
        vf = VectorFeatures()
        vf.type = subject
        vtext = n.text.split(' ')
        for i in vfeatures:
            lista.append(0)
        k1 = 0
        for i in vfeatures:
            k2 = 0
            for j in vtext:
                if i == j:
                    # Count occurrences of each vocabulary word in this text.
                    lista[k1] = str(int(lista[k1]) + 1)
                k2 = k2+1
            k1 = k1+1
        #TF-IDF
        lenvtext = len(vtext)
        for i in lista:
            #tf
            # NOTE(review): Python 2 integer division -- tf is 0 whenever
            # the count is below lenvtext (i.e. essentially always).
            tf = (int(i) / lenvtext)
            #self.stdout.write(principal[int(i)])
            # NOTE(review): principal is indexed by the word's COUNT value,
            # not by its vocabulary position (k) -- this looks like a bug.
            idf = math.log10(totalnews/int(principal[int(i)]))
            # NOTE(review): rebinding the loop variable does not modify
            # lista, so the stored values remain the raw counts.
            i = tf*idf
        vf.values = ','.join( map( str, lista ))
        vf.save()
        #self.stdout.write(vf.values)
        #self.stdout.write('\n')
    return
class Command(BaseCommand):
    args = 'no args'
    help = 'Extract info from CPR'
    def handle(self, *args, **options):
        """Build the global vocabulary, then the per-subject feature
        vectors for all six subjects."""
        # The worker threads communicate through these module globals.
        global nglobal
        global allwords
        global allwords1
        global allwords2
        global allwords3
        global allwords4
        global allwords5
        global allwords6
        nglobal = 0
        allwords = []
        allwords1 = []
        allwords2 = []
        allwords3 = []
        allwords4 = []
        allwords5 = []
        allwords6 = []
        # Expose this command instance to module-level helpers for logging.
        global command
        command = self
        mcw(command)
        vfeatures = VectorFeatures.objects.get(type='principal').feature.split(',')
        global n2global
        n2global = 0
        subject_features(self,'brasil',vfeatures)
        subject_features(self,'mundo',vfeatures)
        subject_features(self,'economia',vfeatures)
        subject_features(self,'tecnologia',vfeatures)
        subject_features(self,'ciencia',vfeatures)
        subject_features(self,'entretenimento',vfeatures)
        self.stdout.write('Fim da montagem de vetores de características\n')
        return
# -*- coding:utf8 -*-
#!/usr/bin/env python
import os
import feedparser
import copy
import rfc822
import time
import datetime
from xml.etree import cElementTree as ElementTree
from threading import *
#from datetime import datetime
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from feedsaver.saver.models import News
def crawl(hit_list,subject,self):
    """Fetch all RSS feeds in *hit_list* concurrently and build News rows.

    Each entry newer than the stored copy becomes a News object tagged
    with *subject*.  NOTE(review): n.save() is commented out below, so
    nothing is actually persisted at the moment.
    """
    # pull down all feeds
    future_calls = [Future(feedparser.parse, rss_url) for rss_url in hit_list]
    # block until they are all in
    feeds = [future_obj() for future_obj in future_calls]
    entries = []
    for feed in feeds:
        self.stdout.write(feed[ "channel" ][ "title" ])
        self.stdout.write("\n")
        for item in feed["items"]:
            #First validates if the it's a new news
            updated = datetime.datetime.fromtimestamp(time.mktime(rfc822.parsedate(item["updated"])))
            # Skip when we already hold a copy at least as recent.
            exists = News.objects.filter(Q(news_id=item["id"]) , Q(pub_date__gte=updated)).count()
            if (exists > 0):
                continue # This news in the db is the latest updated version
            else:
                # Save the news in db
                #debugging
                #for i in item.keys():
                #self.stdout.write(i)
                #self.stdout.write("\n")
                self.stdout.write(item["title"])
                n = News()
                n.original = item["summary"] # Original Text
                n.title = item["title"] # News title
                n.pub_date = updated # Date updated
                n.news_id = item["id"] # News identificator
                n.url = item["link"] # URL to the original news
                n.subject = subject # Subject of the News
                #n.save()
                self.stdout.write("\n")
                self.stdout.write("\n")
    # entries.extend( feed[ "items" ] )
    #
    # sorted_entries = sorted(entries, key=lambda entry: entry["date_parsed"])
    # sorted_entries = sorted_entries.reverse()
    return
class Command(BaseCommand):
    args = 'no args'
    help = 'Extract info from CPR'
    def handle(self, *args, **options):
        """Crawl the configured RSS feeds.

        NOTE(review): only the sports feeds are passed to crawl(); the
        other feed URLs are defined but currently unused.
        """
        self.stdout.write('>> Starting main loop.\n')
        #Brazil News
        #World News
        folha_mundo = "http://fulltextrssfeed.com/feeds.folha.uol.com.br/mundo/rss091.xml"
        # Business news
        folha_mercado = "http://fulltextrssfeed.com/feeds.folha.uol.com.br/mercado/rss091.xml"
        #Science/Tecnology
        folha_ciencia = "http://fulltextrssfeed.com/feeds.folha.uol.com.br/ciencia/rss091.xml"
        folha_tec = "http://fulltextrssfeed.com/feeds.folha.uol.com.br/tec/rss091.xml"
        #Entertainment
        #Sports News
        folha_esportes = "http://fulltextrssfeed.com/feeds.folha.uol.com.br/esporte/rss091.xml"
        estadao_esportes = "http://fulltextrssfeed.com/estadao.feedsportal.com/c/33043/f/534114/index.rss"
        terra_esportes = "http://fulltextrssfeed.com/rss.terra.com.br/0,,EI1137,00.xml"
        hit_list = [folha_esportes,estadao_esportes]
        crawl(hit_list,'es',self)
        #Health
        folha_equilibrio = "http://fulltextrssfeed.com/feeds.folha.uol.com.br/equilibrioesaude/rss091.xml"
class Future:
    """Poor-man's future: runs *func* on a background thread immediately.

    Calling the instance blocks until the result is ready and returns a
    deep copy of it, so callers cannot mutate the shared result.
    """
    def __init__(self,func,*param):
        # Constructor
        self.__done=0
        self.__result=None
        self.__status='working'
        self.__C=Condition() # Notify on this Condition when result is ready
        # Run the actual function in a separate thread
        self.__T=Thread(target=self.Wrapper,args=(func,param))
        self.__T.setName("FutureThread")
        self.__T.start()
    def __repr__(self):
        return '<Future at '+hex(id(self))+':'+self.__status+'>'
    def __call__(self):
        # Block until Wrapper signals completion via the Condition.
        self.__C.acquire()
        while self.__done==0:
            self.__C.wait()
        self.__C.release()
        # We deepcopy __result to prevent accidental tampering with it.
        a=copy.deepcopy(self.__result)
        return a
    def Wrapper(self, func, param):
        # Run the actual function, and let us housekeep around it
        self.__C.acquire()
        try:
            self.__result=func(*param)
        except:
            # NOTE(review): bare except -- any failure (even
            # KeyboardInterrupt) is swallowed and replaced by this marker
            # string instead of being re-raised to the caller.
            self.__result="Exception raised within Future"
        self.__done=1
        # Backquotes are the Python 2 repr() shorthand.
        self.__status=`self.__result`
        self.__C.notify()
        self.__C.release()
# -*- coding:utf8 -*-
#!/usr/bin/env python
import nltk, re
from django.utils.encoding import smart_str, smart_unicode
from django.core.management.base import BaseCommand, CommandError
from feedsaver.saver.models import News,NewsArchive
def loop(self,ns):
    """Print the first article of *ns*, lower-cased, de-punctuated and with
    Portuguese stopwords removed (words written space-separated)."""
    stopwords = nltk.corpus.stopwords.words('portuguese')
    #for n in ns
    text = ns[0].text.lower()
    # Strip punctuation; periods become spaces so sentence ends still split.
    text = text.replace('.', ' ')
    for mark in (',', "'", '"', '!', ';', ':', '(', ')'):
        text = text.replace(mark, '')
    #text = ''.join(text)
    for word in text.split(' '):
        if word not in stopwords:
            encoded = smart_str(word, encoding='utf-8', strings_only=False, errors='strict')
            self.stdout.write(encoded)
            self.stdout.write(' ')
    return
def mcw(self):
    #Most Common Words
    # Loop over all texts of a subject and return its main words.
    # NOTE(review): only the queryset fetch is implemented so far; the
    # word counting (and the call to loop) is missing.
    #Brasil
    ns = NewsArchive.objects.filter(subject='br')
    return
class Command(BaseCommand):
    args = 'no args'
    help = 'Extract info from CPR'
    def handle(self, *args, **options):
        # Entry point: currently just runs the (incomplete) mcw pass.
        mcw(self)
        return
# -*- coding:utf8 -*-
#!/usr/bin/env python
import os
import feedparser
import copy
import rfc822
import time
import datetime
import HTMLParser
import urllib
import lxml.html
import chardet
import threading
from unicodedata import normalize
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.utils.encoding import smart_str, smart_unicode
from feedsaver.saver.models import NewsArchive
from lxml import etree
def rawr(begin,limit,subject,self):
    """Scrape Yahoo Brasil archive pages *begin*..*limit* for *subject*.

    Each linked article is fetched, cleaned (lower-cased, de-punctuated)
    and stored as a NewsArchive row until 100 items exist for the subject.
    """
    self.stdout.write('>> Starting main loop.\n')
    for i in range(begin,limit):
        ncount = NewsArchive.objects.filter(subject=subject).count()
        # Cap: stop once 100 news items are stored for this subject.
        if ncount >= 100:
            return
        url = "http://br.noticias.yahoo.com/"+ subject +"/arquivo/"+ str(i) +".html"
        content = urllib.urlopen(url).read()
        doc = lxml.html.fromstring(content, base_url=url)
        doc.make_links_absolute()
        # Each h4 anchor on the archive page is one article headline.
        for link in doc.cssselect('h4 a'):
            link_content = smart_str(link.text_content(), encoding='utf-8', strings_only=False, errors='strict')
            link_href = smart_str(link.get('href'), encoding='utf-8', strings_only=False, errors='strict')
            self.stdout.write(link_content)
            self.stdout.write('\n\n')
            content = urllib.urlopen(link_href).read()
            doc = lxml.html.fromstring(content)
            text = ""
            # Concatenate the article body paragraphs, cleaned of punctuation.
            for p in doc.cssselect('div.yom-art-content div.bd p'):
                #text += normalize('NFKD', p.text_content().decode('latin')).encode('utf-8', 'ignore')
                #text += smart_unicode(p.text_content(), encoding='utf-8', strings_only=False, errors='strict')
                tmp = p.text_content()
                tmp = tmp.lower()
                tmp = tmp.replace(',','')
                tmp = tmp.replace('.',' ')
                tmp = tmp.replace("'",'')
                tmp = tmp.replace('"','')
                tmp = tmp.replace('!','')
                tmp = tmp.replace(';','')
                tmp = tmp.replace(':','')
                tmp = tmp.replace('(','')
                tmp = tmp.replace(')','')
                text += smart_str(tmp, encoding='utf-8', strings_only=True, errors='strict')
                text += '\n'
            # Discard stubs / paywalled fragments.
            if text.__len__() < 150:
                self.stdout.write('texto muito pequeno\n')
                text = ""
            else:
                # See if the text already exists in DB
                title = smart_str(link.text_content(), encoding='utf-8', strings_only=False, errors='strict')
                exists = NewsArchive.objects.filter(title=title).count()
                self.stdout.write(subject+'\n')
                if exists > 0:
                    self.stdout.write('Notícia já existente\n')
                else:
                    # Save the text
                    n = NewsArchive()
                    n.title = link.text_content()
                    n.text = text
                    n.subject = subject
                    try:
                        n.save()
                        self.stdout.write('Notícia salva!\n')
                    except:
                        # NOTE(review): bare except silently drops any save
                        # failure (e.g. text longer than the 1000-char field).
                        self.stdout.write('erro!\n')
                        pass
            self.stdout.write(str(NewsArchive.objects.filter(subject=subject).count()))
            self.stdout.write('\n\n')
class thread_brasil ( threading.Thread ):
    # Worker: scrape archive pages 0-99 for 'brasil'.
    def run ( self ):
        rawr(0,100,'brasil',command)
class thread_mundo ( threading.Thread ):
    # Worker: scrape archive pages 0-99 for 'mundo'.
    def run ( self ):
        rawr(0,100,'mundo',command)
class thread_tecnologia ( threading.Thread ):
    # Worker: scrape archive pages 0-99 for 'tecnologia'.
    def run ( self ):
        rawr(0,100,'tecnologia',command)
class thread_ciencia ( threading.Thread ):
    # Worker: scrape archive pages 0-99 for 'ciencia'.
    def run ( self ):
        rawr(0,100,'ciencia',command)
class thread_entretenimento ( threading.Thread ):
def run ( self ):
rawr(0,100,'entretenimento',command)
class thread_economia ( threading.Thread ):
def run ( self ):
rawr(200,300,'economia',command)
class thread_economia2 ( threading.Thread ):
def run ( self ):
rawr(300,400,'economia',command)
class Command(BaseCommand):
    """Management command that crawls news subjects on background threads."""

    args = 'no args'
    help = 'Extract info from CPR'

    def handle(self, *args, **options):
        # Publish this command instance through the module-global `command`
        # so the crawler threads can reach self.stdout for progress output.
        global command
        command = self
        # Subjects tecnologia/ciencia/entretenimento are currently disabled.
        thread_economia().start()
        thread_brasil().start()
        thread_mundo().start()
| Python |
#!/usr/bin/env python
# Standard Django (pre-1.4) project bootstrap script: locates settings.py
# next to this file and hands control to Django's command dispatcher.
from django.core.management import execute_manager
import imp
try:
    imp.find_module('settings')  # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)

# Imported at module level so execute_manager receives the settings module.
import settings

if __name__ == "__main__":
    execute_manager(settings)
| Python |
'''
Created on 21-03-2011
@author: maciek
'''
def formatString(format, **kwargs):
    """Substitute ``{name}`` placeholders in *format* with values from *kwargs*.

    Only the exact ``{name}`` tokens for the supplied keyword arguments are
    replaced; every other brace in the template is treated as a literal.
    Returns '' for an empty or None *format*.

    Bug fix: the original post-processed the rendered result with
    ``replace("{{", "{")`` / ``replace("}}", "}")``.  str.format() already
    collapses the escaped braces, so that extra pass corrupted any literal
    doubled braces in the template as well as braces inside substituted
    values.  The post-processing is removed.
    """
    if not format:
        return ''
    # Hide the real placeholders so the brace-escaping below cannot touch them.
    for arg in kwargs:
        format = format.replace("{" + arg + "}", "##" + arg + "##")
    # Escape every remaining brace so str.format() treats it as a literal.
    format = format.replace("{", "{{").replace("}", "}}")
    # Restore the placeholders and let str.format() perform the substitution.
    for arg in kwargs:
        format = format.replace("##" + arg + "##", "{" + arg + "}")
    return format.format(**kwargs)
'''
Created on 21-03-2011
@author: maciek
'''
from IndexGenerator import IndexGenerator
from optparse import OptionParser
import os
import tempfile
import shutil
import logging
logging.basicConfig(level = logging.DEBUG)
# --- Command-line interface -------------------------------------------------
parser = OptionParser()
parser.add_option('-n', '--app-name', action='store', dest='appName', help='application name')
parser.add_option('-u', '--release-urls', action='store', dest='releaseUrls', help='URLs of download files - as comma separated list of entries')
parser.add_option('-d', '--destination-directory', action='store', dest='otaAppDir', help='Directory where OTA files are created')
parser.add_option('-v', '--version', action='store', dest='version', help='Version of the application')
parser.add_option('-r', '--releases', action='store', dest='releases', help='Release names of the application')
parser.add_option('-R', '--release-notes', action='store', dest='releaseNotes', help='Release notes of the application (in txt2tags format)')
parser.add_option('-D', '--description', action='store', dest='description', help='Description of the application (in txt2tags format)')
(options, args) = parser.parse_args()

# Every option is mandatory; report the first missing one and exit
# (parser.error() calls sys.exit).  Same check order and messages as before.
_REQUIRED = (
    ('appName', "Please specify the appName."),
    ('releaseUrls', "Please specify releaseUrls"),
    ('otaAppDir', "Please specify destination directory"),
    ('version', "Please specify version"),
    ('releases', "Please specify releases"),
    ('releaseNotes', "Please specify releaseNotes"),
    ('description', "Please specify description"),
)
for _dest, _message in _REQUIRED:
    if getattr(options, _dest) is None:
        parser.error(_message)

# Unpack into the module-level names used by the rest of the script.
appName = options.appName
releaseUrls = options.releaseUrls
otaAppDir = options.otaAppDir
version = options.version
releases = options.releases
releaseNotes = options.releaseNotes
description = options.description
def findIconFilename():
    """Return the launcher icon path, preferring higher pixel densities.

    Falls back to the density-independent res/drawable/icon.png when none
    of the density-specific icons exist.
    """
    candidates = (
        "res/drawable-hdpi/icon.png",
        "res/drawable-mdpi/icon.png",
        "res/drawable-ldpi/icon.png",
    )
    iconPath = "res/drawable/icon.png"
    for candidate in candidates:
        if os.path.exists(candidate):
            iconPath = candidate
            break
    logging.debug("IconPath: "+iconPath)
    return iconPath
def createOTApackage():
    '''
    Creates all needed OTA files in a temporary file.

    Reads the release-notes and description files, renders index.html via
    IndexGenerator, and returns a rewound temporary file containing the
    generated markup.
    '''
    # Close the input files promptly instead of leaking the handles.
    with open(releaseNotes) as notesFile:
        releaseNotesContent = notesFile.read()
    with open(description) as descriptionFile:
        descriptionContent = descriptionFile.read()
    indexGenerator = IndexGenerator(appName, releaseUrls, releaseNotesContent,
                                    descriptionContent, version, releases)
    index = indexGenerator.get()
    tempIndexFile = tempfile.TemporaryFile()
    tempIndexFile.write(index)
    tempIndexFile.flush()
    tempIndexFile.seek(0)  # rewind so the caller can copy from the start
    return tempIndexFile
tempIndexFile = createOTApackage()

# Ensure the destination directory exists.
if not os.path.isdir(otaAppDir):
    logging.debug("creating dir: "+otaAppDir)
    os.mkdir(otaAppDir)
else:
    logging.warning("dir: "+otaAppDir+" exists")

# Write the generated index.html; explicitly close both handles so the
# content is flushed to disk (the original left them open).
indexFile = open(os.path.join(otaAppDir, "index.html"), 'w')
try:
    shutil.copyfileobj(tempIndexFile, indexFile)
finally:
    indexFile.close()
    tempIndexFile.close()

# Put the application icon next to the index page.
srcIconFileName = findIconFilename()
disIconFileName = os.path.join(otaAppDir, "Icon.png")
shutil.copy(srcIconFileName, disIconFileName)
| Python |
'''
Created on 21-03-2011
@author: maciek
'''
from formater import formatString
import os
class IndexGenerator(object):
    '''
    Generates index.html for OTA application distribution.
    '''

    # Template shipped next to this module.
    basePath = os.path.dirname(__file__)
    templateFile = os.path.join(basePath, "templates/index.tmpl")

    # Class-level defaults, overwritten per instance in __init__.
    releaseUrls = ""
    appName = ""
    changeLog = ""
    description = ""
    version = ""
    release = ""

    def __init__(self, appName, releaseUrls, changeLog, description, version, releases):
        '''
        Constructor.

        releaseUrls and releases are comma-separated lists; entry i of
        releaseUrls is assumed to be the download link for release i
        (an IndexError is raised on a shorter url list, as before).
        '''
        self.appName = appName
        self.releaseUrls = releaseUrls
        self.changeLog = changeLog
        self.description = description
        self.version = version
        self.releases = releases

    def get(self):
        '''
        Returns the index.html source code rendered from the template file.
        '''
        urlList = self.releaseUrls.split(",")
        releaseList = self.releases.split(",")
        items = []
        for position, release in enumerate(releaseList):
            items.append(" <li>\n")
            items.append(" <h3><a href=\"javascript:load('" + urlList[position] + "')\">" + release + "</a></h3>\n")
            items.append(" </li>\n")
        generatedHtml = "".join(items)
        # Close the template file instead of leaking the handle.
        with open(self.templateFile) as templateHandle:
            template = templateHandle.read()
        return formatString(template, downloads=generatedHtml,
                            changeLog=self.changeLog,
                            appName=self.appName,
                            description=self.description,
                            version=self.version)
import os
import sys
from multiprocessing import Process, current_process, freeze_support
import multiprocessing
from pyftpdlib import ftpserver
class YourHandler(ftpserver.FTPHandler):
    """FTPHandler subclass exposing pyftpdlib's event callbacks.

    Every hook is a no-op except on_incomplete_file_received, which deletes
    partially uploaded files.  (The `file` parameter name shadows the
    builtin, but the callback signatures are fixed by pyftpdlib.)
    """

    def on_login(self, username):
        # Called after a successful login.
        pass

    def on_logout(self, username):
        # Called when the user logs out.
        pass

    def on_file_sent(self, file):
        # Called when a file has been completely sent.
        pass

    def on_file_received(self, file):
        # Called when a file has been completely received.
        pass

    def on_incomplete_file_sent(self, file):
        # Called when a download is aborted part-way.
        pass

    def on_incomplete_file_received(self, file):
        # Remove partially uploaded files.  Uses the module-level `os`
        # import; the redundant nested `import os` was dropped.
        os.remove(file)
def note(format, *args):
    """Write a printf-style status line to stderr, tagged with the name
    of the current process."""
    message = format % args
    tagged = '[%s]\t%s' % (current_process().name, message)
    sys.stderr.write(tagged + '\n')
def serve_forever(server):
    """Run the (multi-threaded) FTP server loop in this worker process.

    Blocks until interrupted; KeyboardInterrupt is swallowed so Ctrl-C
    shuts the worker down quietly.
    """
    note('starting server')
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass
def runpool(number_of_processes, address=('192.168.203.167', 21)):
    """Serve FTP from *number_of_processes* worker processes.

    A single FTPServer (with its bound listening socket) is created first;
    each child process inherits a copy, so all workers accept connections
    on the same *address*.  The calling process becomes the last worker.

    The default *address* preserves the original hard-coded endpoint; pass
    a different (host, port) tuple to listen elsewhere.
    """
    authorizer = ftpserver.DummyAuthorizer()
    # SECURITY: credentials are hard-coded; replace with real account
    # management before exposing this server to untrusted networks.
    authorizer.add_user('user', password="123456", homedir=os.getcwd() + "/REV", perm='elradfmw')
    # Swap in YourHandler here to activate the event callbacks defined above.
    handler = ftpserver.FTPHandler
    handler.tcp_no_delay = True
    handler.authorizer = authorizer
    server = ftpserver.FTPServer(address, handler)
    # Create child processes to act as workers.
    for _ in range(number_of_processes - 1):
        Process(target=serve_forever, args=(server,)).start()
    # The main process also acts as a worker.
    serve_forever(server)
def main():
NUMBER_OF_PROCESSES = multiprocessing.cpu_count()
# Got the Server CPU number at Process Number
print "number of CPU is %d" % NUMBER_OF_PROCESSES
runpool(NUMBER_OF_PROCESSES)
if __name__ == '__main__':
    # freeze_support() is required for multiprocessing in frozen Windows
    # executables; elsewhere it is a no-op.
    freeze_support()
    main()
# Multi-process FTP server: one worker process per CPU, each running
# pyftpdlib's threaded handler so many clients can be served concurrently.
# Design by Qunfei Wu, Shanghai.
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.