code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the "File Uploader" for Python
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorQuickUpload( FCKeditorConnectorBase,
        UploadFileCommandMixin,
        BaseHttpMixin, BaseHtmlMixin):
    """Handler for FCKeditor's QuickUpload command (single-file upload)."""
    def doResponse(self):
        "Main function. Process the request, set headers and return a string as response."
        # Check if this connector is disabled
        if not(Config.Enabled):
            return self.sendUploadResults(1, "This file uploader is disabled. Please check the \"editor/filemanager/connectors/py/config.py\"")
        command = 'QuickUpload'
        # The file type (from the QueryString, by default 'File').
        resourceType = self.request.get('Type','File')
        currentFolder = getCurrentFolder(self.request.get("CurrentFolder",""))
        # Check for invalid paths (getCurrentFolder returns None on traversal attempts)
        if currentFolder is None:
            return self.sendUploadResults(102, '', '', "")
        # Check if it is an allowed command
        if ( not command in Config.ConfigAllowedCommands ):
            return self.sendUploadResults( 1, '', '', 'The %s command isn\'t allowed' % command )
        if ( not resourceType in Config.ConfigAllowedTypes ):
            return self.sendUploadResults( 1, '', '', 'Invalid type specified' )
        # Setup paths
        self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
        self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
        if not self.userFilesFolder: # no absolute path given (dangerous...)
            self.userFilesFolder = mapServerPath(self.environ,
                self.webUserFilesFolder)
        # Ensure that the directory exists.
        if not os.path.exists(self.userFilesFolder):
            try:
                # BUG FIX: original read "self.createServerFoldercreateServerFolder",
                # a duplicated-name typo that raised AttributeError at runtime.
                self.createServerFolder( self.userFilesFolder )
            except:
                return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
        # File upload doesn't have to return XML, so intercept here
        return self.uploadFile(resourceType, currentFolder)
# Running from command line (plain old CGI)
# Running from command line (plain old CGI): emit headers, blank line, then body.
if __name__ == '__main__':
    try:
        # Create a Connector Instance
        conn = FCKeditorQuickUpload()
        data = conn.doResponse()
        # conn.headers is a sequence of (name, value) tuples; None entries are skipped.
        for header in conn.headers:
            if not header is None:
                print '%s: %s' % header
        print
        print data
    except:
        # Last-resort handler: fall back to plain text and dump the traceback
        # so the browser shows something useful instead of a 500.
        print "Content-Type: text/plain"
        print
        import cgi
        cgi.print_exception()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python and Zope.
This code was not tested at all.
It just was ported from pre 2.5 release, so for further reference see
\editor\filemanager\browser\default\connectors\py\connector.py in previous
releases.
"""
from fckutil import *
from connector import *
import config as Config
class FCKeditorConnectorZope(FCKeditorConnector):
    """
    Zope version of FCKeditorConnector.

    NOTE(review): self.zopeRootContext / self.zopeUploadContext /
    self.userFilesFolder are read before being assigned here; presumably
    initialised by FCKeditorConnector — confirm against the base class.
    """
    # Allow access (Zope security machinery)
    __allow_access_to_unprotected_subobjects__ = 1

    def __init__(self, context=None):
        """
        Constructor: store the Zope context and wrap its REQUEST object.
        """
        FCKeditorConnector.__init__(self, environ=None) # call superclass constructor
        # Instance Attributes
        self.context = context
        self.request = FCKeditorRequest(context)

    def getZopeRootContext(self):
        # Lazily resolve and cache the Zope application root.
        if self.zopeRootContext is None:
            self.zopeRootContext = self.context.getPhysicalRoot()
        return self.zopeRootContext

    def getZopeUploadContext(self):
        # Lazily walk from the Zope root down to the configured upload folder,
        # caching the resulting context.  ("<>" replaced by equivalent "!=".)
        if self.zopeUploadContext is None:
            folderNames = self.userFilesFolder.split("/")
            c = self.getZopeRootContext()
            for folderName in folderNames:
                if (folderName != ""):
                    c = c[folderName]
            self.zopeUploadContext = c
        return self.zopeUploadContext

    def setHeader(self, key, value):
        # Response headers go through the Zope RESPONSE object.
        self.context.REQUEST.RESPONSE.setHeader(key, value)

    def getFolders(self, resourceType, currentFolder):
        "Return a <Folders> XML fragment listing the subfolders of currentFolder."
        # Open the folders node
        s = ""
        s += """<Folders>"""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        for (name, o) in zopeFolder.objectItems(["Folder"]):
            s += """<Folder name="%s" />""" % (
                convertToXmlAttribute(name)
            )
        # Close the folders node
        s += """</Folders>"""
        return s

    def getZopeFoldersAndFiles(self, resourceType, currentFolder):
        # NOTE(review): calls self.getZopeFolders, which is not defined in this
        # class (only getFolders is) — presumably inherited; confirm.
        folders = self.getZopeFolders(resourceType, currentFolder)
        files = self.getZopeFiles(resourceType, currentFolder)
        s = folders + files
        return s

    def getZopeFiles(self, resourceType, currentFolder):
        "Return a <Files> XML fragment listing File/Image objects with sizes in KB."
        # Open the files node
        s = ""
        s += """<Files>"""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        for (name, o) in zopeFolder.objectItems(["File","Image"]):
            s += """<File name="%s" size="%s" />""" % (
                convertToXmlAttribute(name),
                ((o.get_size() / 1024) + 1)
            )
        # Close the files node
        s += """</Files>"""
        return s

    def findZopeFolder(self, resourceType, folderName):
        # Returns the Zope context of the resource / folder, creating the
        # resource-type folder on demand.
        zopeFolder = self.getZopeUploadContext()
        folderName = self.removeFromStart(folderName, "/")
        folderName = self.removeFromEnd(folderName, "/")
        if (resourceType != ""):
            try:
                zopeFolder = zopeFolder[resourceType]
            except:
                zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType)
                zopeFolder = zopeFolder[resourceType]
        if (folderName != ""):
            folderNames = folderName.split("/")
            for folderName in folderNames:
                zopeFolder = zopeFolder[folderName]
        return zopeFolder

    def createFolder(self, resourceType, currentFolder):
        "Create the folder named by the NewFolderName request parameter."
        # Find out where we are
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        errorNo = 0
        errorMsg = ""
        if self.request.has_key("NewFolderName"):
            newFolder = self.request.get("NewFolderName", None)
            zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder)
        else:
            errorNo = 102
        return self.sendErrorNode ( errorNo, errorMsg )

    def uploadFile(self, resourceType, currentFolder, count=None):
        """Store the uploaded NewFile in the target folder.

        On an id collision, retries with "name.count.ext" ids, incrementing
        count until the add succeeds.
        """
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        file = self.request.get("NewFile", None)
        fileName = self.getFileName(file.filename)
        fileNameOnly = self.removeExtension(fileName)
        fileExtension = self.getExtension(fileName).lower()
        if (count):
            nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension)
        else:
            nid = fileName
        title = nid
        try:
            zopeFolder.manage_addProduct['OFSP'].manage_addFile(
                id=nid,
                title=title,
                file=file.read()
            )
        except:
            if (count):
                count += 1
            else:
                count = 1
            # BUG FIX: original recursed into self.zopeFileUpload, a method that
            # does not exist (AttributeError on every name collision); the
            # retry must call this method, uploadFile.
            return self.uploadFile(resourceType, currentFolder, count)
        return self.sendUploadResults( 0 )
class FCKeditorRequest(object):
    """Thin adapter exposing dict-style access to a Zope REQUEST object."""
    def __init__(self, context=None):
        # Keep a direct reference to the request held by the Zope context.
        self.request = context.REQUEST
    def has_key(self, key):
        # Delegate membership testing to the underlying request.
        return self.request.has_key(key)
    def get(self, key, default=None):
        # Delegate lookup (with default) to the underlying request.
        return self.request.get(key, default)
"""
Running from zope, you will need to modify this connector.
If you have uploaded the FCKeditor into Zope (like me), you need to
move this connector out of Zope, and replace the "connector" with an
alias as below. The key to it is to pass the Zope context in, as
we then have a link to the Zope context.
## Script (Python) "connector.py"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=*args, **kws
##title=ALIAS
##
import Products.zope as connector
return connector.FCKeditorConnectorZope(context=context).doResponse()
"""
| Python |
# coding: UTF-8
from django.conf.urls.defaults import *
from views import feeds_dic
# URL routing table (old-style Django: patterns() with dotted-string view paths).
urlpatterns = patterns('',
    (r'^$', 'views.index'),
    (r'^login/$', 'views.login'),
    (r'^robots.txt$', 'views.robots'),
    (r'^blog/((?P<p_id>[\w-]+)/)?$', 'blog.views.index'),
    # Edit diary entries
    (r'^blog/diary/save/$', 'blog.views.save_diary'),
    (r'^blog/diary/(?P<diary_id>[\w-]+)?/save/$', 'blog.views.save_diary'),
    # NOTE: delete/save patterns capture the trailing "/" inside the id group;
    # the views strip it with id[0:-1].
    (r'^blog/diary/(?P<diary_id>[\w-]+/)?delete/$', 'blog.views.delete_diary'),
    (r'^blog/diary/list/((?P<p_id>[\w-]+)/)?$', 'blog.views.list_diary'),
    # Edit categories
    (r'^blog/category/(?P<category_id>[\w-]+/)?save/$', 'blog.views.save_category'),
    (r'^blog/category/(?P<category_id>[\w-]+/)?delete/$', 'blog.views.delete_category'),
    (r'^blog/category/list/$', 'blog.views.list_category'),
    # Display diary entries
    (r'^blog/diary/(?P<diary_id>[\w-]+)/$', 'blog.views.show_diary'),
    (r'^blog/category/(?P<category_id>[\w-]+)/((?P<page_id>\d+)/)?$', 'blog.views.show_category_diary_list'),
    (r'^blog/tag/(?P<tag_id>[\w-]+)/((?P<page_id>\d+)/)?$', 'blog.views.show_tag_diary_list'),
    # Links (blogroll)
    (r'^blog/link/(?P<link_id>[\w-]+/)?save/$', 'blog.views.save_link'),
    (r'^blog/link/(?P<link_id>[\w-]+/)?delete/$', 'blog.views.delete_link'),
    (r'^blog/link/list/$', 'blog.views.list_link'),
    # Comments
    (r'^blog/(?P<diary_id>[\w-]+)/comment/save/$', 'blog.views.save_comment'),
    (r'^blog/tag/(?P<t_id>\d+)/((?P<p_id>\d+)/)?$', 'blog.views.tag'),
    (r'^work/$', 'views.work'),
    (r'^resume/$', 'resume.views.index'),
    (r'^photo/$', 'views.photo'),
    #download
    (r'^download/$', 'file.views.list_downloads'),
    #RSS & ATOM
    (r'^rss/(?P<url>.*)/$', 'syndication.views.feed', {'feed_dict': feeds_dic}),
)
| Python |
import os
# Django project settings (old-style, pre-1.2 single-database keys).
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    ('Leo ZHOU', 'zhlwish@gmail.com'),
)
MANAGERS = ADMINS
# Database settings are intentionally blank: on App Engine the datastore is
# used instead of a SQL backend.
DATABASE_ENGINE = ''           # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_NAME = ''             # Or path to database file if using sqlite3.
DATABASE_USER = ''             # Not used with sqlite3.
DATABASE_PASSWORD = ''         # Not used with sqlite3.
DATABASE_HOST = ''             # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''             # Set to empty string for default. Not used with sqlite3.
TIME_ZONE = 'Asia/Shanghai'
LANGUAGE_CODE = 'zh-CN'
SITE_ID = 1
USE_I18N = True
# Media paths are resolved relative to this settings file.
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'media')
MEDIA_URL = 'http://localhost:8080/media/'
ADMIN_MEDIA_PREFIX = '/admin_media/'
# NOTE(review): SECRET_KEY is committed to source control — rotate it and load
# from the environment for any real deployment.
SECRET_KEY = '7kd5$4g)bu)-nsr@2c7!*(fk@$8lsd4wz05sd+mpsfr9&c!d#@'
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
)
TEMPLATE_CONTEXT_PROCESSORS=(
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.middleware.doc.XViewMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
    os.path.join(os.path.dirname(__file__), 'template'),
)
INSTALLED_APPS = (
    'django.contrib.contenttypes',
    'blog',
    'link',
)
| Python |
# coding=utf-8
from google.appengine.ext import db
# Category of diary entries
class Category(db.Model):
    name = db.StringProperty( required = True)
    order = db.IntegerProperty(required = True, default = 0)
# Blogroll link (original comment said "diary", which was a copy/paste slip)
class Link(db.Model):
    url = db.URLProperty(required = True)
    title = db.StringProperty(required = True)
    order = db.IntegerProperty(required = True, default = 0)
# Diary (blog post)
class Diary(db.Model):
    title = db.StringProperty()
    content = db.TextProperty()
    post_time = db.DateTimeProperty( auto_now_add = True)
    last_modify_time = db.DateTimeProperty( auto_now = True )
    author = db.UserProperty(auto_current_user=True)
    published = db.BooleanProperty( required = True, default = False )
    read_count = db.IntegerProperty( required = True, default = 0 )
    category = db.ReferenceProperty(Category)
    @property
    def tags(self):
        # Space-joined names of all tags whose "diaries" list contains this entry.
        tags = [tag.name for tag in Tag.gql("WHERE diaries = :1", self.key())]
        return ' '.join(tags)
    def tags_set(self):
        # The Tag entities themselves (used by templates to link to tag pages).
        return [tag for tag in Tag.gql("WHERE diaries = :1", self.key())]
# Tag: many-to-many with Diary via a list of diary keys.
class Tag(db.Model):
    name = db.StringProperty( required = True )
    diaries = db.ListProperty(db.Key)
# Comment on a diary entry
class Comment(db.Model):
    content = db.StringProperty( multiline=True )
    author = db.UserProperty(auto_current_user=True)
    nick_name = db.StringProperty()
    email = db.EmailProperty()
    post_time = db.DateTimeProperty( auto_now_add = True )
    diary = db.ReferenceProperty(Diary)
| Python |
# -*- coding: UTF-8 -*-
from django import template
from django.template import Node, NodeList, Template, Context, Variable
from django.template import TemplateSyntaxError, VariableDoesNotExist, BLOCK_TAG_START, BLOCK_TAG_END, VARIABLE_TAG_START, VARIABLE_TAG_END, SINGLE_BRACE_START, SINGLE_BRACE_END, COMMENT_TAG_START, COMMENT_TAG_END
register = template.Library()
#自定义filter
def id(value, arg=None):
    """Template filter: return the datastore key of *value*, or '' when falsy."""
    # (Named "id" because that is the filter name used in templates;
    # it intentionally shadows the builtin inside this module.)
    return value.key() if value else ''
def count(value, arg=None):
    """Template filter: return value.count() for a truthy value, else 0."""
    return value.count() if value else 0
# Filters produce values that may need escaping, so mark them not "safe".
id.is_safe = False
count.is_safe = False
# Register under the function names: usable as {{x|id}} and {{x|count}}.
register.filter(id)
register.filter(count)
#自定义tag
# Custom tag node: compares the datastore .key() of two resolved variables.
class IfKeyEqualNode(Node):
    def __init__(self, var1, var2, nodelist_true, nodelist_false, negate):
        # var1/var2 are variable names; resolved against the context at render time.
        self.var1, self.var2 = Variable(var1), Variable(var2)
        self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
        # negate=True renders the true branch when the keys DIFFER.
        self.negate = negate
    def __repr__(self):
        return "<IfKeyEqualNode>"
    def render(self, context):
        """Render the true or false branch depending on key equality."""
        try:
            val1 = self.var1.resolve(context).key()
        except VariableDoesNotExist:
            # Unresolvable variable compares as None.
            # NOTE(review): if the variable resolves but has no .key(), an
            # AttributeError propagates — confirm that is intended.
            val1 = None
        try:
            val2 = self.var2.resolve(context).key()
        except VariableDoesNotExist:
            val2 = None
        if (self.negate and val1 != val2) or (not self.negate and val1 == val2):
            return self.nodelist_true.render(context)
        return self.nodelist_false.render(context)
def do_ifkeyequal(parser, token, negate):
    """Shared parser for {% ifkeyequal %}: two arguments plus optional {% else %}.

    Returns an IfKeyEqualNode; negate selects the not-equal variant.
    """
    bits = list(token.split_contents())
    if len(bits) != 3:
        # Portable raise form; the original used the Python-2-only
        # "raise E, msg" statement syntax (same semantics in Python 2).
        raise TemplateSyntaxError("%r takes two arguments" % bits[0])
    end_tag = 'end' + bits[0]
    # Everything up to {% else %} or the end tag is the "true" branch.
    nodelist_true = parser.parse(('else', end_tag))
    token = parser.next_token()
    if token.contents == 'else':
        nodelist_false = parser.parse((end_tag,))
        parser.delete_first_token()
    else:
        nodelist_false = NodeList()
    return IfKeyEqualNode(bits[1], bits[2], nodelist_true, nodelist_false, negate)
#@register.tag
def ifkeyequal(parser, token):
    """{% ifkeyequal a b %}: true branch when a.key() == b.key()."""
    return do_ifkeyequal(parser, token, False)
# Registered under the name "ifequal" (overriding it for key comparison).
ifequal = register.tag(ifkeyequal)
# coding=utf-8
from django.http import HttpResponse,HttpResponseRedirect
from django.shortcuts import render_to_response
from django.core import serializers
from django.utils import simplejson
from django.template import Template
from django.template import Context
import template
from google.appengine.api import users
from google.appengine.ext import db
from blog.models import Category
from blog.models import Diary
from blog.models import Tag
from blog.models import Comment
from link.models import Link
def gen_general_page(request, extra_context):
    """Render blog/index.html with login/admin info merged with extra_context."""
    login_user = users.get_current_user()
    login_url = users.create_login_url(request.get_full_path())
    is_user_admin = users.is_current_user_admin()
    logout_url = users.create_logout_url(request.get_full_path())
    context = {'login_url':login_url,
        'logout_url':logout_url,
        'login_user':login_user,
        'is_user_admin':is_user_admin
        }
    context.update(extra_context)
    return render_to_response('blog/index.html', context)
def index(request, p_id):
    """Blog front page: diary list page p_id (default 1) with all sidebars.

    NOTE(review): non-GET requests fall through and return None — confirm.
    """
    if request.method == 'GET':
        tags = Tag.all()
        links = Link.all()
        if p_id == None:
            p_id = 1
        context = {'mod':'blog',
            'tags':tags,
            'comment_list':template.gen_comment_list_html(),
            'links':links,
            'category_list':template.gen_category_list_html(),
            'link_list':template.gen_link_list_html(),
            'diary_list':template.gen_diary_list_html(p_id),
            'tag_list':template.gen_tag_list_html(),
            }
        return gen_general_page(request, context)
##############
#Diary
##############
def list_diary(request, p_id):
    """Ajax endpoint: rendered diary-list HTML fragment for page p_id."""
    return HttpResponse(template.gen_diary_list_html(p_id))
def show_diary(request, diary_id):
    """Show one diary entry: bare fragment for Ajax, full page otherwise."""
    if request.is_ajax():
        return HttpResponse(template.gen_diary_html(diary_id))
    else:
        # Not an Ajax request: build the whole page.
        tags = Tag.all()
        links = Link.all()
        context = {'mod':'blog',
            'tags':tags,
            'comment_list':template.gen_comment_list_html(),
            'links':links,
            'category_list':template.gen_category_list_html(),
            'link_list':template.gen_link_list_html(),
            'diary_list':template.gen_diary_html(diary_id),
            'tag_list':template.gen_tag_list_html(),
            }
        return gen_general_page(request, context)
def save_diary(request, diary_id=None):
    """POST: create/update a diary entry and its tags. GET: render the edit form."""
    if request.method == 'POST':
        if diary_id:
            diary = db.get(diary_id)
        else:
            diary = Diary()
        # Fetch the category selected in the form.
        diary_category = db.get(request.POST['diary_category'])
        # Save the diary itself.
        diary.title = request.POST['diary_title']
        diary.content = db.Text(request.POST['diary_content'])
        diary.category = diary_category.key()
        diary.put()
        # Save the tags (space-separated names).
        diary_tags = request.POST['diary_tags'].strip().split(' ')
        for diary_tag in diary_tags:
            # NOTE(review): GQL built by string interpolation; a tag containing
            # a quote breaks the query — consider bound parameters.
            tag = Tag.gql("where name='%s'" % diary_tag).get()
            if tag == None:
                tag = Tag(name=diary_tag)
            # NOTE(review): on edit this can append a duplicate key — confirm.
            tag.diaries.append(diary.key())
            tag.put()
        #TODO: support editing/removing tags on an existing entry
        return HttpResponse('ok')
    else:
        if diary_id:
            diary = db.get(diary_id)
        else:
            diary = None
        categories = Category.all()
        return render_to_response('blog/diary_form.html', {'diary': diary, 'categories':categories, })
def delete_diary(request, diary_id):
    """Delete a diary entry (admin only, POST); returns page 1 of the list."""
    if request.method == 'POST' and users.is_current_user_admin():
        # The URL pattern captures a trailing "/" inside diary_id; strip it.
        id = diary_id[0:-1]
        db.delete(id)
        #TODO: jump back to the page the deleted entry was on
        return HttpResponse(template.gen_diary_list_html(1))
    else:
        return HttpResponse('access_deny')
##############
#Category
##############
def list_category(request):
    """Ajax endpoint: rendered category-list HTML fragment."""
    return HttpResponse(template.gen_category_list_html())
def delete_category(request, category_id):
    """Delete a category (admin only, POST); returns the refreshed list."""
    if request.method == 'POST' and users.is_current_user_admin():
        # The URL pattern captures a trailing "/" inside category_id; strip it.
        id = category_id[0:-1]
        db.delete(id)
        return HttpResponse(template.gen_category_list_html())
    else:
        return HttpResponse('access_deny')
def save_category(request, category_id):
    """POST: create a category (admin only). GET: render the form.

    NOTE(review): category_id is ignored on POST, so "edit" always creates a
    new Category — confirm whether editing was intended.
    """
    if request.method == 'POST':
        if users.is_current_user_admin():
            category = Category(name=request.POST['name'], order=0)
            category.put()
            return HttpResponse('ok')
        else:
            return HttpResponse('access_deny')
    else:
        if category_id:
            category = db.get(category_id)
        else:
            category = None
        return render_to_response('blog/category_form.html', {'category': category, })
def show_category_diary_list(request, category_id, page_id):
    """Diary list filtered by category: fragment for Ajax, full page otherwise."""
    if request.is_ajax():
        return HttpResponse(template.gen_diary_list_html(c_id=category_id, p_id=page_id))
    else:
        tags = Tag.all()
        links = Link.all()
        context = {'mod':'blog',
            'tags':tags,
            'comment_list':template.gen_comment_list_html(),
            'links':links,
            'category_list':template.gen_category_list_html(),
            'link_list':template.gen_link_list_html(),
            'diary_list':template.gen_diary_list_html(p_id=page_id, c_id=category_id),
            'tag_list':template.gen_tag_list_html(),
            }
        return gen_general_page(request, context)
##############
#Link
##############
def list_link(request):
    """Ajax endpoint: rendered blogroll-link list HTML fragment."""
    return HttpResponse(template.gen_link_list_html())
def save_link(request, link_id):
    """POST: create a link (admin only). GET: render the form.

    NOTE(review): like save_category, link_id is ignored on POST — confirm.
    """
    if request.method == 'POST':
        if users.is_current_user_admin():
            link = Link(title=request.POST['title'], url=request.POST['url'], order=0)
            link.put()
            return HttpResponse('ok')
        else:
            return HttpResponse('access_deny')
    else:
        if link_id:
            link = db.get(link_id)
        else:
            link = None
        return render_to_response('blog/link_form.html', {'link': link, })
def delete_link(request, link_id):
    """Delete a link (admin only, POST); returns the refreshed list."""
    if request.method == 'POST' and users.is_current_user_admin():
        # The URL pattern captures a trailing "/" inside link_id; strip it.
        id = link_id[0:-1]
        db.delete(id)
        return HttpResponse(template.gen_link_list_html())
    else:
        return HttpResponse('access_deny')
##############
#Comment
##############
def save_comment(request, diary_id):
    """POST: attach a comment to diary_id and return the refreshed entry HTML.
    GET: return the comment form.

    NOTE(review): indentation was reconstructed — the nesting of the GET branch
    relative to "if parent_diary" is an inference; confirm against the original.
    When diary_id is falsy the function returns None.
    """
    if diary_id:
        parent_diary = db.get(diary_id)
        if parent_diary:
            if request.method == 'POST':
                comment = Comment(content=request.POST['content'],
                    nick_name=request.POST['nick_name'],
                    email=request.POST['email'],
                    diary=parent_diary.key())
                comment.put()
                return HttpResponse(template.gen_diary_html(diary_id))
            if request.method == 'GET':
                return HttpResponse(template.gen_comment_form(diary_id))
            else:
                return HttpResponse('access_deny')
##############
#Tag
##############
def show_tag_diary_list(request, tag_id, page_id):
    """Diary list filtered by tag: fragment for Ajax, full page otherwise."""
    if request.is_ajax():
        return HttpResponse(template.gen_diary_list_html(t_id=tag_id, p_id=page_id))
    else:
        tags = Tag.all()
        links = Link.all()
        context = {'mod':'blog',
            'tags':tags,
            'comment_list':template.gen_comment_list_html(),
            'links':links,
            'category_list':template.gen_category_list_html(),
            'link_list':template.gen_link_list_html(),
            'diary_list':template.gen_diary_list_html(p_id = page_id, t_id = tag_id),
            'tag_list':template.gen_tag_list_html(),
            }
        return gen_general_page(request, context)
# coding=utf-8
from __future__ import division
from django.template import Template
from django.template import Context
from django.core.paginator import *
from google.appengine.api import users
from google.appengine.ext import db
from blog.models import Category
from blog.models import Diary
from blog.models import Tag
from blog.models import Comment
from link.models import Link
from helper import rand_color
# ---------------------------------------------------------------------------
# Inline Django template strings rendered by the gen_*_html helpers below.
# The template text itself is runtime data and is left byte-for-byte intact.
# ---------------------------------------------------------------------------

# Sidebar: category list with per-entry diary counts and admin delete icons.
category_list = '''
{% load blog_extras %}
<ul class="sidemenu">
{% for category in categories %}
<li>
<a href="/blog/category/{{category|id}}/">{{category.name}}({{category.diary_set|count}})</a>
{% if is_user_admin %}
<div id="delete_category_{{category|id}}" class="delete_icon"></div>
{% endif %}
<div class="clear"/>
</li>
{% endfor %}
</ul>
'''
# Sidebar: tag cloud; each entry is a pre-rendered <a> string (see gen_tag_list_html).
tag_list = '''
{% load blog_extras %}
<div class="sidemenu, tag_cloud">
{% for tag in tags %}
{% autoescape off %}{{tag}}{% endautoescape %}
{% endfor %}
</div>
'''
# Sidebar: blogroll links with admin delete icons.
link_list = '''
{% load blog_extras %}
<ul class="sidemenu">
{% for link in links %}
<li>
<a href="{{link.url}}" target="_blank">{{link.title}}</a>
{% if is_user_admin %}
<div id="delete_link_{{link|id}}" class="delete_icon"></div>
{% endif %}
<div class="clear"/>
</li>
{% endfor %}
</ul>
'''
# Sidebar: recent comment snippets.
comment_list = '''
{% load blog_extras %}
{% for comment in comments %}
<p class="comment_summary">{{comment.content}}</p>
{% endfor %}
'''
# Main column: paginated diary list with tags, counts, and pager controls.
diary_list = '''
{% load blog_extras %}
{% if diarys.object_list|length_is:"0" %}
暂时还没有日志,谢谢关注!
{% else %}
{% for diary in diarys.object_list %}
<a href="/blog/diary/{{diary|id}}/">
<h1>{{diary.title}}</h1>
</a>
{% autoescape off %}<div class="diary_summary">{{diary.content}}</div>{% endautoescape %}
<div class="post-footer align-left">
<div style="float:left;">标签:
{% for tag in diary.tags_set %}
<a href="/blog/tag/{{tag|id}}/">{{tag.name}} </a> {% if not forloop.last %} | {% endif %}
{% endfor %}
</div>
<div style="float:right;">
<a href="/blog/diary/{{diary|id}}/#comment" class="comments">评论({{diary.comment_set|count}})</a>
<span class="read_count">点击次数:{{diary.read_count}}</span>
<span class="date">{{diary.post_time|date:"Y-m-d H:i"}}</span>
{% if is_user_admin %}
<div id="delete_diary_{{diary|id}}" class="delete_icon"></div>
<div id="edit_diary_{{diary|id}}" class="edit_icon"></div>
{% endif %}
</div>
<div style="clear:both;"></div>
</div>
<div style="clear:both;"></div>
{% endfor%}
<div class="pagination">
<span class="step-links">
共{{diarys.paginator.num_pages}}页|
{% ifnotequal diarys.number 1 %}<a href="/blog/1/">第一页</a>{% endifnotequal %}
{% ifequal diarys.number 1 %}第一页{% endifequal %}
{% if diarys.has_previous %}<a href="/blog/{{ diarys.previous_page_number }}/">上一页</a>{% endif %}
{% if not diarys.has_previous %}上一页 {% endif %}
{% for index in diarys.paginator.page_range%}
{% ifequal diarys.number index%}
[{{index}}]
{% endifequal%}
{% ifnotequal diarys.number index%}
<a href="/blog/{{ index }}/">[{{index}}]</a>
{% endifnotequal%}
{% endfor %}
{% if diarys.has_next %}<a href="/blog/{{ diarys.next_page_number }}/">下一页</a>{% endif %}
{% if not diarys.has_next %}下一页 {% endif %}
{% ifnotequal diarys.number diarys.paginator.num_pages %}<a href="/blog/{{diarys.paginator.num_pages}}/">最末页</a>{% endifnotequal %}
{% ifequal diarys.number diarys.paginator.num_pages %}最末页{% endifequal %}
</span>
</div>
{% endif %}
'''
# Main column: a single diary entry with its comment thread.
diary_template = '''
{% load blog_extras %}
<a href="/blog/diary/{{diary|id}}/">
<h1>{{diary.title}}</h1>
</a>
{% autoescape off %}<div>{{diary.content}}</div>{% endautoescape %}
<div class="post-footer align-left">
<div style="float:left;">标签:
{% for tag in diary.tags_set %}
<a href="/blog/tag/{{tag|id}}/">{{tag.name}} </a> {% if not forloop.last %} | {% endif %}
{% endfor %}
</div>
<div style="float:right;">
<a href="#comment" class="comments">评论({{diary.comment_set|count}})</a>
<span class="read_count">点击次数:{{diary.read_count}}</span>
<span class="date">{{diary.post_time|date:"Y-m-d H:i"}}</span>
{% if is_user_admin %}
<div id="delete_diary_{{diary|id}}" class="delete_icon"></div>
{% endif %}
</div>
<div style="clear:both;"></div>
</div>
<div style="clear:both;"></div>
<div id="comments" >
<a name="comment"></a>
<label>评论 <input style="float:" id="add_comment_{{diary|id}}" class="button" type="button" value="我也评论"/></label>
{% for comment in diary.comment_set%}
<div class="comment">
<div class="comment_head"><a href="mailto:{{comment.email}}">{{comment.nick_name}}({{comment.author}})</a>发表于<span class="date" style="float:none;">{{comment.post_time|date:"Y-m-d H:i"}}</span></div>
<p>{{comment.content}}</p>
</div>
{% endfor %}
</div>
'''
# Form: add-comment form, pre-filled with the logged-in user's name/email.
comment_form_template = '''
<form id="add_comment_form" method="POST">
<input type="hidden" name="diary_id" value="{{diary_id}}" />
<p>
<label>姓名:</label>
<input id="id_nick_name" name="nick_name" value="{{login_user.nickname}}" type="text" size="30" />
</p>
<p>
<label>Email:</label>
<input id="id_email" name="email" value="{{login_user.email}}" type="text" size="30" />
</p>
<p>
<label>我想说:</label>
<textarea id="id_content" name="content"></textarea>
</p>
</form>
'''
def gen_category_list_html():
    """Render the category sidebar fragment."""
    categories = Category.all()
    tmp = Template(category_list)
    ctx = Context({'categories':categories,
        'is_user_admin':users.is_current_user_admin(),
        })
    return tmp.render(ctx)
def gen_link_list_html():
    """Render the blogroll sidebar fragment."""
    link = Link.all()
    tmp = Template(link_list)
    ctx = Context({'links':link,
        'is_user_admin':users.is_current_user_admin(),
        })
    return tmp.render(ctx)
def gen_diary_list_html(p_id, c_id=None, t_id=None):
    """Render page p_id of the diary list, optionally filtered.

    c_id filters by category key, t_id by tag key; otherwise all diaries,
    newest first, 8 per page.  Bad/out-of-range page numbers clamp to a
    valid page instead of raising.
    """
    if c_id:
        category = db.get(c_id)
        diarys_all = category.diary_set.order('-post_time')
    elif t_id:
        # Tag stores a list of diary keys; fetch each entity.
        d_ids = db.get(t_id).diaries
        diarys_all = [db.get(d_id) for d_id in d_ids]
    else:
        diarys_all = Diary.all().order('-post_time')
    paginator = Paginator(diarys_all, 8)
    try:
        page_index = int(p_id)
    except (ValueError, TypeError):
        page_index = 1
    try:
        diarys = paginator.page(page_index)
    except (EmptyPage, InvalidPage):
        # Out-of-range pages fall back to the last page.
        diarys = paginator.page(paginator.num_pages)
    tmp = Template(diary_list)
    ctx = Context({'diarys':diarys,
        'is_user_admin':users.is_current_user_admin(),
        })
    return tmp.render(ctx)
def gen_diary_html(diary_id):
    """Render one diary entry, bumping its read counter.

    NOTE(review): the bare except hides all failures (bad key, datastore
    errors) behind the same "not found" message; the read_count increment is
    also not transactional — concurrent reads may lose counts.
    """
    try:
        diary = Diary.get(diary_id)
        diary.read_count = diary.read_count + 1
        diary.put()
        ctx = Context({'diary':diary,})
        tmp = Template(diary_template)
        return tmp.render(ctx)
    except:
        return "没有找到相应日志,该日志已经被删除或者您输入的URL错误。"
def gen_tag_list_html():
    """Render the tag cloud: font size scales with tag usage, random colors."""
    tags = Tag.all()
    tag_weighing = [len(tag.diaries) for tag in tags]
    tag_strs = ""
    if(len(tag_weighing) != 0):
        max_w = max(tag_weighing)
        min_w = min(tag_weighing)
        # True division: "from __future__ import division" is active in this file.
        weighing = float(3/((max_w - min_w) + 1))
        tag_strs = [];
        for tag in tags:
            # 12px base size plus up to ~3px of weight.
            font_size = str(weighing * len(tag.diaries) + 12) + "px";
            color = rand_color()
            tag_strs.append("<a href='/blog/tag/" + str(tag.key()) + "/' style='color:" + color + ";font-size:" + font_size + ";'>" + tag.name + "</a> ")
    tmp = Template(tag_list)
    ctx = Context({'tags':tag_strs})
    return tmp.render(ctx)
def gen_comment_form(diary_id):
    """Render the add-comment form for diary_id, pre-filled for the current user."""
    tmp = Template(comment_form_template)
    ctx = Context({'diary_id':diary_id,
        'login_user':users.get_current_user()})
    return tmp.render(ctx)
def gen_comment_list_html():
    """Render the four most recent comments for the sidebar."""
    comments = Comment.all().order('-post_time')[:4]
    tmp = Template(comment_list)
    ctx = Context({'comments':comments})
    return tmp.render(ctx)
#coding=utf-8
from django.http import HttpResponse
from django.http import HttpResponsePermanentRedirect
from django.shortcuts import render_to_response
from syndication.feeds import Feed
from blog.models import Diary
def index(request):
    """Site front page."""
    return render_to_response('index.html')
def login(request):
    """Login page; POST handling is a stub (authentication is via GAE users API)."""
    if request.method == 'POST':
        pass
    return render_to_response('index.html')
def robots(request):
    """Serve robots.txt through the template engine."""
    return render_to_response('robots.txt')
def photo(request):
    """Permanent redirect to the external Picasa photo album."""
    return HttpResponsePermanentRedirect('http://picasaweb.google.com/zhlwish/')
class BlogFeed(Feed):
    """RSS/Atom feed of the 10 most recent diary entries."""
    title = '没有比人更高的山——周亮的博客'
    link = 'http://www.zhlwish.com/blog/'
    description = '周亮的博客,网址:http://www.zhlwish.com,EMail:zhlwish@gmail.com'
    # BUG FIX: attribute was misspelled "cpoyright", so the feed framework
    # never picked it up.
    copyright = 'Copyright (c) 2008, www.zhlwish.com'
    def items(self):
        # Ten newest entries.
        return Diary.all().order('-post_time')[:10]
    def get_absolute_url(self, item):
        return 'http://www.zhlwish.com/blog/diary/%s/' % item.key()
    def item_title(self, item):
        return item.title
    def item_content(self, item):
        return item.content
    def item_link(self, item):
        return 'http://www.zhlwish.com/blog/diary/%s/' % item.key()
    def item_author_name(self, item):
        return item.author
    def item_pubdate(self, item):
        return item.post_time
    def item_categories(self, item):
        return (item.category.name ,)
# Placeholder feed for the downloads section (not implemented yet).
class DownloadFeed(Feed):
    pass
# Mapping from the /rss/<url>/ slug to the feed class (see urls.py).
feeds_dic = {
    'blog':BlogFeed,
    'download': DownloadFeed,
}
| Python |
import random
def rand_color():
    """Return a random CSS hex color string like "#a3f09b", never pure white."""
    hex_digits = "0123456789abcdef"
    while True:
        candidate = "#" + "".join(random.choice(hex_digits) for _ in range(6))
        # Reroll white so text stays visible on a white background.
        if candidate != "#ffffff":
            return candidate
# coding=utf-8
from google.appengine.ext import db
# Guestbook message
class Message(db.Model):
    content = db.StringProperty( multiline=True )
    author = db.UserProperty(auto_current_user=True)
    nick_name = db.StringProperty()
    email = db.EmailProperty()
    post_time = db.DateTimeProperty( auto_now_add = True )
    # BUG FIX: original assigned the class object itself
    # ("parent = db.ReferenceProperty") instead of instantiating it, so
    # "parent" was never a datastore property.
    # NOTE(review): "parent" also collides with db.Model.parent(); consider
    # renaming (e.g. parent_message) — TODO confirm callers.
    parent = db.ReferenceProperty()
| Python |
import os.path
import csv
import glob
# Join NOAA GSOD daily weather files onto airports and emit one CSV row per
# airport-day.  Fields are sliced at fixed column offsets of the .op format;
# sentinel values (9999.9 etc.) mean "missing" and become empty cells.
weather_years = [2008, 2009]
# Load the mapping from airports to weather station WBAN codes.
airports = { }
with open('data/airport_weather_stations.csv') as f :
    data = csv.DictReader(f, delimiter=',', quotechar='"')
    for line in data :
        airports[line["airport"]] = line["wban"]
# Prepare output of weather data by airport and date
output = csv.writer(open('data/airport_weather.csv', 'w'))
output.writerow(('airport', 'date', 'temp', 'precip', 'snowamt', 'fog', 'rain', 'snow', 'hail', 'thunder', 'tornado'))
# Scan weather data (Python 2: keys() returns a sortable list)
airportnames = airports.keys()
airportnames.sort()
for a in airportnames :
    for year in weather_years :
        fn = 'data/ncdc-noaa/gsod_' + str(year) + '/' + airports[a] + '-' + str(year) + '.op'
        if not os.path.exists(fn) :
            # No station file for this airport/year; skip silently.
            continue
        with open(fn) as f :
            f.readline()  # skip the header line
            for line in f :
                date = line[14:22]
                temp = line[24:30]
                if temp == "9999.9" :
                    temp = None
                else :
                    temp = float(temp)
                precip = line[118:123]
                if precip == "99.99" :
                    precip = None # maybe zero
                else :
                    precip = float(precip)
                snowamt = line[125:130]
                if snowamt == "999.9" :
                    snowamt = None # maybe zero
                else :
                    snowamt = float(snowamt)
                # Weather indicator flags: single characters, "1" means present.
                fog = (line[132] == "1")
                rain = (line[133] == "1")
                snow = (line[134] == "1")
                hail = (line[135] == "1")
                thunder = (line[136] == "1")
                tornado = (line[137] == "1")
                output.writerow((a, date, temp, precip, snowamt, fog, rain, snow, hail, thunder, tornado))
| Python |
import csv
import sys
import glob
import os
import os.path
# US holiday dates (2008) labeled with offsets around the holiday itself,
# e.g. "thanksgiving+1" is the day after Thanksgiving.
holidays = {
    "2008-05-25": "memorial-1",
    "2008-05-26": "memorial",
    "2008-05-27": "memorial+1",
    "2008-09-01": "labor",
    "2008-11-26": "thanksgiving-1",
    "2008-11-27": "thanksgiving",
    "2008-11-28": "thanksgiving+1",
    "2008-11-29": "thanksgiving+2",
    "2008-11-30": "thanksgiving+3",
    "2008-12-24": "christmas-1",
    "2008-12-25": "christmas",
    "2008-12-26": "christmas+1",
}
# This fails if the dir already exists, which is good because
# we need to start with an empty directory.
os.mkdir("data/tmp")
# LRU-ish cache of open output files: list of (path, file object, DictWriter),
# oldest first, capped at 15 to stay under OS file-descriptor limits.
filecache = []
def openfile(fn, fieldnames) :
    """Return a cached csv.DictWriter for fn, opening (append mode) on miss.

    Writes the header row only when the file did not exist yet, so re-runs
    against existing files do not duplicate headers.
    """
    for cfn, cfo, cfw in filecache :
        if cfn == fn :
            return cfw
    if len(filecache) == 15 :
        # Cache full: close and evict the oldest entry.
        cfn, cfo, cfw = filecache[0]
        cfo.close()
        filecache.pop(0)
    newfile = not os.path.exists(fn)
    fo = open(fn, "a")
    if newfile :
        fo.write(",".join(fieldnames) + "\n")
    fw = csv.DictWriter(fo, fieldnames)
    filecache.append( (fn, fo, fw) )
    return fw
# Load in the on-time records one by one and split it out into temporary
# files according to how we want to splice the data.
for fn in sorted(glob.glob('data/ontime/stats/*.csv')) :
print fn + "..."
with open(fn) as f :
data = csv.DictReader(f, delimiter=',', quotechar='"')
for line in data :
hour = line["CRS_DEP_TIME"][0:2]
holiday = ""
if line["FL_DATE"] in holidays :
holiday = holidays[line["FL_DATE"]]
# Split data by:
# origin, destination, carrier, flight#, day of week, hour, holiday
for key in (
(line["ORIGIN"], "", "", "", "", "", holiday),
(line["ORIGIN"], "", "", "", line["DAY_OF_WEEK"], "", ""),
(line["ORIGIN"], "", "", "", "", hour, ""),
("", line["DEST"], "", "", "", "", ""),
(line["ORIGIN"], line["DEST"], "", "", "", "", ""),
(line["ORIGIN"], line["DEST"], line["UNIQUE_CARRIER"], line["FL_NUM"], "", "", ""),
("", "", line["UNIQUE_CARRIER"], "", "", "", ""),
) :
keyfn = "data/tmp/" + ",".join(key) + ".csv"
wr = openfile(keyfn, data.fieldnames)
wr.writerow(line)
for cfn, cfo, cfw in filecache :
cfo.close()
| Python |
# Match up three-letter airport codes to three-letter weather
# station call signs, as best we can.
import os.path
import csv
import glob
weather_years = [2008, 2009]

# Get list of airport codes in the data.
# airports: code -> list of candidate WBAN station ids (filled in later).
# airport_descr: code -> human-readable description string.
airports = { }
airport_descr = { }
with open('data/ontime/meta/L_AIRPORTS.csv') as f :
    data = csv.DictReader(f, delimiter=',', quotechar='"')
    for line in data :
        airports[line["Code"]] = []
        airport_descr[line["Code"]] = line["Description"]

# Read manually set station codes.
# File format (from the usage below): each line is
#   <airport code> <station call sign>
# giving airport_station_codes: call sign -> airport code.
airport_station_codes = { }
airport_manually_set = { }
if os.path.exists('additional_airport_weather_stations.txt') :
    with open('additional_airport_weather_stations.txt') as f :
        for line in f :
            fields = line.strip().split()
            if len(fields) == 2 :
                airport_station_codes[fields[1]] = fields[0]
# Scan weather station list to find WBAN codes. Some airports
# seem to have more than one entry, and I'm not sure what governs
# which has a corresponding weather data file. Collect all of
# the codes that match.
# First pass: index every station (keyed "USAF-WBAN") by call sign,
# description, and state.  WBAN "99999" is a placeholder and is skipped.
wban_call = { }
wban_descr = { }
wban_state = { }
with open('data/ncdc-noaa/ish-history.csv') as f :
    data = csv.DictReader(f, delimiter=',', quotechar='"')
    for line in data :
        if line["WBAN"] == "99999" :
            continue
        wban = line["USAF"]+"-"+line["WBAN"]
        wban_call[wban] = line["CALL"]
        wban_descr[wban] = line["STATION NAME"] + ", " + line["CTRY"] + ", " + line["STATE"]
        wban_state[wban] = line["STATE"]
# Second pass: match station call signs against airport codes, both
# directly and with a leading "K" or "P" stripped (call signs often
# prefix the 3-letter airport code with one of those letters).
with open('data/ncdc-noaa/ish-history.csv') as f :
    data = csv.DictReader(f, delimiter=',', quotechar='"')
    for line in data :
        if line["WBAN"] == "99999" :
            continue
        wban = line["USAF"]+"-"+line["WBAN"]
        cx = line["CALL"]
        if len(cx) < 3 :
            continue
        if cx in airports :
            airports[cx].append(wban)
        if cx[0] == "K" and cx[1:] in airports :
            airports[cx[1:]].append(wban)
        if cx[0] == "P" and cx[1:] in airports :
            airports[cx[1:]].append(wban)
# Third pass: apply the manual overrides.  The first manual match resets
# the airport's candidate list, so manual stations replace (not extend)
# the automatic matches.
with open('data/ncdc-noaa/ish-history.csv') as f :
    data = csv.DictReader(f, delimiter=',', quotechar='"')
    for line in data :
        if line["WBAN"] == "99999" :
            continue
        wban = line["USAF"]+"-"+line["WBAN"]
        cx = line["CALL"]
        if cx in airport_station_codes :
            a = airport_station_codes[cx]
            if not a in airport_manually_set :
                airports[a] = []
            airports[a].append(wban)
            airport_manually_set[a] = True
# Prepare output of the matching between airports and stations.  'with'
# guarantees the file is flushed and closed.
with open('data/airport_weather_stations.csv', 'w') as outfile:
    output = csv.writer(outfile)
    output.writerow(('airport', 'station', 'airport_descr', 'station_descr', 'wban'))

    # Scan weather data to pick out the first matching WBAN code for
    # an airport that actually has data.
    for a in sorted(airports):
        found_wban = None
        for year in weather_years:
            for wban in airports[a]:
                fn = 'data/ncdc-noaa/gsod_' + str(year) + '/' + wban + '-' + str(year) + '.op'
                if not os.path.exists(fn):
                    continue
                if a not in airport_manually_set:
                    # Check that airport state matches station state to catch most
                    # of the errors of identifying an international airport code
                    # with a domestic weather station call sign.
                    try:
                        a_loc, a_nam = airport_descr[a].split(": ")
                        a_city, a_state = a_loc.split(", ")
                    except ValueError:
                        # Description not in "City, ST: Name" form; the old
                        # bare 'except:' hid every error here.
                        continue
                    # .get() tolerates an unknown wban (the bare except
                    # used to swallow the KeyError).
                    if a_state != wban_state.get(wban):
                        continue
                found_wban = wban
                break  # take weather from first matching data file for the airport
            if found_wban is not None:
                # Stop scanning later years too; the old code only broke out
                # of the inner loop, so a later year could overwrite the
                # "first" match.
                break
        if found_wban is not None:
            output.writerow((a, wban_call[found_wban], airport_descr[a], wban_descr[found_wban], found_wban))
| Python |
import csv
import sys
import glob
import os
import os.path
def mean(data):
    """Return the arithmetic mean of data, or None when there are fewer
    than 10 observations (too few to be statistically meaningful).

    Booleans count as 1/0, so this also computes the rate at which a
    flag is set.
    """
    if len(data) < 10:
        return None
    # sum() already treats True/False as 1/0, so the original's explicit
    # bool-to-int conversion loop was redundant.
    return sum(data) / float(len(data))
def percentiles(data):
    """Return the (15th, 50th, 85th) percentile values of data as a tuple,
    or (None, None, None) when there are fewer than 10 observations.

    Fixes over the original: the short-data result is a tuple like the
    normal result (it used to be a list), and the caller's list is no
    longer sorted in place.
    """
    if len(data) < 10:
        return (None, None, None)
    ranked = sorted(data)
    n = len(ranked)
    return (ranked[int(n * .15)], ranked[int(n * .50)], ranked[int(n * .85)])
# Load airport weather CSV file into a hashtable.
# Weather[airport][YYYYMMDD][flag] -> bool, for the six event flags.
Weather = { }
with open('data/airport_weather.csv') as f :
    data = csv.DictReader(f)
    for line in data :
        if not line["airport"] in Weather :
            Weather[line["airport"]] = { }
        Weather[line["airport"]][line["date"]] = { }
        for w in ('fog', 'rain', 'snow', 'hail', 'thunder', 'tornado') :
            # The CSV stores Python bool reprs; compare against 'True'.
            Weather[line["airport"]][line["date"]][w] = (line[w] == 'True')

# The weather segments.  Index 0 ("all") covers every flight; the rest
# split flights by weather at the origin, destination, or either end.
obs_cols = ["all", "origin_any", "origin_fog", "origin_rain", "origin_snow", "origin_hail", "origin_thunder", "origin_tornado", "dest_any", "dest_fog", "dest_rain", "dest_snow", "dest_hail", "dest_thunder", "dest_tornado", "either_any"]

# Initialize the output file. The first set of column headers
# must match the fields stored in the filenames as created by
# splitdata.py.
writer = csv.writer(open('data/ontime.csv', 'w'), delimiter=',', quotechar='"')
writer.writerow(
    ['origin', 'dest', 'carrier', 'flightnum', 'dayofweek', 'hour', 'holiday',
     'firstdate', 'lastdate',
     'condition',
     'count', 'pct_cancel', 'pct_20mindelay', 'pct_ontime', 'delay_15thpctile', 'delay_median', 'delay_85thpctile']
    )
# Go through each of the spliced data files. Each spliced file gets summarized
# as a single row in the output file.
for fn in glob.glob('data/tmp/*.csv') :
    # Extract out the part of the file name that serves as the
    # primary key fields for the statistics we will generate
    # from the file.
    key = fn[len('data/tmp/'):len(fn)-len('.csv')].split(",")
    first_flight = None
    last_flight = None
    # Per-condition accumulators: for each obs_cols entry, a pair of
    # observation lists (condition-false, condition-true).
    cancelled = [([],[]) for x in obs_cols]
    delayed = [([],[]) for x in obs_cols]
    ontime = [([],[]) for x in obs_cols]
    arrdelay = [([],[]) for x in obs_cols]
    with open(fn) as f :
        data = csv.DictReader(f, delimiter=',', quotechar='"')
        for line in data :
            # Track the date range covered by this file (ISO dates sort
            # correctly as strings).
            if first_flight == None or line["FL_DATE"] < first_flight :
                first_flight = line["FL_DATE"]
            if last_flight == None or line["FL_DATE"] > last_flight :
                last_flight = line["FL_DATE"]
            # If we don't have origin and destination weather data
            # for this flight, we'll just include it in the total
            # column.
            # NOTE(review): this default list has 17 entries but obs_cols
            # has 16; the extra trailing None is harmless because None
            # matches neither the w[i] == False nor w[i] == True branch.
            w = [
                True,
                None, None, None, None, None, None, None, None,
                None, None, None, None, None, None, None, None
                ]
            if line["ORIGIN"] in Weather and line["DEST"] in Weather :
                d = line["FL_DATE"].replace("-", "")
                if d in Weather[line["ORIGIN"]] and d in Weather[line["DEST"]] :
                    # Binary weather observations for this day.
                    w = [
                        True,
                        Weather[line["ORIGIN"]][d]["fog"] or Weather[line["ORIGIN"]][d]["rain"] or Weather[line["ORIGIN"]][d]["snow"] or Weather[line["ORIGIN"]][d]["hail"] or Weather[line["ORIGIN"]][d]["thunder"] or Weather[line["ORIGIN"]][d]["tornado"],
                        Weather[line["ORIGIN"]][d]["fog"],
                        Weather[line["ORIGIN"]][d]["rain"],
                        Weather[line["ORIGIN"]][d]["snow"],
                        Weather[line["ORIGIN"]][d]["hail"],
                        Weather[line["ORIGIN"]][d]["thunder"],
                        Weather[line["ORIGIN"]][d]["tornado"],
                        Weather[line["DEST"]][d]["fog"] or Weather[line["DEST"]][d]["rain"] or Weather[line["DEST"]][d]["snow"] or Weather[line["DEST"]][d]["hail"] or Weather[line["DEST"]][d]["thunder"] or Weather[line["DEST"]][d]["tornado"],
                        Weather[line["DEST"]][d]["fog"],
                        Weather[line["DEST"]][d]["rain"],
                        Weather[line["DEST"]][d]["snow"],
                        Weather[line["DEST"]][d]["hail"],
                        Weather[line["DEST"]][d]["thunder"],
                        Weather[line["DEST"]][d]["tornado"],
                        Weather[line["ORIGIN"]][d]["fog"] or Weather[line["ORIGIN"]][d]["rain"] or Weather[line["ORIGIN"]][d]["snow"] or Weather[line["ORIGIN"]][d]["hail"] or Weather[line["ORIGIN"]][d]["thunder"] or Weather[line["ORIGIN"]][d]["tornado"] \
                        or Weather[line["DEST"]][d]["fog"] or Weather[line["DEST"]][d]["rain"] or Weather[line["DEST"]][d]["snow"] or Weather[line["DEST"]][d]["hail"] or Weather[line["DEST"]][d]["thunder"] or Weather[line["DEST"]][d]["tornado"],
                        ]
            # Mark off whether the flight was cancelled/diverted or delayed.
            # NOTE(review): assumes non-cancelled flights always carry a
            # numeric ARR_DELAY (float('') would raise) -- confirm in data.
            can = line["CANCELLED"] == "1.00" or line["DIVERTED"] == "1.00"
            isdelayed = not can and (float(line["ARR_DELAY"]) >= 20.0)
            isontime = not can and (float(line["ARR_DELAY"]) <= 5.0)
            for i in range(len(w)) :
                if w[i] == False :
                    cancelled[i][0].append(can)
                    delayed[i][0].append(isdelayed)
                    ontime[i][0].append(isontime)
                elif w[i] == True :
                    cancelled[i][1].append(can)
                    delayed[i][1].append(isdelayed)
                    ontime[i][1].append(isontime)
            # Add arrival delay time info.
            if line["ARR_DELAY"] != "" :
                for i in range(len(w)) :
                    if w[i] == False :
                        arrdelay[i][0].append(float(line["ARR_DELAY"]))
                    elif w[i] == True :
                        arrdelay[i][1].append(float(line["ARR_DELAY"]))
    # Emit one output row per (condition, no/yes) cell for this file.
    for obs in range(len(obs_cols)) :
        for yn in (0, 1) :
            if obs == 0 and yn == 0 : # the "all" condition represents all flights and has no False part
                continue
            row = []
            row.extend(key)
            row.append(first_flight)
            row.append(last_flight)
            o = obs_cols[obs]
            if yn == 0 :
                o += "_no"
            elif obs > 0 :
                o += "_yes"
            row.append(o)
            # Drop rows where we have insufficient data to
            # do any analysis.
            if len(cancelled[obs][yn]) < 3 :
                continue
            row.append(len(cancelled[obs][yn]))
            row.append(mean(cancelled[obs][yn]))
            row.append(mean(delayed[obs][yn]))
            row.append(mean(ontime[obs][yn]))
            row.extend(percentiles(arrdelay[obs][yn]))
            writer.writerow(row)
| Python |
#!/usr/bin/python -S
"""
curl_.py
"""
import os
import sys
import time
import urllib2
import tnet
class Error(Exception):
    """Expected, handled failure; reported as a message with exit status 1."""
    pass
def log(msg, *args):
    """printf-style logging to stderr with a 'curl_: ' prefix."""
    if args:
        msg = msg % args
    # TODO: Sometimes when running through xmap.py, I get this -- EWOULDBLOCK.
    # Why? Oh because in xmap.py, I capture stderr and set it to nonblocking.
    # Why doesn't that happen with stdout? I don't have much output here, but
    # maybe it's because stderr is unbuffered?
    # File "./sleep_.py", line 25, in log
    # print >>sys.stderr, msg
    # IOError: [Errno 11] Resource temporarily unavailable
    print >>sys.stderr, 'curl_: ' + msg
def CheckNonblocking():
    """Log whether our stdout fd currently has O_NONBLOCK set (debug aid)."""
    import fcntl
    stdout_fd = sys.stdout.fileno()
    status_flags = fcntl.fcntl(stdout_fd, fcntl.F_GETFL, 0)
    log('stdout nonblocking? %s', status_flags & os.O_NONBLOCK)
def inner_main_iter(argv):
    """
    One iteration of batch main: fetch each URL in argv and copy the
    response body to stdout.  The magic arguments BAD and CRASH simulate
    a handled and an unhandled failure, for testing.
    """
    log('argv %s', argv)
    CheckNonblocking()
    for url in argv:
        # Here we have a known error, which is handled.
        if url == 'BAD':
            raise Error("Got BAD")
        # Unhandled error.
        if url == 'CRASH':
            raise RuntimeError("Got CRASH")
        log('url %s', url)
        f = urllib2.urlopen(url)
        s = f.read()
        print s
        log('wrote %d bytes', len(s))
    CheckNonblocking()
def main_iter(argv):
    """Error handling wrapper.  Returns a process-style exit status."""
    try:
        inner_main_iter(argv)
        return 0
    except Error, e:
        # Expected failure: report the message without a traceback.
        print >> sys.stderr, e.args[0]
        return 1
def loop(argv):
    """PGI server loop: read tnet-encoded requests from stdin, run
    main_iter() for each, and frame stdout/stderr output with the PGI
    boundary marker.  Returns an exit code."""
    boundary = os.getenv('PGI_BOUNDARY')
    assert boundary, 'PGI_BOUNDARY required'
    pid = os.getpid()
    # Simulate slow startup.
    log('Hello from curl_.py, pid %d. Sleeping 1 second', pid)
    time.sleep(1)
    while True:
        log('waiting for PGI input')
        try:
            request = tnet.load(sys.stdin)
        except EOFError:
            break
        # Write header
        header = {'stdout_boundary': boundary}
        sys.stdout.write(tnet.dumps(header))
        # Call main_iter
        log('got %r', request)
        argv = request['argv']
        status = main_iter(argv)
        log('status = %d', status)
        # End stdout -- WITH NEWLINE
        print boundary
        # Write trailer
        trailer = {'status': status}
        sys.stdout.write(tnet.dumps(trailer))
        sys.stdout.flush()
        log('flushed')
        # ISSUE: We could write more than we're entitled to. I think 'waiting for
        # PGI input' is going to be lost.
        print >>sys.stderr, boundary
    return 0
def main(argv):
    """Dispatch: act as a persistent PGI server when $PGI is set,
    otherwise handle the command line once and exit."""
    CheckNonblocking()
    if os.getenv('PGI'):
        return loop(argv)
    return main_iter(argv[1:])

if __name__ == '__main__':
    sys.exit(main(sys.argv))
| Python |
#!/usr/bin/python -S
"""
Usage:
fly [options] run [--] <dir> <args>...
fly [options] stop <dir>
fly [options] state <dir>
fly -h | --help
fly --version
Actions:
run Run the given command. If a process doesn't exist already, it will
start it. It won't start more than one process per state directory.
stop Stop a process. The only reason to use this is to reclaim resources
on your machine.
state Show process states.
Options:
-t --timeout=SECS How long to wait for a lock.
-p --poll-interval=SECS How often to check for failed processes.
Example:
$ fly run -- ~/fly/count --sleep 1 # Run 'count', passing it a flag.
$ fly stop ~/fly/count # Stop it
$ fly state ~/fly/ # Show state
A command 'fly run -- ~/fly/count' is usually wrapped in a shell script called
'count'.
"""
# TODO:
#
# - ISSUE: If you Ctrl-C, then data can be sitting around in the pipe without
# getting read. And then it will appear the NEXT time. Need to fix this up.
#
# - Mitigated for now
# - BUG: same issue as xmap. CHILD process stdout can get blocked with
# EWOULDBLOCK. Can the child use blocking I/O and the parent use
# nonblocking?
#
# - Proxy stdin. It should just be a field like argv? I guess there are tools
# which take small stdin. This requires more "wrapping" of main(). main()
# should probably look like main(argv, stdin) then.
# - stdin_boundary
# - stdin
#
# - Test out fly with xmap.
# - Use case: Load some data in memory and do a join?
# - That's a use case for xmap alone.
# - Load some big data and do queries from the command line.
# - Some pre-aggregations? e.g. guar pattern stats
# - If it takes really long, should you init the process? I think that's
# better.
#
# - ISSUE: If the child process unexpectedly exits (e.g. a Python stack trace),
# then the fly error can be obscure -- you can get "Interrupted system call"
# on tnet read. This is with count_.py.
#
# Helpers:
# - Install script. Generates:
# fly run state/PROG -- "$@"
#
# IDEA: You could proxy it? Fly itself could start as a PGI process. For a
# given run, it would record all processes that it started. And then you can
# kill them, and kill fly. Is it fly_?
# Or maybe fly?
#
#
# Concurrency:
#
# Uses <state-dir>/mutex.
#
# User considerations:
#
# One process per user, per machine. So the state root could be in ~ if it's
# local dir. If ~ is NFS, then it should be somewhere else on local disk.
__author__ = 'Andy Chu'
import errno
import fcntl
import optparse
import os
import select
import shutil
import signal
import subprocess
import sys
import threading
import time
import tnet
class Error(Exception):
    """Fatal fly-level error; caught at top level and reported (exit 121)."""
    pass
# ANSI terminal escape codes used by ColorOutputWriter.
ANSI_BOLD = '\033[1m'
ANSI_RESET = '\033[0;0m'
ANSI_RED = '\033[31m'

# Global writer for log/error output; assigned in main() once --color is known.
_output_writer = None
class OutputWriter(object):
    """Base class for fly's log output; subclasses decide formatting/color."""
    def __init__(self, verbose):
        # When False, log() output is suppressed (error() still prints).
        self.verbose = verbose
class PlainOutputWriter(OutputWriter):
    """Uncolored log/error output on stderr."""
    def log(self, s):
        # Verbose-gated.
        if self.verbose:
            self._log(s)
    def _log(self, s):
        print >>sys.stderr, 'fly: ' + s
    # error() always prints, bypassing the verbosity check.
    error = _log
    def stderr(self, s):
        """Pass a chunk of the child's stderr through verbatim."""
        # No newline
        sys.stderr.write(s)
class ColorOutputWriter(OutputWriter):
    """Like PlainOutputWriter, but highlights fly's own output with ANSI
    codes so it stands out from the child's."""
    def log(self, s):
        # Verbose-gated.
        if self.verbose:
            self._log(s)
    def _log(self, s):
        print >>sys.stderr, ANSI_BOLD + ANSI_RED + 'fly: ' + ANSI_RESET + s
    # error() always prints, bypassing the verbosity check.
    error = _log
    def stderr(self, s):
        """Pass a chunk of the child's stderr through, rendered bold."""
        # No newline
        sys.stderr.write(ANSI_BOLD + s + ANSI_RESET)
def log(msg, *args):
    """printf-style logging routed through the global output writer."""
    _output_writer.log(msg % args if args else msg)
def IsRunning(pid):
# Unix signal 0 is a pseudo-signal that can be used to check if a process with
# the given PID exists.
try:
os.kill(pid, 0)
except OSError, e:
if e.errno == errno.ESRCH:
return False
else:
raise
return True
def ReadPid(pid_name):
    """Return the PID stored in pid_name, or None when the file can't be
    opened.  Raises Error if the contents aren't an integer."""
    try:
        pid_file = open(pid_name)
    except IOError:
        return None
    try:
        contents = pid_file.read()
    finally:
        pid_file.close()
    try:
        return int(contents)
    except ValueError:
        raise Error('Invalid contents of PID file %r: %r' % (pid_name, contents))
def GetWorkerState(bundle_dir):
    """
    Check the process state and return the PID and handles to open pipes.
    Returns either:
      None if no process
      or (pid, in_fd, out_fd, err_fd) ?
    """
    id_str = 'live'
    # NOTE: You don't need the PID in a lot of cases
    # PID checks if the state is "finished", since that is written AFTER
    # stdin/etc.
    pid_name = os.path.join(bundle_dir, id_str, 'pid.txt')
    pid = ReadPid(pid_name)
    # No PID file -- assume there is no process running, and we have to create
    # it
    if pid is None:
        return None
    in_name = os.path.join(bundle_dir, id_str, 'stdin')
    out_name = os.path.join(bundle_dir, id_str, 'stdout')
    err_name = os.path.join(bundle_dir, id_str, 'stderr')
    # NOTE(review): O_RDWR on these FIFOs presumably keeps open() from
    # blocking and avoids EOF while we hold both ends -- confirm; see the
    # "Open for read or write only?" TODO in StartAndCreateState.
    in_fd = os.open(in_name, os.O_RDWR)
    out_fd = os.open(out_name, os.O_RDWR)
    err_fd = os.open(err_name, os.O_RDWR)
    return pid, in_fd, out_fd, err_fd
def StartAndCreateState(bundle_dir):
    """Launch the bundle's PGI worker and create its 'live' state dir
    (three named pipes plus pid.txt).  Returns (pid, in_fd, out_fd, err_fd)."""
    # 'command' must be a symlink to the program (or the program itself)
    unix_argv = [os.path.join(bundle_dir, 'command')]
    log('Starting %s', unix_argv)
    # No running process should be creating a directory with this name on the
    # machine. We can't use the CHILD (PGI) process ID because we need to create
    # the named pipes in this dir before we know its PID!
    pipe_dir = os.path.join(bundle_dir, 'live')
    try:
        os.mkdir(pipe_dir)
    except OSError, e:
        if e.errno == errno.EEXIST:
            # pid.txt doesn't exist but 'live' does. This is technically an
            # inconsistent state, but we can easily bring it back to consistent.
            pass
        else:
            raise
    in_name = os.path.join(pipe_dir, 'stdin')
    out_name = os.path.join(pipe_dir, 'stdout')
    err_name = os.path.join(pipe_dir, 'stderr')
    os.mkfifo(in_name)
    os.mkfifo(out_name)
    os.mkfifo(err_name)
    # TODO: Open for read or write only?
    in_fd = os.open(in_name, os.O_RDWR)
    out_fd = os.open(out_name, os.O_RDWR)
    err_fd = os.open(err_name, os.O_RDWR)
    # Add PGI
    env = dict(os.environ)
    env['PGI'] = '1'
    # Suggestion -- child processes can override it
    env['PGI_BOUNDARY'] = '__pgi_end__'
    # It appears we don't need to daemonize the PGI process, because it's
    # connected to named pipes rather than the terminal. So it shouldn't die when
    # the terminal goes away.
    p = subprocess.Popen(unix_argv, stdin=in_fd, stdout=out_fd, stderr=err_fd, env=env)
    # Record the child's PID so later invocations can find/reuse it.
    pid_name = os.path.join(pipe_dir, 'pid.txt')
    f = open(pid_name, 'w')
    f.write(str(p.pid) + '\n')
    f.close()
    return p.pid, in_fd, out_fd, err_fd
# Size of each os.read() when draining the worker's pipes.
# TODO: Make it the pipe size?
CHUNK_SIZE = 4096

def ReadLines(fd, boundary, lines, write_func):
    """Drain a nonblocking fd, emitting complete lines via write_func until
    a line equal to 'boundary' (which includes its newline) is seen.

    'lines' is the carry-over buffer from the previous call and may end
    with a partial line.  Returns (done, leftover); once the boundary has
    been consumed, leftover accumulates the bytes after it (the tnet
    trailer) and nothing more is written.

    Fixes over the original version:
      - already-written lines are removed from the buffer, so repeated
        calls no longer re-emit them;
      - a line split across two reads is reassembled before the boundary
        test, so the boundary can't be missed and fragments aren't emitted;
      - EOF (empty read) terminates the loop instead of spinning;
      - unexpected OSErrors are re-raised instead of being ignored.
    """
    def complete(line):
        # True when 'line' carries its own line terminator; works for both
        # str and bytes without hard-coding a '\n' literal.
        return line.splitlines()[0] != line

    done = False
    while True:
        try:
            chunk = os.read(fd, CHUNK_SIZE)
        except OSError as e:
            if e.errno == errno.EWOULDBLOCK:
                break  # pipe drained for now
            raise
        if not chunk:
            break  # EOF
        new_lines = chunk.splitlines(True)
        # Reassemble a line that was split across chunks.
        if lines and not complete(lines[-1]) and new_lines:
            new_lines[0] = lines.pop() + new_lines[0]
        lines += new_lines
        if done:
            continue  # only accumulating the trailer now
        while lines:
            line = lines[0]
            if line.endswith(boundary):
                done = True
                lines.pop(0)  # drop the boundary marker itself
                break
            if not complete(line):
                break  # partial line; wait for the rest
            write_func(lines.pop(0))
    return done, lines
# BUG: I think we are setting the CHILD's fd too somehow. Need to sort it out.
def SetNonblocking(fd):
    """Add O_NONBLOCK to the fd's status flags."""
    fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
def SetBlocking(fd):
    """Clear O_NONBLOCK from the fd's status flags."""
    fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) & ~os.O_NONBLOCK)
def Run(argv, bundle_dir, writer, exit_fd, poll_thread):
    """Send one request to the bundle's PGI worker (starting it if needed),
    stream its stdout/stderr back to ours, and return the worker's status."""
    # Desired invariant:
    #
    # EITHER:
    # 1) There are no PGI processes and the 'live' dir is empty
    # 2) There is one PGI process and the 'live' dir contains 3 named pipes, and a
    # 'pid.txt' file.
    #
    # Inconsistent conditions that need to be fixed:
    #
    # 1) There is a 'live' dir (and possibly some named pipes), but no process is
    # up. This can be checked by IsRunning().
    # 2) There is a process up, but no 'live' dir. This can't be detected. Thus,
    # in the 'shutdown' action, we should make sure the process is really killed
    # before removing the state dir.
    result = GetWorkerState(bundle_dir)
    start_new = True
    if result:
        pid, in_fd, out_fd, err_fd = result
        # Check that it's running
        if IsRunning(pid):
            start_new = False
            log('Reusing existing process with PID %d', pid)
        else:
            log('pid.txt contains %d but process is not up; cleaning up', pid)
            RemoveLive(bundle_dir)
    if start_new:
        pid, in_fd, out_fd, err_fd = StartAndCreateState(bundle_dir)
        log('Started new process with PID %d', pid)
    # Start watching the pid now
    poll_thread.set_pid(pid)
    poll_thread.start()
    env = dict(os.environ) # os.environ is apparently not a plain dict
    request = {'argv': argv, 'cwd': os.getcwd(), 'env': env}
    req_str = tnet.dumps(request)
    log('Writing request %r', req_str)
    os.write(in_fd, req_str)
    # Hm this is dumb:
    # http://stackoverflow.com/questions/2804543/read-subprocess-stdout-line-by-line
    #p.stdin.flush()
    #outfile = os.fdopen(out_fd)
    #errfile = os.fdopen(err_fd)
    log('Waiting for initial TNET record on stdout')
    # TODO: See bug above. For the 'stdout' case, catch EINTR here?
    obj = tnet.loadfd(out_fd)
    if 'stdout' in obj: # Whole response was returned
        sys.stdout.write(obj['stdout'])
        # Hm -- should we process stderr in the same way? Some things like
        # "Segmentation fault" are not controlled by the application. It isn't
        # likely that they can redirect it to stderr.
        # Should we just look at ACTUAL? I think that may make more sense. We
        # could enter the select loop. Need some examples.
        s = obj.get('stderr')
        if s:
            writer.stderr(s)
        status = obj.get('status', 0)
    elif 'stdout_boundary' in obj: # Response will be streamed
        boundary = obj['stdout_boundary']
        boundary += '\n'
        log('stdout_boundary = %r', boundary)
        log('Setting stdout/stderr nonblocking')
        SetNonblocking(out_fd)
        SetNonblocking(err_fd)
        # Enter select loop
        stdout_done = False
        stdout_lines = []
        stderr_done = False
        stderr_lines = []
        # NOTE(review): initialized here because a non-EINTR select.error
        # falls out of the except below and re-reads this value -- that
        # path silently retries.
        readable = []
        while not stdout_done or not stderr_done:
            fd_set = [out_fd, err_fd, exit_fd]
            log('asking : %s', fd_set)
            try:
                readable, _, _ = select.select(fd_set, [], [])
            except select.error, e:
                # If SIGCHLD occurs while we're waiting, EINTR will happen. Are there
                # any other cases where EINTR happens?
                if e.args[0] == errno.EINTR:
                    # TODO: Add pid to message
                    # TODO: Make sure pipes are empty?
                    # TODO: Clean up PID file and so forth
                    log('Got EINTR')
                    try:
                        RemoveLive(bundle_dir)
                    finally:
                        raise Error('Got unexpected child death (EINTR)')
            log('readable: %s', readable)
            # If we're not the parent, we should get an error here, rather than when
            # waiting for select().
            # TODO: We could disable interrupts in select() and rely on the pipe?
            if exit_fd in readable:
                try:
                    RemoveLive(bundle_dir)
                finally:
                    raise Error('Got unexpected child death (exit_fd = %d)' % exit_fd)
            # ISSUE: we can read more than we were entitled to!
            if err_fd in readable:
                # stderr should be unbuffered, so just read one line. We can't use
                # readline() because of buffering again. Doh.
                try:
                    line = ''
                    while True:
                        c = os.read(err_fd, 1)
                        line += c
                        if c == '\n':
                            break
                except OSError, e:
                    if e.errno == errno.EWOULDBLOCK:
                        log('EWOULDBLOCK')
                        # NOTE(review): this break exits the outer select
                        # loop, not just the stderr read -- confirm intent.
                        break
                if line.endswith(boundary):
                    log('STDERR DONE')
                    stderr_done = True
                if not stderr_done:
                    writer.stderr(line)
                #stderr_done, stderr_lines = ReadLines(
                # err_fd, boundary, stderr_lines, writer.stderr)
            # TODO: Could add a flag to buffer stdout like stderr, at the expense of
            # speed.
            if out_fd in readable:
                stdout_done, stdout_lines = ReadLines(
                    out_fd, boundary, stdout_lines, sys.stdout.write)
        # Whatever followed the stdout boundary is the tnet trailer.
        rest = ''.join(stdout_lines)
        log('Reading trailer from %r', rest)
        trailer, rest = tnet.loads_prefix(rest)
        if rest:
            log('WARNING: Unexpected trailing bytes on stdout: %r', rest)
        status = trailer.get('status', 0)
    else:
        raise Error(
            "Expected 'stdout' or 'stdout_boundary' to be in response %r" % obj)
    # set back to blocking
    SetBlocking(out_fd)
    SetBlocking(err_fd)
    log('Done')
    return status
def RemoveLive(bundle_dir):
    """Delete bundle_dir/live, tolerating failures.

    ignore_errors covers both ENOENT (the dir was never created) and the
    occasional "directory not empty" (errno 39) failures seen from rmtree.
    """
    live_dir = os.path.join(bundle_dir, 'live')
    shutil.rmtree(live_dir, ignore_errors=True)
    log('Removed %s', live_dir)
def Stop(bundle_dir):
    """Send SIGTERM to the bundle's worker (if any), wait briefly for it
    to die, and remove the 'live' state dir.

    This can also be implemented in shell like:
    $ find <dir> -name pid.txt | xargs cat | xargs kill
    """
    live_dir = os.path.join(bundle_dir, 'live')
    pid_filename = os.path.join(live_dir, 'pid.txt')
    try:
        f = open(pid_filename)
    except IOError, e:
        if e.errno == errno.ENOENT:
            log("%r doesn't exist", pid_filename)
        else:
            log("Couldn't open %r", pid_filename)
    else:
        pid_str = f.read()
        f.close()
        try:
            pid = int(pid_str)
        except ValueError:
            raise Error("Invalid PID file contents %r" % pid_str)
        try:
            log('Sending SIGTERM to %d', pid)
            os.kill(pid, signal.SIGTERM)
        except OSError, e:
            if e.errno == errno.ESRCH:
                log("Tried to kill PID %d but it isn't running.", pid)
        else:
            # Poll up to ~1 second for the process to actually go away.
            for i in xrange(10):
                if not IsRunning(pid):
                    log('Process %d successfully stopped.', pid)
                    break
                time.sleep(0.1)
            else:
                log("Process %d didn't stop after 1 second.", pid)
    # Remove it even if we didn't kill a process
    RemoveLive(bundle_dir)
def State(root_dir):
    """Print a table of bundle state (PID, running?, command, dir).
    Returns an exit code.

    Args:
      root_dir: the PARENT of fly bundles
    """
    rows = []
    for name in os.listdir(root_dir):
        bundle_dir = os.path.join(root_dir, name)
        pid_name = os.path.join(bundle_dir, 'live/pid.txt')
        command = os.path.join(bundle_dir, 'command')
        # Anything without a 'command' symlink isn't a fly bundle; skip it.
        try:
            target = os.readlink(command)
        except OSError:
            continue
        pid = ReadPid(pid_name)
        rows.append((bundle_dir, target, pid))
    if not rows:
        _output_writer.error("No bundles in %r" % root_dir)
        return 1
    # TODO: print structured version of this table too
    template = '%5s %s %-20s %s'
    print template % ('PID', '?', 'command', 'bundle')
    for bundle_dir, target, pid in rows:
        if pid:
            pid_str = str(pid)
            if IsRunning(pid):
                r = 'Y'
            else:
                # TODO: flag this for cleanup
                r = 'N'
        else:
            pid_str = ' '
            r = ' '
        print template % (pid_str, r, target, bundle_dir)
    return 0
class Mutex(object):
    """Inter-process lock on bundle_dir/mutex, using POSIX lockf()."""
    def __init__(self, bundle_dir):
        path = os.path.join(bundle_dir, 'mutex')
        try:
            # Need to open for write to do lockf() !
            self.f = open(path, 'w')
        except IOError:
            raise Error("Invalid installation: %r doesn't exist" % path)
    def acquire(self):
        """Block until the exclusive lock is held."""
        log('Waiting for lock on %r', self.f)
        fcntl.lockf(self.f.fileno(), fcntl.LOCK_EX)
        # For some reason, flock() create locks that are inherited by the child
        # process! (even though documentation says otherwise). So it is unusable
        # here. Not sure what the real difference is between lockf and flock, but
        # lockf works.
        #fcntl.flock(self.f.fileno(), fcntl.LOCK_EX)
    def release(self):
        # Closing the file descriptor drops the lockf() lock.
        self.f.close()
        log('RELEASED mutex')
# Number of children reaped so far (diagnostics only).
_wait_count = 0

def SigchldHandler(signum, frame):
    """
    Reap dead children so they don't become zombies.

    Python runs signal handlers on its interpreter loop, not from the actual C
    signal handler, so we can do real work here.
    """
    # signum is always 17, probably not useful
    # BUG: if there is an exception in the child (file doesn't exist), then
    # subprocess calls os.waitpid, reaping the child. we get the signal here but
    # have nothing to reap.
    global _wait_count
    log('WAITING (count = %d)', _wait_count)
    # In case signals are coalesced, do it multiple times
    # I think this is correct in Torn, but is actually not needed here, because we
    # only start one child process! TODO: fix
    while True:
        try:
            pid, status = os.wait()
        except OSError, e:
            if e.errno == errno.ECHILD: # "No child processes"
                break
            else:
                raise
        log('pid = %s, status =%s', pid, status)
        _wait_count += 1
    log('WAITED %d times', _wait_count)
class ProcessPoller(threading.Thread):
    """Thread that watches a PID and signals when it disappears.

    Polling the child is a workaround: the alternative was a dedicated
    "fly parent" process owning all PGI children and relaying SIGCHLD;
    README.txt explains why that design was rejected.  When the watched
    process goes away, one byte is written to exit_pipe_write_fd so the
    select() loop in Run() wakes up.
    """
    def __init__(self, poll_interval, exit_pipe_write_fd):
        threading.Thread.__init__(self)
        self.pid = None  # filled in later via set_pid()
        self.poll_interval = poll_interval
        self.exit_pipe_write_fd = exit_pipe_write_fd

    def set_pid(self, pid):
        """This is set after construction."""
        self.pid = pid

    def run(self):
        assert self.pid is not None
        while True:
            time.sleep(self.poll_interval)
            log('Checking PID %d', self.pid)
            # Existence probe via signal 0.
            if not IsRunning(self.pid):
                os.write(self.exit_pipe_write_fd, 'x')  # wake the select loop
def CreateOptionsParser():
    """Build the optparse parser for fly's command line flags."""
    parser = optparse.OptionParser()
    # NOTE(review): "However often" in the help text below reads like a
    # typo for "How often" (left as-is: it's a runtime string).
    parser.add_option(
        '-p', '--poll-interval', dest='poll_interval', type='float', default=0.5,
        help='However often to poll for the existence of the worker process. This '
        'is only necessary for protection against PGI processes that '
        'improperly exit, rather than reporting errors through stdout/stderr. '
        "If you're sure that the PGI process won't exit unexpectedly, it's "
        'safe to set this to 0 to disable polling.')
    parser.add_option(
        '-t', '--timeout', dest='timeout', type='int', default=3,
        help='Number of seconds to wait to acquire a lock.')
    # TODO: Timeout action? You could launch a real copy of the original process
    # after a timeout?
    # TODO: Hook verbose up
    parser.add_option(
        '-v', '--verbose', dest='verbose', action='store_true', default=False,
        help='Show verbose logging output.')
    parser.add_option(
        '--color', dest='color', default='always',
        choices=['none', 'always'],
        help='Whether to print stderr in color.')
    # NOTE: stdout protocol is detected by the first record from the PGI process.
    parser.add_option(
        '--stdin', dest='stdin', default='none',
        choices=['none', 'record', 'streaming'],
        help='How to handle stdin. "none" disallows stdin. "record" buffers '
        'the entire stdin stream in the fly process, then passes it '
        'to the PGI process. "streaming" tells fly to stream its stdin '
        'to the PGI process stdin.')
    return parser
def main(argv):
    """Parse flags and dispatch to Run/Stop/State.  Returns an exit code."""
    (options, argv) = CreateOptionsParser().parse_args(argv)
    # This has to come first because it's used for handling all errors.
    global _output_writer
    if options.color == 'always':
        _output_writer = ColorOutputWriter(options.verbose)
    elif options.color == 'none':
        _output_writer = PlainOutputWriter(options.verbose)
    else:
        raise AssertionError
    try:
        action = argv[0]
        dir_name = argv[1]
    except IndexError:
        raise Error('Usage: fly <action> <state dir> [args]')
    # We need to call os.wait() to avoid zombie processes. This is because we
    # NEVER call p.wait() on the subprocess handle, as most programs do.
    # (Instead, this fly process dies and orphans the PGI process.)
    #
    # To keep the logic simple, we don't try to notify of process death in the
    # signal handler. This would only happen on invocation 1 because that's the
    # only time that fly is the parent of the PGI process.
    signal.signal(signal.SIGCHLD, SigchldHandler)
    exit_pipe_read_fd, exit_pipe_write_fd = os.pipe()
    # NOTE(review): with --poll-interval=0, poll_thread is never assigned
    # but is still passed to Run() below -- that path raises NameError.
    if options.poll_interval:
        poll_thread = ProcessPoller(options.poll_interval, exit_pipe_write_fd)
        poll_thread.setDaemon(True)
    else:
        log('Not checking for PGI process death')
    if action == 'run':
        bundle_dir = dir_name
        m = Mutex(bundle_dir)
        m.acquire() # TODO: can add a timeout on acquisition
        try:
            return Run(argv[2:], bundle_dir, _output_writer, exit_pipe_read_fd,
                poll_thread)
        finally:
            # TODO: technically this could be released when stdin is done writing, so
            # another process could write on stdin. But do that later.
            m.release()
    elif action == 'stop':
        bundle_dir = dir_name
        m = Mutex(bundle_dir)
        m.acquire()
        try:
            return Stop(bundle_dir)
        finally:
            m.release()
    elif action == 'state':
        return State(dir_name)
    else:
        raise Error('Unknown action %r' % action)
    # Other actions:
    # - Stop all processes? Nah that should be separate, can be done with "find"
    # over state dir root.
if __name__ == '__main__':
    try:
        sys.exit(main(sys.argv[1:]))
    except Error, e:
        # Expected errors print a plain message, no traceback.
        _output_writer.error(e.args[0])
        # A somewhat random 7-bit return value. This is used to distinguish fly
        # errors from errors in the called process.
        sys.exit(121)
| Python |
#!/usr/bin/python -S
"""
See how much slower it is to read char by char vs line-by line.
"""
__author__ = 'Andy Chu'
import os
import sys
import time
class Error(Exception):
    """Base error for this script; caught in __main__ for a clean exit."""
    pass
def main(argv):
    """Copy stdin to stdout using the read style named in argv[1].

    'char' issues unbuffered 1-byte os.read() calls; 'line' uses buffered
    readline() calls.  Returns an exit code.
    """
    style = argv[1]
    stdin_fd = sys.stdin.fileno()
    write = sys.stdout.write
    if style == 'char':
        while True:
            byte = os.read(stdin_fd, 1)
            if byte == '':
                break
            write(byte)
        return 0
    if style == 'line':
        while True:
            text = sys.stdin.readline()
            if text == '':
                break
            write(text)
        return 0
    raise RuntimeError(style)
if __name__ == '__main__':
    try:
        sys.exit(main(sys.argv))
    except Error, e:
        # Error is never raised above; the handler is kept for template symmetry.
        print >> sys.stderr, e.args[0]
        sys.exit(1)
| Python |
#!/usr/bin/python -S
"""
lock_demo.py
"""
__author__ = 'Andy Chu'
import sys
import fcntl
import time
class Error(Exception):
    """Base error for this script."""
    pass


def main(argv):
    """Acquire an exclusive flock on ./lock and hold it for argv[1] seconds.

    Returns an exit code.
    """
    FILE = 'lock'
    # This could just be 'r' if we pre-create the lock file.
    print 'Opening'
    f = open(FILE, "w")
    print 'Waiting for lock'
    # Blocks until no other process holds an exclusive lock on the file.
    fcntl.flock(f.fileno(), fcntl.LOCK_EX)
    print 'Sleeping'
    # Just hold the lock
    time.sleep(int(argv[1]))
    # The lock and file are released implicitly when the process exits.
    return 0


if __name__ == '__main__':
    try:
        sys.exit(main(sys.argv))
    except Error, e:
        print >> sys.stderr, e.args[0]
        sys.exit(1)
| Python |
#!/usr/bin/python -S
"""
interleave.py
"""
__author__ = 'Andy Chu'
import os
import sys
import tnet
class Error(Exception):
    """Base error for this script."""
    pass


def main(argv):
    """Serve PGI requests from stdin using the interleaved-stdout protocol.

    Each response is: a tnet header carrying 'stdout_boundary', raw
    stdout/stderr terminated by the boundary line on each stream, then a
    tnet trailer with the exit status.  Returns an exit code.
    """
    boundary = os.getenv('PGI_BOUNDARY')
    assert boundary, 'PGI_BOUNDARY required'
    while True:
        try:
            request = tnet.load(sys.stdin)
        except EOFError:
            # Parent closed our stdin; shut down cleanly.
            break
        # initial header announcing that raw stdout follows
        header = {'stdout_boundary': boundary}
        sys.stdout.write(tnet.dumps(header))
        # argv[0] of the request is the number of lines to emit.
        lines = int(request['argv'][0])
        for i in xrange(lines):
            print 'stdout: %d' % i
            print >>sys.stderr, 'stderr: %d' % i
        print 'pid', os.getpid()
        # end stdout
        print boundary
        # end stderr
        print >>sys.stderr, boundary
        trailer = {'status': 0}
        sys.stdout.write(tnet.dumps(trailer))
        # This is necessary so the reader is not stuck behind our buffering.
        sys.stdout.flush()
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv))
| Python |
#!/usr/bin/python -S
"""
fly_test.py: Tests for fly.py
"""
import os
import unittest
import fly # module under test
class FlyTest(unittest.TestCase):
    """Integration tests for fly.py.

    NOTE(review): these depend on a live worker under ./fly-state and are
    not hermetic — they will fail outside that environment.
    """

    def testPipes(self):
        # TODO: This test depends on real files.
        result = fly.GetWorkerState('fly-state/curl')
        # This fails if the process isn't up!
        assert result, result
        pid, in_fd, out_fd, err_fd = result
        print 'Writing hi'
        os.write(in_fd, 'hi\n')
        #print 'reading'
        #bytes = os.read(resp, 1)
        #print bytes

    def testIsRunning(self):
        # Smoke test only: prints instead of asserting.
        print fly.IsRunning(0)
        print fly.IsRunning(9999)


if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/python -S
"""
count_.py
Demonstrates the simple protocol, where you just print a record with 'stdout',
instead of a header with 'stdout_boundary', and then the actual stdout.
"""
import os
import json
import sys
import time
import tnet
def log(msg, *args):
    """Write a printf-style message to stderr, prefixed with the program name."""
    if args:
        msg = msg % args
    print >>sys.stderr, 'count_: ' + msg


# This "persists" between invocations of main_iter because the PGI process
# stays alive — demonstrating the whole point of the demo.
count = 0
# The old one goes here.
def main_iter(request):
    """Handle one PGI request and build the response dict.

    Bumps the module-level counter (state that survives across requests)
    and echoes the request back in the response body.
    """
    global count
    count += 1
    log('incremented count to %d', count)
    argv = request['argv']  # unused, but validates the request shape
    body = 'count = %d\n' % count
    body += json.dumps(request, indent=2)
    # TODO: stderr should really go on actual stderr.
    return {
        'stdout': body,
        'stderr': 'exit %d\n' % count,
        'status': count,
    }
# TODO: Factor this out into a library
def loop(argv):
    """Run the persistent PGI request loop until stdin hits EOF.

    Reads tnet-encoded requests from stdin and writes tnet-encoded
    responses to stdout, flushing after each one.  Returns an exit code.
    """
    boundary = os.getenv('PGI_BOUNDARY')
    assert boundary, 'PGI_BOUNDARY required'
    # Simulate slow startup.
    pid = os.getpid()
    log('Hello from count_.py, pid %d. Sleeping 1 second', pid)
    time.sleep(1)
    while True:
        log('waiting for PGI input')
        try:
            request = tnet.load(sys.stdin)
        except EOFError:
            break
        # Serve each request from the directory the client invoked us in.
        cwd = request['cwd']
        os.chdir(cwd)
        response = main_iter(request)
        sys.stdout.write(tnet.dumps(response))
        sys.stdout.flush()
        log('flushed')
        # ISSUE: We could write more than we're entitled to. I think 'waiting for
        # PGI input' is going to be lost.
        print >>sys.stderr, boundary
    return 0
def main(argv):
    """Entry point.  Returns an exit code.

    Under PGI (env var set) run the persistent request loop; otherwise run a
    single iteration in non-persistent mode.
    """
    if os.getenv('PGI'):
        return loop(argv)
    # Run in non-persistent mode.
    # BUG FIX: main_iter() takes a request dict (with 'argv'), not a raw
    # argv list, and it returns a response dict rather than an exit code.
    # The old code crashed here and returned a dict to sys.exit().
    response = main_iter({'argv': argv[1:], 'cwd': os.getcwd()})
    sys.stdout.write(response['stdout'])
    sys.stderr.write(response['stderr'])
    return response['status']
if __name__ == '__main__':
    # Exit status is the invocation counter (PGI mode) or the single status.
    sys.exit(main(sys.argv))
| Python |
#!/usr/bin/python
## mkboard.py -- atmel pio mux utility
##
## Copyright 2006, Brian Swetland. All rights reserved.
## See provided LICENSE file or http://frotz.net/LICENSE for details.
##
import os, sys, string
# pindef -> num, out, pull, pio, sela, selb
# Accumulated PIO controller register values; reset before each board .def
# file is processed and folded together by setup_registers().
reg_output_disable = 0
reg_output_enable = 0
reg_pullup_disable = 0
reg_pullup_enable = 0
reg_pio_disable = 0
reg_pio_enable = 0
reg_select_a = 0
reg_select_b = 0
def setup_registers(pindef):
    """Fold one pin definition into the accumulated PIO register values.

    pindef is (num, out, pull, pio, sela, selb): the pin number plus flags
    for output enable, pull-up enable, PIO (vs peripheral) mode, and the
    A/B peripheral select lines.
    """
    global reg_output_disable, reg_output_enable
    global reg_pullup_disable, reg_pullup_enable
    global reg_pio_disable, reg_pio_enable
    global reg_select_a, reg_select_b
    num, out, pull, pio, sela, selb = pindef
    bit = 1 << num
    mask = ~bit
    # Each feature sets the pin's bit in exactly one register of the
    # enable/disable pair and clears it in the other.
    if out:
        reg_output_enable |= bit
        reg_output_disable &= mask
    else:
        reg_output_enable &= mask
        reg_output_disable |= bit
    if pull:
        reg_pullup_enable |= bit
        reg_pullup_disable &= mask
    else:
        reg_pullup_enable &= mask
        reg_pullup_disable |= bit
    if pio:
        reg_pio_enable |= bit
        reg_pio_disable &= mask
    else:
        reg_pio_enable &= mask
        reg_pio_disable |= bit
    # The peripheral-select registers are only ever set, never cleared.
    if sela:
        reg_select_a |= bit
    if selb:
        reg_select_b |= bit
def import_pindef(fn):
    """Unimplemented placeholder; pin definitions are read by read_pins_def."""
    pass
def read_pins_def(fn, table):
    """Parse a .pins file into `table`, mapping symbolic pin names to pindefs.

    Each non-comment line is "GPIOnn PERIPH_A PERIPH_B".  For every pin we
    register _IN/_IN_PULLUP/_OUT variants plus one entry per peripheral
    function.  Returns an (empty) header-output string for symmetry with
    read_board_def.
    """
    output = ""
    # BUG FIX: iterate the file object directly instead of the legacy
    # xreadlines() method, and close the file deterministically.
    fd = open(fn, 'r')
    try:
        for line in fd:
            line = line.split('#')[0].strip()
            if not line: continue
            (gpio, pa, pb) = line.split()
            num = int(gpio[2:])  # e.g. "PA17" -> 17
            table[gpio + "_IN"] = (num, 0, 0, 1, 0, 0)
            table[gpio + "_IN_PULLUP"] = (num, 0, 1, 1, 0, 0)
            table[gpio + "_OUT"] = (num, 1, 0, 1, 0, 0)
            table[gpio + "_" + pa] = (num, 0, 0, 0, 1, 0)
            table[gpio + "_" + pb] = (num, 0, 0, 0, 0, 1)
    finally:
        fd.close()
    return output
def read_board_def(fn, table):
    """Parse a .def board file, accumulating register values via setup_registers.

    Each line names a pin from `table` (optionally "pin spec = FUNC" to
    override the generated C macro name).  Conflicting uses of the same pin
    number are fatal.  Returns the "#define PIN_..." lines for the header.
    """
    pins = {}
    output = ""
    for n in range(0,32):
        pins[n] = ''
    fd = open(fn,'r')
    for line in fd.xreadlines():
        # Strip comments and blank lines.
        line = line.split('#')[0].strip()
        if not line: continue
        if len(line.split('=')) == 2:
            (line,func) = line.split('=')
            line = line.strip()
            func = func.strip()
        else:
            func = ''
        parts = line.split()
        if len(parts) < 2:
            print "ERROR: invalid definition '%s'" % line
            sys.exit(1)
        if not func:
            # Default macro name: the pin itself for plain IN/OUT, else the
            # peripheral function name.
            if (parts[1] == 'IN') or (parts[1] == 'OUT'):
                func = parts[0]
            else:
                func = parts[1]
        pin = string.join(parts,"_")
        if not table.has_key(pin):
            print "ERROR: pin '%s' does not exist" % pin
            sys.exit(1)
        pindef = table[pin]
        num = pindef[0]
        if pins[num]:
            print "ERROR: pin '%s' conflicts with pin '%s'" % (pin, pins[num])
            sys.exit(1)
        pins[num] = pin
        setup_registers(pindef)
        output += "#define PIN_%-12s (1 << %d)\n" % (func, num)
    return output


# Command line: one .pins pin-definition file followed by one or more .def
# board files; each .def produces a generated C header next to it.
table = {}
output = ""
for fn in sys.argv[1:]:
    if fn.endswith('.pins'):
        if table:
            print "ERROR: only one pin definition file allowed"
            sys.exit(1)
        output = read_pins_def(fn, table)
        continue
    if fn.endswith('.def'):
        if not table:
            print "ERROR: must specify a pin definition file first"
            sys.exit(1)
        # Reset the accumulators (module globals mutated by setup_registers)
        # to their power-on defaults before each board file.
        reg_output_disable = 0xffffffffL
        reg_output_enable = 0L
        reg_pullup_disable = 0L
        reg_pullup_enable = 0xffffffffL
        reg_pio_disable = 0L
        reg_pio_enable = 0xffffffffL
        reg_select_a = 0L
        reg_select_b = 0L
        output = read_board_def(fn, table)
        fd = open(fn[:-4] + ".h", 'w')
        fd.write("/* DO NOT EDIT -- AUTOGENERATED FROM '%s' */\n\n" % fn)
        fd.write("#ifndef __BOARD_DEFINITION_FILE__\n")
        fd.write("#define __BOARD_DEFINITION_FILE__\n\n")
        fd.write(output)
        fd.write("\n")
        fd.write("#define BOARD_OUTPUT_DISABLE 0x%08x\n" % reg_output_disable)
        fd.write("#define BOARD_OUTPUT_ENABLE 0x%08x\n" % reg_output_enable)
        fd.write("#define BOARD_PULLUP_DISABLE 0x%08x\n" % reg_pullup_disable)
        fd.write("#define BOARD_PULLUP_ENABLE 0x%08x\n" % reg_pullup_enable)
        fd.write("#define BOARD_PIO_DISABLE 0x%08x\n" % reg_pio_disable)
        fd.write("#define BOARD_PIO_ENABLE 0x%08x\n" % reg_pio_enable)
        fd.write("#define BOARD_SELECT_A 0x%08x\n" % reg_select_a)
        fd.write("#define BOARD_SELECT_B 0x%08x\n" % reg_select_b)
        fd.write("\n#endif\n")
        fd.close()
        continue
    print "ERROR: what is '%s'?" % fn
    sys.exit(1)
| Python |
#!/usr/bin/python2.6
#
# Simple http server to emulate api.playfoursquare.com
import logging
import shutil
import sys
import urlparse
import SimpleHTTPServer
import BaseHTTPServer
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Handle playfoursquare.com requests, for testing.

    Serves canned XML capture files for the known API paths; 404 otherwise.
    """

    # API path -> capture file holding the canned response body.
    _CAPTURES = {
        '/v1/venue': '../captures/api/v1/venue.xml',
        '/v1/addvenue': '../captures/api/v1/venue.xml',
        '/v1/venues': '../captures/api/v1/venues.xml',
        '/v1/user': '../captures/api/v1/user.xml',
        '/v1/checkcity': '../captures/api/v1/checkcity.xml',
        '/v1/checkins': '../captures/api/v1/checkins.xml',
        '/v1/cities': '../captures/api/v1/cities.xml',
        '/v1/switchcity': '../captures/api/v1/switchcity.xml',
        '/v1/tips': '../captures/api/v1/tips.xml',
        '/v1/checkin': '../captures/api/v1/checkin.xml',
        '/history/12345.rss': '../captures/api/v1/feed.xml',
    }

    def do_GET(self):
        logging.warn('do_GET: %s, %s', self.command, self.path)
        url = urlparse.urlparse(self.path)
        logging.warn('do_GET: %s', url)
        query = urlparse.parse_qs(url.query)
        # BUG FIX: iterating a dict yields keys, so the old
        # [pair[0] for pair in query] collected the first *character* of
        # each key instead of the key itself.
        query_keys = list(query)
        response = self.handle_url(url)
        if response is not None:
            self.send_200()
            shutil.copyfileobj(response, self.wfile)
            self.wfile.close()

    # POSTs are served identically to GETs.
    do_POST = do_GET

    def handle_url(self, url):
        """Map an API path to its capture; returns an open file or None (404 sent)."""
        path = self._CAPTURES.get(url.path)
        if path is None:
            self.send_error(404)
        else:
            logging.warn('Using: %s' % path)
            return open(path)

    def send_200(self):
        """Write a 200 response header with an XML content type."""
        self.send_response(200)
        self.send_header('Content-type', 'text/xml')
        self.end_headers()
def main():
    """Serve the canned API on the port given by argv[1] (default 8080)."""
    if len(sys.argv) > 1:
        port = int(sys.argv[1])
    else:
        port = 8080
    server_address = ('0.0.0.0', port)
    httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler)
    sa = httpd.socket.getsockname()
    print "Serving HTTP on", sa[0], "port", sa[1], "..."
    httpd.serve_forever()


if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
import os
import subprocess
import sys
# Source-tree locations for the generated Java types and their parsers.
BASEDIR = '../main/src/com/joelapenna/foursquare'
TYPESDIR = '../captures/types/v1'

# Regenerate classes for the captures named on the command line, or for
# every capture file if none are given.
captures = sys.argv[1:]
if not captures:
    captures = os.listdir(TYPESDIR)
for f in captures:
    basename = f.split('.')[0]
    # foo_bar.xml -> FooBar
    javaname = ''.join([c.capitalize() for c in basename.split('_')])
    fullpath = os.path.join(TYPESDIR, f)
    typepath = os.path.join(BASEDIR, 'types', javaname + '.java')
    parserpath = os.path.join(BASEDIR, 'parsers', javaname + 'Parser.java')
    cmd = 'python gen_class.py %s > %s' % (fullpath, typepath)
    print cmd
    subprocess.call(cmd, stdout=sys.stdout, shell=True)
    cmd = 'python gen_parser.py %s > %s' % (fullpath, parserpath)
    print cmd
    subprocess.call(cmd, stdout=sys.stdout, shell=True)
| Python |
#!/usr/bin/python
"""
Pull a oAuth protected page from foursquare.
Expects ~/.oget to contain (one on each line):
CONSUMER_KEY
CONSUMER_KEY_SECRET
USERNAME
PASSWORD
Don't forget to chmod 600 the file!
"""
import httplib
import os
import re
import sys
import urllib
import urllib2
import urlparse
import user
from xml.dom import pulldom
from xml.dom import minidom
import oauth
"""From: http://groups.google.com/group/foursquare-api/web/oauth
@consumer = OAuth::Consumer.new("consumer_token","consumer_secret", {
:site => "http://foursquare.com",
:scheme => :header,
:http_method => :post,
:request_token_path => "/oauth/request_token",
:access_token_path => "/oauth/access_token",
:authorize_path => "/oauth/authorize"
})
"""
# Foursquare API endpoint and OAuth plumbing shared by all requests.
SERVER = 'api.foursquare.com:80'
CONTENT_TYPE_HEADER = {'Content-Type' :'application/x-www-form-urlencoded'}
SIGNATURE_METHOD = oauth.OAuthSignatureMethod_HMAC_SHA1()
AUTHEXCHANGE_URL = 'http://api.foursquare.com/v1/authexchange'
def parse_auth_response(auth_response):
    """Pull the (token, secret) pair out of an authexchange XML response."""
    token_match = re.search('<oauth_token>(.*)</oauth_token>', auth_response)
    secret_match = re.search('<oauth_token_secret>(.*)</oauth_token_secret>',
                             auth_response)
    return (token_match.groups()[0], secret_match.groups()[0])
def create_signed_oauth_request(username, password, consumer):
    """Build an HMAC-SHA1-signed POST request for the authexchange endpoint."""
    credentials = dict(fs_username=username, fs_password=password)
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(
        consumer, http_method='POST', http_url=AUTHEXCHANGE_URL,
        parameters=credentials)
    # No token yet: this request is what obtains one.
    oauth_request.sign_request(SIGNATURE_METHOD, consumer, None)
    return oauth_request
def main():
    """Fetch the URL in argv[1] with OAuth credentials from ~/.oget.

    On first run (4-line config) performs the authexchange to obtain an
    access token and appends it to ~/.oget; later runs (6-line config)
    reuse the stored token.  Prints the raw response body.
    """
    url = urlparse.urlparse(sys.argv[1])
    # Nevermind that the query can have repeated keys.
    parameters = dict(urlparse.parse_qsl(url.query))
    password_file = open(os.path.join(user.home, '.oget'))
    lines = [line.strip() for line in password_file.readlines()]
    if len(lines) == 4:
        cons_key, cons_key_secret, username, password = lines
        access_token = None
    else:
        cons_key, cons_key_secret, username, password, token, secret = lines
        access_token = oauth.OAuthToken(token, secret)
    consumer = oauth.OAuthConsumer(cons_key, cons_key_secret)
    if not access_token:
        # First run: trade username/password for an OAuth access token.
        oauth_request = create_signed_oauth_request(username, password, consumer)
        connection = httplib.HTTPConnection(SERVER)
        headers = {'Content-Type' :'application/x-www-form-urlencoded'}
        connection.request(oauth_request.http_method, AUTHEXCHANGE_URL,
                           body=oauth_request.to_postdata(), headers=headers)
        auth_response = connection.getresponse().read()
        token = parse_auth_response(auth_response)
        access_token = oauth.OAuthToken(*token)
        # Persist the token so the exchange is only performed once.
        open(os.path.join(user.home, '.oget'), 'w').write('\n'.join((
            cons_key, cons_key_secret, username, password, token[0], token[1])))
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer,
        access_token, http_method='POST', http_url=url.geturl(),
        parameters=parameters)
    oauth_request.sign_request(SIGNATURE_METHOD, consumer, access_token)
    connection = httplib.HTTPConnection(SERVER)
    connection.request(oauth_request.http_method, oauth_request.to_url(),
                       body=oauth_request.to_postdata(), headers=CONTENT_TYPE_HEADER)
    print connection.getresponse().read()
    #print minidom.parse(connection.getresponse()).toprettyxml(indent=' ')


if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
# Java source template for one generated parser class.  Every %(...)s slot
# is filled from the dict built by Replacements().
PARSER = """\
/**
 * Copyright 2009 Joe LaPenna
 */
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
 * Auto-generated: %(timestamp)s
 *
 * @author Joe LaPenna (joe@joelapenna.com)
 * @param <T>
 */
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
    private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
    private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
    @Override
    public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
            FoursquareError, FoursquareParseException {
        parser.require(XmlPullParser.START_TAG, null, null);
        %(type_name)s %(top_node_name)s = new %(type_name)s();
        while (parser.nextTag() == XmlPullParser.START_TAG) {
            String name = parser.getName();
            %(stanzas)s
            } else {
                // Consume something we don't understand.
                if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
                skipSubTree(parser);
            }
        }
        return %(top_node_name)s;
    }
}"""

# Per-attribute "} else if" stanzas; the leading "} else " of the first one
# is stripped by GenerateClass.
BOOLEAN_STANZA = """\
            } else if ("%(name)s".equals(name)) {
                %(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""

GROUP_STANZA = """\
            } else if ("%(name)s".equals(name)) {
                %(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""

COMPLEX_STANZA = """\
            } else if ("%(name)s".equals(name)) {
                %(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""

STANZA = """\
            } else if ("%(name)s".equals(name)) {
                %(top_node_name)s.set%(camel_name)s(parser.nextText());
"""


def main():
    """Generate a parser class for the capture file named in argv[1]."""
    type_name, top_node_name, attributes = common.WalkNodesForAttributes(
        sys.argv[1])
    GenerateClass(type_name, top_node_name, attributes)
def GenerateClass(type_name, top_node_name, attributes):
"""generate it.
type_name: the type of object the parser returns
top_node_name: the name of the object the parser returns.
per common.WalkNodsForAttributes
"""
stanzas = []
for name in sorted(attributes):
typ, children = attributes[name]
replacements = Replacements(top_node_name, name, typ, children)
if typ == common.BOOLEAN:
stanzas.append(BOOLEAN_STANZA % replacements)
elif typ == common.GROUP:
stanzas.append(GROUP_STANZA % replacements)
elif typ in common.COMPLEX:
stanzas.append(COMPLEX_STANZA % replacements)
else:
stanzas.append(STANZA % replacements)
if stanzas:
# pop off the extranious } else for the first conditional stanza.
stanzas[0] = stanzas[0].replace('} else ', '', 1)
replacements = Replacements(top_node_name, name, typ, [None])
replacements['stanzas'] = '\n'.join(stanzas).strip()
print PARSER % replacements
def Replacements(top_node_name, name, typ, children):
    """Build the template-substitution dict for one attribute stanza."""
    def camel(s):
        # some_name -> SomeName
        return ''.join([word.capitalize() for word in s.split('_')])

    type_name = camel(top_node_name)
    camel_name = camel(name)
    # "Somename"-style local name (kept exactly as originally produced).
    attribute_name = camel_name.lower().capitalize()
    # mFieldName
    field_name = 'm' + camel_name
    if children[0]:
        sub_parser_camel_case = children[0] + 'Parser'
    else:
        sub_parser_camel_case = camel_name[:-1] + 'Parser'
    return {
        'type_name': type_name,
        'name': name,
        'top_node_name': top_node_name,
        'camel_name': camel_name,
        'parser_name': typ + 'Parser',
        'attribute_name': attribute_name,
        'field_name': field_name,
        'typ': typ,
        'timestamp': datetime.datetime.now(),
        'sub_parser_camel_case': sub_parser_camel_case,
        'sub_type': children[0]
    }
if __name__ == '__main__':
    # Generated Java source goes to stdout; redirect to the target file.
    main()
| Python |
#!/usr/bin/python
import logging
from xml.dom import minidom
from xml.dom import pulldom
# Type markers used by the code generators.
BOOLEAN = "boolean"
STRING = "String"
GROUP = "Group"

# Interfaces that all FoursquareTypes implement.
DEFAULT_INTERFACES = ['FoursquareType']

# Interfaces that specific FoursqureTypes implement.
INTERFACES = {
}

DEFAULT_CLASS_IMPORTS = [
]

CLASS_IMPORTS = {
    # 'Checkin': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Venue': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Tip': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
}

# Nested object types: their subtrees are skipped by the attribute walker
# and handled by their own dedicated parsers.
COMPLEX = [
    'Group',
    'Badge',
    'Beenhere',
    'Checkin',
    'CheckinResponse',
    'City',
    'Credentials',
    'Data',
    'Mayor',
    'Rank',
    'Score',
    'Scoring',
    'Settings',
    'Stats',
    'Tags',
    'Tip',
    'User',
    'Venue',
]

# Everything we recognize; unknown types fall back to STRING.
TYPES = COMPLEX + ['boolean']
def WalkNodesForAttributes(path):
    """Parse the xml file getting all attributes.

    <venue>
      <attribute>value</attribute>
    </venue>

    Returns:
      type_name - The java-style name the top node will have. "Venue"
      top_node_name - unadulterated name of the xml stanza, probably the type of
          java class we're creating. "venue"
      attributes - {'attribute': (type, [child])}
    """
    doc = pulldom.parse(path)
    type_name = None
    top_node_name = None
    attributes = {}
    # Depth counter for subtrees being skipped (complex types); 0 = not skipping.
    level = 0
    for event, node in doc:
        # For skipping parts of a tree.
        if level > 0:
            if event == pulldom.END_ELEMENT:
                level-=1
                logging.warn('(%s) Skip end: %s' % (str(level), node))
                continue
            elif event == pulldom.START_ELEMENT:
                logging.warn('(%s) Skipping: %s' % (str(level), node))
                level+=1
                continue
        if event == pulldom.START_ELEMENT:
            logging.warn('Parsing: ' + node.tagName)
            # Get the type name to use.
            if type_name is None:
                # The first element seen is the top node; derive the Java name.
                type_name = ''.join([word.capitalize()
                                     for word in node.tagName.split('_')])
                top_node_name = node.tagName
                logging.warn('Found Top Node Name: ' + top_node_name)
                continue
            typ = node.getAttribute('type')
            child = node.getAttribute('child')
            # We don't want to walk complex types.
            if typ in COMPLEX:
                logging.warn('Found Complex: ' + node.tagName)
                level = 1
            elif typ not in TYPES:
                logging.warn('Found String: ' + typ)
                typ = STRING
            else:
                logging.warn('Found Type: ' + typ)
            logging.warn('Adding: ' + str((node, typ)))
            # setdefault: the first occurrence of a tag wins.
            attributes.setdefault(node.tagName, (typ, [child]))
    logging.warn('Attr: ' + str((type_name, top_node_name, attributes)))
    return type_name, top_node_name, attributes
| Python |
import time

# web2py model script: benchmark per-row insert+commit latency on the DAL.
# NOTE(review): DAL and Field are supplied by the web2py environment.
db=DAL()
db.define_table('a',Field('b'))
while True:
    t0=time.time()
    id=db.a.insert(b='x'*512)
    db.commit()
    print id, time.time()-t0
| Python |
import sys, os, re, time, datetime

# web2py script: tail a matching-engine log for one ticker and mirror each
# trade into the database, adjusting buyer/seller cash balances.
# NOTE(review): db and request come from the web2py environment.
ticker=sys.argv[1]
print sys.argv
productid=db.product(name=ticker).id
filename=os.path.join('applications',request.application,'modules',ticker+'.log')
filenameidx=os.path.join('applications',request.application,'modules',ticker+'.idx')
# Parses engine log lines like "12.3: match 5@10.0 from 2(7) to 3(8)".
re_match=re.compile('(?P<t>\d+(\.\d+)?)\: match (?P<q>\d+)\@(?P<p>\d+(\.\d+)?) from (?P<s>\d+)\((?P<si>\d+)\) to (?P<b>\d+)\((?P<bi>\d+)\)')
try:
    # Resume from the byte offset saved by a previous run.
    i=int(open(filenameidx,'rb').read())
except IOError:
    i=0
data = ''
while True:
    watcher = os.stat(filename)
    j=watcher.st_size
    if j>i:
        # The log grew; pull in the new bytes.
        file=open(filename,'rb')
        print 'loadin...'
        file.seek(i)
        data += file.read(j-i)
        file.read()
        i=j
    while True:
        match = re_match.search(data)
        if match:
            print match.group()
            now=datetime.datetime.now()
            quantity=int(match.group('q'))
            price=float(match.group('p'))
            seller=match.group('s')
            buyer=match.group('b')
            amount=price*quantity
            db.match.insert(quantity=quantity,
                            product=productid,
                            price=price,
                            seller=seller,
                            buyer=buyer,
                            sell_oid=match.group('si'),
                            buy_oid=match.group('bi'),
                            match_time=match.group('t'),
                            created_on=now)
            # Move the trade amount from the buyer to the seller.
            db(db.auth_user.id==buyer).update(actual_cash=db.auth_user.actual_cash-amount)
            db(db.auth_user.id==seller).update(actual_cash=db.auth_user.actual_cash+amount)
            db.commit()
            # Drop the consumed prefix and checkpoint our offset.
            data=data[match.end():]
            open(filenameidx,'wb').write(str(i))
        else:
            break
    time.sleep(0.01)
| Python |
import random, hmac, urllib, time, optparse
def robot_order(price,owner='1,2'):
owners=str(owner).split(',')
owner=owners[random.randint(0,len(owners)-1)]
i=random.randint(0,3)
p=price
if i is 1:
p=p-abs(random.gauss(0,10)) # dollars
if p<=20: p=20+abs(random.gauss(0,20))
elif i is 3:
p=p+abs(random.gauss(0,10)) # dollars
if p>=180: p=180-abs(random.gauss(0,20))
p = 0.01*int(p*100)
n=random.randint(1,1000) # shares
if i==0: o='%s:buy %i@%s' % (owner,n,0)
elif i==1: o='%s:buy %i@%s' % (owner,n,p)
elif i==2: o='%s:sell %i@%s' % (owner,n,0)
elif i==3: o='%s:sell %i@%s' % (owner,n,p)
return o
def start_robot(url,owner=0,hmac_key='secret',wait_time=1):
    """Loop forever: fetch a quote, build a random order, sign it, POST it."""
    while True:
        t0 = time.time()
        price = float(urllib.urlopen(url+'/quote').read())
        order = robot_order(price,owner)
        # An empty hmac_key disables signing.
        signature = hmac_key and hmac.new(hmac_key,order).hexdigest() or ''
        params = urllib.urlencode({'order': order, 'signature': signature})
        f = urllib.urlopen(url, params)
        oid = int(f.read())
        print "order #%i from %s (%fseconds)" % (oid,order,time.time()-t0)
        time.sleep(float(wait_time))


if __name__=='__main__':
    usage = "robot_trader -p 8888 -o 0 -k <hmac_key>"
    version= ""
    parser = optparse.OptionParser(usage, None, optparse.Option, version)
    parser.add_option('-p',
                      '--port',
                      default='8888',
                      dest='port',
                      help='socket')
    parser.add_option('-o',
                      '--owner',
                      default='0',
                      dest='owner',
                      help='the user id of the robot')
    parser.add_option('-k',
                      '--hmac_key',
                      default='secret',
                      dest='hmac_key',
                      help='the hmac_key to sign orders')
    parser.add_option('-w',
                      '--wait_time',
                      default='1',
                      dest='wait_time',
                      help='time between two trades in seconds')
    (options, args) = parser.parse_args()
    start_robot('http://127.0.0.1:%s' % options.port,owner=options.owner,hmac_key=options.hmac_key,wait_time=options.wait_time)
| Python |
#!/usr/bin/python
# example based on http://thomas.pelletier.im/2010/08/websocket-tornado-redis/
# Shared secret used to verify order signatures; set to '' to disable checks.
hmac_key = 'secret'

import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import hmac
import re
import time
import sys
import optparse
import simplejson

# Dict-key constants for order records kept in the engine queues.
(TYPE,QUANTITY,PRICE,STOP,OID,OWNER,TIMESTAMP) = ('type','quantity','price','stop','oid','owner','timestamp')
def prettyprint(oid,order,matches,state):
    """Render one processed order, its fills, and the book state as text."""
    pieces = ['[%s] from user %s\n' % (oid, order)]
    fills = ' '.join('%(quantity)s@%(price)s' % m for m in matches)
    pieces.append(' quotes: ' + fills + '\n')
    # Market and limit queues print quantity@price...
    for queue_name in ('mo_buy','mo_sell','lo_buy','lo_sell'):
        entries = ' '.join('%(quantity)s@%(price)s' % x for x in state[queue_name])
        pieces.append(' ' + queue_name + ': ' + entries + '\n')
    # ...stop queues additionally print the trigger price.
    for queue_name in ('so_buy','so_sell'):
        entries = ' '.join('%(quantity)s@%(price)s/%(stop)s' % x for x in state[queue_name])
        pieces.append(' ' + queue_name + ': ' + entries + '\n')
    return ''.join(pieces)
class Engine:
    """
    In-memory order-matching engine for a single security.

    example of usage
    >>> engine = Engine('intc')
    >>> user = 1
    >>> oid,matches = engine.process('1:buy intc 1',user) # market order
    >>> oid,matches = engine.process('2:buy intc 1@50.6',user) # limit order
    >>> oid,matches = engine.process('3:buy intc 1@50.6/49',user) # stop order
    >>> engine.process('3:del intc %s' % oid, user) # delete order
    >>> for match in matches: print match['quantity'], match['price'], match['buyer'], match['seller']

    NOTE(review): the examples above pass a second `user` argument, but
    process() only accepts order_text (the owner is encoded in the order
    string itself) — the doctests look stale; verify.
    """
    # Grammar: "[owner:]buy|sell|del [ticker] qty[@price][/stop]".
    re_order = re.compile('^((?P<o>\d+):)?(?P<t>(buy|sell|del))( (?P<n>[_a-z]+))? (?P<q>\d+)(\@(?P<p>\d+(\.\d+)?))?(/(?P<s>\d+(\.\d+)?))?$')

    def __init__(self,ticker,price=100.0,logfilename=None):
        # Append-only order/trade log, consumed by external watcher scripts.
        self.logfile = open(logfilename or ticker+'.log','a')
        self.price = price # initial trading price of securities
        self.ticker = ticker
        self.oid = 0
        self.mo_buy = [] # queue of buy market orders (for single security)
        self.mo_sell = [] # queue of sell market orders (for single security)
        self.lo_buy = [] # queue of buy limit orders (for single security)
        self.lo_sell = [] # queue of sell limit orders (for single security)
        self.so_buy = [] # queue of buy stop orders (for single security)
        self.so_sell = [] # queue of sell stop orders (for single security)

    def state(self):
        """Snapshot of the last price, last oid, and all six order queues."""
        return dict(price=self.price,oid=self.oid,
                    mo_buy=self.mo_buy,mo_sell=self.mo_sell,
                    lo_buy=self.lo_buy,lo_sell=self.lo_sell,
                    so_buy=self.so_buy,so_sell=self.so_sell)

    def process(self, order_text):
        """Parse and execute one order; returns (oid, matches).

        Returns (None, []) for unparsable orders, wrong tickers, or zero
        quantities.
        """
        t0=time.time()
        # parse order. owner is the id of the order submitter
        match = self.re_order.match(order_text)
        if not match or not match.group('n') in (None,self.ticker): return None, []
        ticker = match.group('n') or self.ticker
        owner = int(match.group('o') or 0)
        type = match.group('t')
        number = int(match.group('q'))
        if number==0: return None, []
        price = float(match.group('p') or 0)
        stop = float(match.group('s') or 0)
        mo_buy = self.mo_buy
        mo_sell = self.mo_sell
        lo_buy = self.lo_buy
        lo_sell = self.lo_sell
        so_buy = self.so_buy
        so_sell = self.so_sell
        # if this is a delete order remove it from the queue
        if type == 'del':
            for ell in (mo_buy,mo_sell,lo_buy,lo_sell,so_buy,so_sell):
                if [ell.pop(i) for i,item in enumerate(ell) if item[OID] == number and item[OWNER] == owner]:
                    return number, []
        # determine order id for current order
        self.oid += 1
        # this is the order
        order = dict(type=type,quantity=number,price=price,stop=stop,
                     oid=self.oid,owner=owner,timestamp=t0)
        self.logfile.write('%(timestamp)f: [%(oid)s] %(owner)s %(type)s %(quantity)s@%(price)s/%(stop)s\n' % order)
        # this function inserts the order and keeps the queue sorted
        # (best price first; sign flips the comparison for sell queues)
        def insert(queue,order,key,sign=1):
            i,value = len(queue)-1,order[key]*sign
            while i>=0:
                if queue[i][key]*sign >= value: break
                i -= 1
            queue.insert(i+1,order)
        # append it to proper queue
        if stop: insert(so_buy,order,STOP,1) if type == 'buy' else insert(so_sell,order,STOP,-1)
        elif price: insert(lo_buy,order,PRICE,1) if type == 'buy' else insert(lo_sell,order,PRICE,-1)
        else: mo_buy.append(order) if type == 'buy' else mo_sell.append(order)
        # this function matches orders from two given queues (bo=buy, so=sell)
        def match(bo,so):
            # get older orders
            b,s = bo[0],so[0]
            # find matching price (midpoint of two limits, or the one limit,
            # or the last traded price for market-vs-market)
            self.price = (b[PRICE] and s[PRICE]) and (b[PRICE]+s[PRICE])/2 or b[PRICE] or s[PRICE] or self.price
            # find matching quantity (for partial fills)
            matched_quantity = min(b[QUANTITY],s[QUANTITY])
            # store match
            match = {'type':'match','quantity':matched_quantity,
                     'price':self.price,'sell_oid':s[OID],
                     'seller':s[OWNER],'buy_oid':b[OID],
                     'buyer':b[OWNER],'timestamp':t0}
            self.logfile.write('%(timestamp)f: match %(quantity)s@%(price)s from %(seller)s(%(sell_oid)s) to %(buyer)s(%(buy_oid)s)\n' % match)
            self.logfile.flush()
            # in case of partial fills resubmit orders
            if b[QUANTITY] == matched_quantity: del bo[0]
            else: bo[0].update({QUANTITY:b[QUANTITY]-matched_quantity})
            if s[QUANTITY] == matched_quantity: del so[0]
            else: so[0].update({QUANTITY:s[QUANTITY]-matched_quantity})
            # return match
            return match
        matches, possible_matches = [], True
        # Sentinel 1e100 makes comparisons against empty queues always lose.
        def get(s,k): return s == [] and 1e100 or s[0][k]
        # loop until there are no matches to perform
        while possible_matches:
            # try perform a match
            if get(lo_buy,PRICE) >= get(lo_sell,PRICE) and \
               get(lo_buy,OID)<get(mo_buy,OID) and \
               get(lo_sell,OID)<get(mo_sell,OID):
                matches.append(match(lo_buy,lo_sell))
            elif get(lo_buy,OID)<get(mo_buy,OID) and mo_sell:
                matches.append(match(lo_buy,mo_sell))
            elif get(lo_sell,OID)<get(mo_sell,OID) and mo_buy:
                matches.append(match(mo_buy,lo_sell))
            elif mo_buy and mo_sell:
                matches.append(match(mo_buy,mo_sell))
            else:
                possible_matches = False
            # check if a stop order kicks in
            # NOTE(review): these triggers compare self.price to the order's
            # PRICE field; for stop orders the STOP field looks intended —
            # verify before relying on stop-order behavior.
            while so_buy and self.price <= so_buy[0][PRICE]:
                if so_buy[0][PRICE]: insert(lo_buy,so_buy[0],PRICE,1)
                else: mo_buy.append(so_buy[0])
                del so_buy[0]
                possible_matches = True
            while so_sell and self.price >= so_sell[0][PRICE]:
                if so_sell[0][PRICE]: insert(lo_sell,so_sell[0],PRICE,-1)
                else: mo_sell.append(so_sell[0])
                del so_sell[0]
                possible_matches = True
        return self.oid, matches
# HTML test page: an order-submission form plus a websocket-fed log of
# book updates pushed by OrderHandler.post().
TEMPLATE = """
<!DOCTYPE>
<html>
<head>
<title>Sample test</title>
<script type="text/javascript" src="http://code.jquery.com/jquery-1.4.2.min.js"></script>
</head>
<body>
<h1>Tornado Trading System: %(ticker)s</h1>
<form method='POST' action='./'>
<input ticker='order' id="order"/>
<div><input type='submit'></div>
</form>
<pre id="log"></pre>
<script type="text/javascript" charset="utf-8">
$(document).ready(function(){
$('form').submit(function(event){
var value = $('#order').val();
$.post("./", { order: value }, function(data){
$("#order").val('');
});
return false;
});
if ("WebSocket" in window) {
var ws = new WebSocket("ws://127.0.0.1:8888/realtime/");
ws.onopen = function() {};
ws.onmessage = function (evt) {
var received_msg = evt.data;
// var html = $("#log").html();
// html = received_msg+html;
$("#log").html(received_msg);
};
ws.onclose = function() {};
} else {
alert("WebSocket not supported");
}
});
</script>
</body>
</html>
"""

# Currently-connected websocket clients; broadcast targets for book updates.
LISTENERS = []
class OrderHandler(tornado.web.RequestHandler):
    """Serves the test page and accepts (optionally HMAC-signed) orders."""

    def get(self):
        try:
            self.post() ### for benchmarks
        except Exception:
            # BUG FIX: the old bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; keep the best-effort behavior, but only for
            # ordinary errors.
            pass
        self.write(TEMPLATE % dict(ticker=engine.ticker))

    def post(self):
        # Reject unsigned requests outright when signing is enabled.
        if hmac_key and not 'signature' in self.request.arguments: return
        if 'order' in self.request.arguments:
            order = self.request.arguments['order'][0].strip()
            if hmac_key:
                signature = self.request.arguments['signature'][0]
                if not hmac.new(hmac_key,order).hexdigest()==signature: return
            oid,matches = engine.process(order)
            if oid:
                # Broadcast the new book state to all websocket listeners.
                # (A prettyprint() rendering used to be built here and
                # immediately discarded; removed as dead code.)
                message = repr({'oid':oid,'order':order,'state':engine.state(),'matches':matches})
                for client in LISTENERS: client.write_message(message)
                self.write(str(oid))
class QuoteHandler(tornado.web.RequestHandler):
    """Returns the engine's last trade price as plain text."""
    def get(self):
        self.write(str(engine.price))


class QueryHandler(tornado.web.RequestHandler):
    """Returns the full book state (repr of the engine state dict)."""
    def get(self):
        self.write(repr(engine.state()))


class RealtimeHandler(tornado.websocket.WebSocketHandler):
    """Websocket endpoint; registered clients receive book-update broadcasts."""
    def open(self):
        LISTENERS.append(self)
        print 'client connected via websocket'
    def on_message(self, message):
        # Clients only listen; inbound messages are ignored.
        pass
    def on_close(self):
        LISTENERS.remove(self)
        print 'client disconnected'


if __name__ == "__main__":
    usage = "matchingserver -p 8888 -t intc"
    version= ""
    parser = optparse.OptionParser(usage, None, optparse.Option, version)
    parser.add_option('-p',
                      '--port',
                      default='8888',
                      dest='port',
                      help='socket')
    parser.add_option('-t',
                      '--ticker',
                      default='intc',
                      dest='ticker',
                      help='ticker name')
    (options, args) = parser.parse_args()
    urls=[
        (r'/', OrderHandler),
        (r'/quote', QuoteHandler),
        (r'/query', QueryHandler),
        (r'/realtime/', RealtimeHandler)]
    # One engine instance shared by all handlers via this module global.
    engine = Engine(options.ticker)
    application = tornado.web.Application(urls, auto_reload=True)
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(int(options.port))
    tornado.ioloop.IOLoop.instance().start()
| Python |
# web2py model: page metadata and the top-level menu, driven by settings.
response.title = settings.title
response.subtitle = settings.subtitle
response.meta.author = '%s <%s>' % (settings.author, settings.author_email)
response.meta.keywords = settings.keywords
response.meta.description = settings.description
response.menu = [
    # (label, is-currently-active, target URL, children)
    (T('Index'),URL('index').xml()==URL().xml(),URL('index'),[]),
    (T('Products'),URL('products').xml()==URL().xml(),URL('products'),[]),
    (T('About'),URL('about').xml()==URL().xml(),URL('about'),[]),
]
| Python |
from gluon.storage import Storage

# Site-wide configuration, read by the layout, models, and controllers.
settings = Storage()
settings.migrate = True
# Fixed typos in the user-visible strings: '[T]trading' -> '[T]rading',
# 'plaform' -> 'platform'.
settings.title = '[E]xchange [M]atching and [T]rading [E]ngine'
settings.subtitle = 'full-stack exchange and trading platform (fast, scalable, secure)'
settings.author = 'mdipierro'
settings.author_email = 'mdipierro@cs.depaul.edu'
settings.keywords = ''
settings.description = ''
settings.layout_theme = 'default'
settings.database_uri = 'sqlite://storage.sqlite'
# NOTE(review): secrets are committed to source control; both keys should
# be generated per-deployment and loaded from outside the repository.
settings.security_key = 'e7f25229-0161-48f2-bdbe-e2c325a6b390'
settings.email_server = 'localhost'
settings.email_sender = 'you@example.com'
settings.email_login = ''
settings.login_method = 'local'
settings.login_config = ''
settings.hmac_key = 'secret' # to communicate to matchingengine.py
| Python |
# -*- coding: utf-8 -*-
# Database connection and auth model. Executed by web2py on every request.
# NOTE(review): the URI is hard-coded here instead of using
# settings.database_uri — confirm they are meant to stay in sync.
db = DAL('sqlite://storage.sqlite') # if not, use SQLite or other DB
from gluon.tools import *
mail = Mail() # mailer
auth = Auth(globals(),db) # authentication/authorization
crud = Crud(globals(),db) # for CRUD helpers using auth
service = Service(globals()) # for json, xml, jsonrpc, xmlrpc, amfrpc
plugins = PluginManager()
mail.settings.server = settings.email_server
mail.settings.sender = settings.email_sender
mail.settings.login = settings.email_login
# Password-hashing key; must be set before define_tables().
# NOTE(review): hard-coded secret committed to source control.
auth.settings.hmac_key = 'sha512:e7f25229-0161-48f2-bdbe-e2c325a6b390' # before define_tables()
########################################
# Custom auth_user table: standard web2py auth fields plus per-trader
# cash balances (actual_cash / virtual_cash) and a manager flag.
db.define_table('auth_user',
    Field('id','id',
        represent=lambda id:SPAN(id,' ',A('view',_href=URL('auth_user_read',args=id)))),
    Field('username', type='string',
        label=T('Username')),
    Field('first_name', type='string',
        label=T('First Name')),
    Field('last_name', type='string',
        label=T('Last Name')),
    Field('email', type='string',
        label=T('Email')),
    Field('password', type='password',
        readable=False,
        label=T('Password')),
    Field('manager', type='boolean',
        label=T('Manager')),
    Field('actual_cash', 'double', default=0, requires=IS_FLOAT_IN_RANGE(0,10**9),
        label=T('Actual Cash Balance')),
    Field('virtual_cash', 'double', default=0, requires=IS_FLOAT_IN_RANGE(0,10**9),
        label=T('Cash Available')),
    Field('created_on','datetime',default=request.now,
        label=T('Created On'),writable=False,readable=False),
    Field('modified_on','datetime',default=request.now,
        label=T('Modified On'),writable=False,readable=False,
        update=request.now),
    Field('registration_key',default='',
        writable=False,readable=False),
    Field('reset_password_key',default='',
        writable=False,readable=False),
    Field('registration_id',default='',
        writable=False,readable=False),
    format='%(username)s',
    migrate=settings.migrate)
# One-off seeding helpers, intentionally left disabled:
#db(db.auth_user.actual_cash==0).update(actual_cash=1000000)
#db(db.auth_user.virtual_cash==0).update(virtual_cash=1000000)
# Validators are attached after table definition, per web2py convention.
db.auth_user.first_name.requires = IS_NOT_EMPTY(error_message=auth.messages.is_empty)
db.auth_user.last_name.requires = IS_NOT_EMPTY(error_message=auth.messages.is_empty)
db.auth_user.password.requires = CRYPT(key=auth.settings.hmac_key)
db.auth_user.username.requires = IS_NOT_IN_DB(db, db.auth_user.username)
db.auth_user.registration_id.requires = IS_NOT_IN_DB(db, db.auth_user.registration_id)
db.auth_user.email.requires = (IS_EMAIL(error_message=auth.messages.invalid_email),
                               IS_NOT_IN_DB(db, db.auth_user.email))
auth.define_tables(migrate=settings.migrate) # creates all needed tables
auth.settings.mailer = mail # for user email verification
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.messages.verify_email = 'Click on the link http://'+request.env.http_host+URL(r=request,c='default',f='user',args=['verify_email'])+'/%(key)s to verify your email'
auth.settings.reset_password_requires_verification = True
auth.messages.reset_password = 'Click on the link http://'+request.env.http_host+URL(r=request,c='default',f='user',args=['reset_password'])+'/%(key)s to reset your password'
# Per-session product cache used by the controllers.
if not session.products: session.products=[]
| Python |
### we prepend t_ to tablenames and f_ to fieldnames for disambiguity
########################################
# A tradable instrument. post_url/quote_url/ws_url point at the
# matching-engine server instance that handles this product.
db.define_table('product',
    Field('name', type='string',label=T('Name'),requires=[IS_MATCH('[_a-z]+'),IS_NOT_IN_DB(db,'product.name')]),
    # Fixed label typo: 'Desciption' -> 'Description'.
    Field('description', type='string',label=T('Description')),
    Field('unit_price', type='double',label=T('Unit Price')),
    Field('post_url'),
    Field('quote_url'),
    Field('ws_url'),
    Field('active','boolean',default=True,label=T('Active'),writable=False,readable=False),
    Field('created_on','datetime',default=request.now,
        label=T('Created On'),writable=False,readable=False),
    Field('modified_on','datetime',default=request.now,
        label=T('Modified On'),writable=False,readable=False,
        update=request.now),
    Field('created_by',db.auth_user,default=auth.user_id,
        label=T('Created By'),writable=False,readable=False),
    Field('modified_by',db.auth_user,default=auth.user_id,
        label=T('Modified By'),writable=False,readable=False,
        update=auth.user_id),
    format='%(name)s',
    migrate=settings.migrate)
# An order submitted by a user; oid is the id assigned by the matching
# engine (0 until the engine acknowledges the order).
db.define_table('buy_sell_order',
    Field('product', db.product,readable=False,writable=False),
    Field('buy_sell',requires=IS_IN_SET(('buy','sell')),default='buy'),
    Field('quantity','integer',requires=IS_INT_IN_RANGE(1,10000)),
    Field('price','double',default=0,requires=IS_FLOAT_IN_RANGE(0,10000),comment='0 for market order'),
    Field('stop_price','double',default=0,comment='0 if not stop order'),
    Field('oid','integer',default=0,writable=False,readable=False),
    Field('created_on','datetime',default=request.now,writable=False,readable=False),
    Field('created_by',db.auth_user,default=auth.user_id,writable=False,readable=False))
# An executed trade reported by the matching engine.
# NOTE(review): buy_oid/sell_oid reference db.auth_user but their names
# suggest engine order ids — confirm the intended foreign key.
db.define_table('match',
    Field('product', db.product),
    Field('quantity','integer'),
    Field('price','double'),
    Field('buyer','integer'),
    Field('seller','integer'),
    Field('buy_oid',db.auth_user),
    Field('sell_oid',db.auth_user),
    Field('match_time','double'),
    Field('created_on','datetime',writable=False,readable=False))
| Python |
# -*- coding: utf-8 -*-
# ##########################################################
# ## make sure administrator is on localhost
# ###########################################################
import os
import socket
import datetime
import copy
import gluon.contenttype
import gluon.fileutils

# ## critical --- make a copy of the environment
# Snapshot of the request's global namespace; used below to discover DB
# connections and to evaluate admin-supplied query strings.
global_env = copy.copy(globals())
global_env['datetime'] = datetime

http_host = request.env.http_host.split(':')[0]
remote_addr = request.env.remote_addr
try:
    # Every address that counts as "local" for access-control purposes.
    hosts = (http_host, socket.gethostname(),
             socket.gethostbyname(http_host),
             '::1', '127.0.0.1', '::ffff:127.0.0.1')
except:
    # DNS lookup failed; fall back to the bare host name only.
    hosts = (http_host, )
# Behind a proxy or on HTTPS: require a secure session cookie.
if request.env.http_x_forwarded_for or request.env.wsgi_url_scheme \
        in ['https', 'HTTPS']:
    session.secure()
# Otherwise refuse non-local plaintext access outright.
elif (remote_addr not in hosts) and (remote_addr != "127.0.0.1"):
    raise HTTP(200, T('appadmin is disabled because insecure channel'))
# Must also be logged in to the web2py admin app.
if not gluon.fileutils.check_credentials(request):
    redirect(URL(a='admin', c='default', f='index'))

ignore_rw = True          # appadmin shows even non-readable/writable fields
response.view = 'appadmin.html'
response.menu = [[T('design'), False, URL('admin', 'default', 'design',
                 args=[request.application])], [T('db'), False,
                 URL('index')], [T('state'), False,
                 URL('state')], [T('cache'), False,
                 URL('ccache')]]
def get_databases(request):
    """Scan the captured global environment and return {name: connection}
    for every database object found there."""
    found = {}
    for name, candidate in global_env.items():
        try:
            is_db = isinstance(candidate, GQLDB)
        except:
            # GQLDB is undefined outside GAE; fall back to the SQL adapter.
            is_db = isinstance(candidate, SQLDB)
        if is_db:
            found[name] = candidate
    return found
databases = get_databases(None)  # computed once per request; keys are the DB variable names
def eval_in_global_env(text):
    # Evaluate *text* as a Python expression in the captured environment.
    # NOTE(review): this executes admin-supplied strings with exec; it is
    # only safe because the preamble restricts appadmin to authenticated
    # local/HTTPS administrators.
    exec ('_ret=%s' % text, {}, global_env)
    return global_env['_ret']
def get_database(request):
    """Resolve request.args[0] to a known database connection; on any bad
    input, flash an error and bounce back to the index page."""
    if not request.args or request.args[0] not in databases:
        session.flash = T('invalid request')
        redirect(URL('index'))
    return eval_in_global_env(request.args[0])
def get_table(request):
    """Return (db, tablename) for request.args[0:2]; on a missing or
    unknown table, flash an error and bounce back to the index page."""
    db = get_database(request)
    if len(request.args) <= 1 or request.args[1] not in db.tables:
        session.flash = T('invalid request')
        redirect(URL('index'))
    return (db, request.args[1])
def get_query(request):
    """Evaluate request.vars.query in the captured environment, returning
    None when it is missing or does not evaluate cleanly."""
    try:
        result = eval_in_global_env(request.vars.query)
    except Exception:
        result = None
    return result
def query_by_table_type(tablename, db, request=request):
    """Build the default 'select everything' query string for a table.

    Keyed (legacy primary-key) tables get a condition on their first key
    field; ordinary tables get the usual ``db.table.id>0``.
    """
    if hasattr(db[tablename], '_primarykey'):
        firstkey = db[tablename][db[tablename]._primarykey[0]]
        suffix = '!=""' if firstkey.type in ['string', 'text'] else '>0'
        return '%s.%s.%s%s' % (request.args[0], request.args[1],
                               firstkey.name, suffix)
    return '%s.%s.id>0' % tuple(request.args[:2])
# ##########################################################
# ## list all databases and tables
# ###########################################################
def index():
    """List every discovered database and its tables."""
    return {'databases': databases}
# ##########################################################
# ## insert a new record
# ###########################################################
def insert():
    """Show an insert form for the requested table; flash on success."""
    db, tablename = get_table(request)
    form = SQLFORM(db[tablename], ignore_rw=ignore_rw)
    if form.accepts(request.vars, session):
        response.flash = T('new record inserted')
    return dict(form=form, table=db[tablename])
# ##########################################################
# ## list all records in table and insert new record
# ###########################################################
def download():
    """Stream an uploaded file back to the client via web2py's built-in
    download machinery (handles range requests and content types)."""
    # Removed an unused local `import os` left over from earlier code.
    db = get_database(request)
    return response.download(request, db)
def csv():
    """Export the records matching request.vars.query as a CSV attachment
    named after the database and table in the query string."""
    import gluon.contenttype
    response.headers['Content-Type'] = \
        gluon.contenttype.contenttype('.csv')
    db = get_database(request)
    query = get_query(request)
    if not query:
        return None
    dbname, tablename = tuple(request.vars.query.split('.')[:2])
    response.headers['Content-disposition'] = \
        'attachment; filename=%s_%s.csv' % (dbname, tablename)
    return str(db(query).select())
def import_csv(table, file):
    """Bulk-load rows into *table* from the open CSV *file* object."""
    table.import_from_csv_file(file)
def select():
    """Browse, update, delete, and CSV-import records for one table.

    Drives the main appadmin grid: normalizes the admin-typed query,
    remembers query/ordering in the session, and applies optional bulk
    update/delete before selecting a 100-row page.
    """
    import re
    db = get_database(request)
    dbname = request.args[0]
    # Shorthand form "table.field=value" is rewritten into a real DAL
    # query below; keyed tables allow arbitrary (non-numeric) values.
    regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>\d+)')
    if len(request.args) > 1 and hasattr(db[request.args[1]], '_primarykey'):
        regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>.+)')
    if request.vars.query:
        match = regex.match(request.vars.query)
        if match:
            request.vars.query = '%s.%s.%s==%s' % (request.args[0],
                    match.group('table'), match.group('field'),
                    match.group('value'))
    else:
        # No query supplied: reuse the last one from the session.
        request.vars.query = session.last_query
    query = get_query(request)
    if request.vars.start:
        start = int(request.vars.start)
    else:
        start = 0
    nrows = 0
    stop = start + 100          # fixed page size of 100 rows
    table = None
    rows = []
    orderby = request.vars.orderby
    if orderby:
        orderby = dbname + '.' + orderby
        # Clicking the same column twice toggles ascending/descending.
        if orderby == session.last_orderby:
            if orderby[0] == '~':
                orderby = orderby[1:]
            else:
                orderby = '~' + orderby
    session.last_orderby = orderby
    session.last_query = request.vars.query
    # Query / bulk-update / bulk-delete form.
    form = FORM(TABLE(TR(T('Query:'), '', INPUT(_style='width:400px',
                _name='query', _value=request.vars.query or '',
                requires=IS_NOT_EMPTY(error_message=T("Cannot be empty")))), TR(T('Update:'),
                INPUT(_name='update_check', _type='checkbox',
                value=False), INPUT(_style='width:400px',
                _name='update_fields', _value=request.vars.update_fields
                or '')), TR(T('Delete:'), INPUT(_name='delete_check',
                _class='delete', _type='checkbox', value=False), ''),
                TR('', '', INPUT(_type='submit', _value='submit'))),
                _action=URL(r=request, args=request.args))
    # Optional CSV upload into the table named by request.vars.table.
    if request.vars.csvfile != None:
        try:
            import_csv(db[request.vars.table],
                       request.vars.csvfile.file)
            response.flash = T('data uploaded')
        except Exception, e:
            response.flash = DIV(T('unable to parse csv file'), PRE(str(e)))
    if form.accepts(request.vars, formname=None):
        # regex = re.compile(request.args[0] + '\.(?P<table>\w+)\.id\>0')
        # Extract the table name out of the (already normalized) query.
        regex = re.compile(request.args[0] + '\.(?P<table>\w+)\..+')
        match = regex.match(form.vars.query.strip())
        if match:
            table = match.group('table')
    try:
        nrows = db(query).count()
        if form.vars.update_check and form.vars.update_fields:
            # update_fields is "field=value,..." evaluated as dict(...).
            db(query).update(**eval_in_global_env('dict(%s)'
                             % form.vars.update_fields))
            response.flash = T('%s rows updated', nrows)
        elif form.vars.delete_check:
            db(query).delete()
            response.flash = T('%s rows deleted', nrows)
        nrows = db(query).count()   # recount after any mutation
        if orderby:
            rows = db(query).select(limitby=(start, stop),
                                    orderby=eval_in_global_env(orderby))
        else:
            rows = db(query).select(limitby=(start, stop))
    except Exception, e:
        (rows, nrows) = ([], 0)
        response.flash = DIV(T('Invalid Query'), PRE(str(e)))
    return dict(
        form=form,
        table=table,
        start=start,
        stop=stop,
        nrows=nrows,
        rows=rows,
        query=request.vars.query,
    )
# ##########################################################
# ## edit delete one record
# ###########################################################
def update():
    """Edit or delete a single record.

    Supports both ordinary id-based tables and legacy keyed tables
    (those with a _primarykey); for keyed tables the key fields are
    matched from request.vars and made read-only on the form.
    """
    (db, table) = get_table(request)
    keyed = hasattr(db[table], '_primarykey')
    record = None
    if keyed:
        # Locate the record by whichever primary-key field was passed in.
        key = [f for f in request.vars if f in db[table]._primarykey]
        if key:
            record = db(db[table][key[0]] == request.vars[key[0]]).select().first()
    else:
        record = db(db[table].id == request.args(2)).select().first()
    if not record:
        qry = query_by_table_type(table, db)
        session.flash = T('record does not exist')
        redirect(URL('select', args=request.args[:1],
                 vars=dict(query=qry)))
    if keyed:
        # Primary-key values must not be editable.
        for k in db[table]._primarykey:
            db[table][k].writable = False
    form = SQLFORM(db[table], record, deletable=True, delete_label=T('Check to delete'),
                   ignore_rw=ignore_rw and not keyed,
                   linkto=URL('select',
                   args=request.args[:1]), upload=URL(r=request,
                   f='download', args=request.args[:1]))
    if form.accepts(request.vars, session):
        session.flash = T('done!')
        qry = query_by_table_type(table, db)
        redirect(URL('select', args=request.args[:1],
                 vars=dict(query=qry)))
    return dict(form=form, table=db[table])
# ##########################################################
# ## get global variables
# ###########################################################
def state():
    """Render the state page; the view pulls everything it needs from the
    environment, so no extra context is passed."""
    return {}
def ccache():
    """Inspect and optionally clear the RAM and disk caches.

    Renders clear buttons, then gathers per-cache statistics (bytes and
    object counts need the optional guppy profiler; hit/miss counters
    come from the special stats entries the cache keeps as dicts).
    """
    form = FORM(
        P(TAG.BUTTON("Clear CACHE?", _type="submit", _name="yes", _value="yes")),
        P(TAG.BUTTON("Clear RAM", _type="submit", _name="ram", _value="ram")),
        P(TAG.BUTTON("Clear DISK", _type="submit", _name="disk", _value="disk")),
    )
    if form.accepts(request.vars, session):
        clear_ram = False
        clear_disk = False
        session.flash = ""
        if request.vars.yes:
            clear_ram = clear_disk = True
        if request.vars.ram:
            clear_ram = True
        if request.vars.disk:
            clear_disk = True
        if clear_ram:
            cache.ram.clear()
            session.flash += "Ram Cleared "
        if clear_disk:
            cache.disk.clear()
            session.flash += "Disk Cleared"
        # Redirect-after-post so a refresh does not clear again.
        redirect(URL(r=request))
    try:
        # guppy is optional; without it byte/object sizes stay at 0.
        from guppy import hpy; hp = hpy()
    except ImportError:
        hp = False
    import shelve, os, copy, time, math
    from gluon import portalocker
    ram = {
        'bytes': 0,
        'objects': 0,
        'hits': 0,
        'misses': 0,
        'ratio': 0,
        'oldest': time.time()
    }
    disk = copy.copy(ram)
    total = copy.copy(ram)
    # RAM cache: dict entries hold hit/miss stats, other entries are
    # (timestamp, value) pairs.
    for key, value in cache.ram.storage.items():
        if isinstance(value, dict):
            ram['hits'] = value['hit_total'] - value['misses']
            ram['misses'] = value['misses']
            try:
                ram['ratio'] = ram['hits'] * 100 / value['hit_total']
            except (KeyError, ZeroDivisionError):
                ram['ratio'] = 0
        else:
            if hp:
                ram['bytes'] += hp.iso(value[1]).size
                ram['objects'] += hp.iso(value[1]).count
            if value[0] < ram['oldest']:
                ram['oldest'] = value[0]
    # Lock the shelve file while reading the disk cache.
    locker = open(os.path.join(request.folder,
                  'cache/cache.lock'), 'a')
    portalocker.lock(locker, portalocker.LOCK_EX)
    disk_storage = shelve.open(
        os.path.join(request.folder,
        'cache/cache.shelve'))
    for key, value in disk_storage.items():
        if isinstance(value, dict):
            disk['hits'] = value['hit_total'] - value['misses']
            disk['misses'] = value['misses']
            try:
                disk['ratio'] = disk['hits'] * 100 / value['hit_total']
            except (KeyError, ZeroDivisionError):
                disk['ratio'] = 0
        else:
            if hp:
                disk['bytes'] += hp.iso(value[1]).size
                disk['objects'] += hp.iso(value[1]).count
            if value[0] < disk['oldest']:
                disk['oldest'] = value[0]
    portalocker.unlock(locker)
    locker.close()
    disk_storage.close()
    total['bytes'] = ram['bytes'] + disk['bytes']
    total['objects'] = ram['objects'] + disk['objects']
    total['hits'] = ram['hits'] + disk['hits']
    total['misses'] = ram['misses'] + disk['misses']
    try:
        total['ratio'] = total['hits'] * 100 / (total['hits'] + total['misses'])
    except (KeyError, ZeroDivisionError):
        total['ratio'] = 0
    if disk['oldest'] < ram['oldest']:
        total['oldest'] = disk['oldest']
    else:
        total['oldest'] = ram['oldest']
    def GetInHMS(seconds):
        # Convert a second count into (hours, minutes, seconds).
        hours = math.floor(seconds / 3600)
        seconds -= hours * 3600
        minutes = math.floor(seconds / 60)
        seconds -= minutes * 60
        seconds = math.floor(seconds)
        return (hours, minutes, seconds)
    ram['oldest'] = GetInHMS(time.time() - ram['oldest'])
    disk['oldest'] = GetInHMS(time.time() - disk['oldest'])
    total['oldest'] = GetInHMS(time.time() - total['oldest'])
    return dict(form=form, total=total,
                ram=ram, disk=disk)
| Python |
# -*- coding: utf-8 -*-
### required - do no delete
# Standard web2py scaffolding actions: auth pages, file downloads, and
# the service dispatcher for json/xml/rpc calls.
def user(): return dict(form=auth())
def download(): return response.download(request, db)
def call():
    # Services are stateless: drop the session before dispatching.
    session.forget()
    return service()
### end requires
def index():
    # Home page; all content comes from the view.
    return dict()
def error():
    # Generic error page.
    return dict()
@auth.requires_login()
def products():
    """List all products; managers additionally get an inline create form."""
    # Non-managers get an empty string instead of the CRUD form.
    form = auth.user.manager and crud.create(db.product) or ''
    rows = db(db.product).select(orderby=db.product.name)
    return dict(rows=rows, form=form)
@auth.requires(auth.user and auth.user.manager)
def edit_product():
    """Manager-only edit form for the product in request.args(0)."""
    return dict(form=crud.update(db.product, request.args(0)))
def send(url, order):
    """POST *order* to the matching engine at *url* and return the raw
    response body. The order is HMAC-signed when settings.hmac_key is set
    (the engine verifies the signature with the same shared key)."""
    import urllib, hmac
    if settings.hmac_key:
        signature = hmac.new(settings.hmac_key, order).hexdigest()
    else:
        signature = ''
    payload = urllib.urlencode({'order': order, 'signature': signature})
    handle = urllib.urlopen(url, payload)
    body = handle.read()
    handle.close()
    return body
@auth.requires_login()
def trade():
    """Render the trading page for the product in request.args(0);
    unknown products bounce back to the product list."""
    product = db.product(request.args(0))
    if not product:
        redirect(URL('products'))
    return dict(product=product)
@auth.requires_login()
def order():
    """Ajax endpoint: submit a buy/sell order to the matching engine and
    record it locally; returns the engine-assigned order id."""
    # NOTE(review): debug print left in the production path.
    print request.vars
    product = db.product(request.vars.product)
    # Wire format: "<user>:<buy|sell> <product> <qty>@<price>/<stop>".
    # NOTE(review): the wire string reads request.vars.stop but the DB
    # insert below reads request.vars.stop_price — one of the two is
    # probably wrong; confirm against the client-side form field name.
    order = '%i:%s %s %s@%s/%s' % (auth.user.id, request.vars.type,
                                   product.name, request.vars.quantity,
                                   request.vars.price or 0, request.vars.stop or 0)
    oid = int(send(product.post_url, order))
    db.buy_sell_order.insert(buy_sell=request.vars.type,
                             product=product.id,
                             quantity=request.vars.quantity,
                             price=request.vars.price,
                             stop_price=request.vars.stop_price,
                             oid=oid)
    # Cash-balance bookkeeping, intentionally disabled:
    #if request.vars.type=='buy':
    #    db(db.auth_user.id==auth.user.id).update(virtual_cash=db.auth_user.virtual_cash-request.vars.price*form.vars.quantity)
    return oid
@auth.requires_login()
def zap():
    """Delete ALL orders and matches (development helper).

    NOTE(review): guarded only by login, not by the manager flag — any
    authenticated user can wipe the tables. Consider
    @auth.requires(auth.user and auth.user.manager).
    """
    db(db.buy_sell_order).delete()
    db(db.match).delete()
    return 'zapped!'
@auth.requires_login()
def delete():
    """Cancel one of the current user's orders on the matching engine.

    Looks the order up by (oid, product, creator) so users can only
    cancel their own orders, then sends a "del" command to the engine.
    """
    # NOTE(review): debug prints left in the production path.
    print request.vars
    product = db.product(request.vars.product)
    order = db.buy_sell_order(oid=request.vars.oid, product=request.vars.product, created_by=auth.user.id)
    print order
    if not order: return
    # Wire format: "<user>:del <product> <oid>".
    order = '%s:del %s %s' % (auth.user.id, product.name, request.vars.oid)
    print order
    print send(product.post_url, order)
    print request.vars
@auth.requires_login()
def pandl():
    """Profit-and-loss page: the user's matches for one product plus the
    engine's current quote (fetched live from quote_url)."""
    import urllib
    product = db.product(request.args(0)) or redirect(URL('products'))
    # Live price from the matching-engine's /quote endpoint.
    f = urllib.urlopen(product.quote_url)
    quote = float(f.read())
    f.close()
    user = db.auth_user[auth.user.id]
    # All matches where this user was on either side, oldest first.
    rows = db((db.match.buyer == auth.user.id) | (db.match.seller == auth.user.id)).select(orderby=db.match.match_time)
    return dict(rows=rows, user=user, quote=quote)
@auth.requires_login()
def about():
    """Static 'about' page; all content lives in the view."""
    return {}
@auth.requires_login()
def old_trade():
    """Legacy trading page: embeds trade_form via an ajax-trapped LOAD."""
    product = db.product(request.args(0)) or redirect(URL('products'))
    form = LOAD(request.controller, 'trade_form', args=[product.id], ajax_trap=True)
    return dict(form=form, product=product)
@auth.requires_login()
def trade_form():
    """Ajax component: SQLFORM-based order entry for one product.

    On accept, sends the order to the matching engine and stores the
    engine-assigned oid back onto the inserted row.
    """
    product = db.product(request.args(0))
    db.buy_sell_order.product.default = product.id
    form = SQLFORM(db.buy_sell_order)
    # Narrow the input widgets so the form fits inline.
    form.element(_name='buy_sell')['_style'] = 'width:70px'
    form.element(_name='quantity')['_style'] = 'width:70px'
    form.element(_name='price')['_style'] = 'width:70px'
    form.element(_name='stop_price')['_style'] = 'width:70px'
    if form.accepts(request):
        # NOTE(review): this import is unused here — send() imports its own.
        import urllib, hmac
        # NOTE(review): form.vars.stop is read below but the form field is
        # named stop_price, so this is presumably always empty — confirm.
        order = '%i:%s %s %i@%s/%s' % (auth.user.id, form.vars.buy_sell,
                                       product.name, form.vars.quantity,
                                       form.vars.price or 0, form.vars.stop or 0)
        oid = int(send(product.post_url, order))
        db(db.buy_sell_order.id == form.vars.id).update(oid=oid)
        # Cash-balance bookkeeping, intentionally disabled:
        #if form.vars.buy_sell=='buy':
        #    db(db.auth_user.id==auth.user.id).update(virtual_cash=db.auth_user.virtual_cash-form.vars.price*form.vars.quantity)
        response.js = "jQuery('.flash').html('your order %s was submitted').slideDown()" % order
    return form
| Python |
import socket
debug = True
## Trace debugging messages.
# @param aString String to be printed.
def printd( aString ):
    """Emit *aString* on stdout when the module-level debug flag is set."""
    if debug:
        print(aString)
# FlyerSettings
# define the default settings
class FlyerSettings( dict ):
    """Dictionary of the framework's default settings.

    Every instance carries the same defaults; callers index it like a
    plain dict, e.g. ``FlyerSettings()["PORT"]``.
    """
    ## The constructor
    # @param self: The object pointer.
    def __init__( self ):
        dict.__init__(self)
        self["SERVER"] = "127.0.0.1"
        self["PORT"] = "843"               # Flash socket-policy port
        self["MAX_CLIENT"] = 2000          # listen() backlog
        self["BUFFER_SIZE"] = 1024         # recv() chunk size
        self["FLYER_VERSION"] = "1,0,19"
        # Fix: the original chained assignment also bound a stray local
        # name (_policyFile) that was never used; the value is unchanged.
        self["POLICY_FILE"] = '<?xml version="1.0" encoding="UTF-8"?><cross-domain-policy xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="http://www.adobe.com/xml/schemas/PolicyFileSocket.xsd"><allow-access-from domain="*" to-ports="*" secure="false" /><site-control permitted-cross-domain-policies="master-only" /></cross-domain-policy>\n'
# Flyer Maemo
# Define the hello world application
class FlyerMaemo:
    """Minimal demo command provider ("hello world") for the framework."""
    ## The constructor.
    # @param self: The object pointer.
    # @param aFilePath: file path (stored only when supplied)
    def __init__( self, aFilePath=None ):
        if aFilePath is not None:
            self._filePath = aFilePath
    ## Demo command: log and return a fixed greeting.
    def _helloMaemo(self, aCallback=None):
        printd("Hello maemo")
        return "hello maemo"
# FlyerServerSocket
class FlyerServerSocket:
    """Blocking TCP server: answers Flash policy-file requests and
    dispatches pipe-delimited text commands through _iCommands.

    Handles one client at a time in a sequential accept loop.
    """
    ## The constructor.
    # @param self: The object pointer.
    # @param aHost: Host name to connect to.
    # @param aPort: Port on the host to bind.
    # @param aMaxClient: Maximum number of concurrent connection.
    def __init__( self, aHost, aPort, aMaxClient):
        self._connector = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
        self._connector.bind ( ( str(aHost), int(aPort) ) )
        self._connector.listen ( int(aMaxClient) )
        self._channel = None            # socket of the most recent client
        self._details = None            # (host, port) of the most recent client
        self._running = 1               # accept-loop flag; cleared by close()
        self._iMaxClient = int(aMaxClient)
        self._setCommands()
        printd("Flyer Interface: (%s:%s) - concurrent connection: %s"%(aHost,
                                                                       aPort,
                                                                       aMaxClient))
    ## Build the command-name -> callable dispatch table.
    def _setCommands( self ):
        self._iFlyerMaemo = FlyerMaemo()
        self._iCommands = {}
        self._iCommands["sayHello"] = self._iFlyerMaemo._helloMaemo
    ## Broadcast the message to connected clients.
    # @param self: The object pointer
    # @param aText: Message for the client; NUL-terminated before sending
    #               (Flash XMLSocket framing).
    def _broadcastMessage(self, aText):
        aText += '\0'
        self._channel.send(aText)
    ## Watch for channel incoming requests.
    # @param self: The object pointer.
    # @param aData: Data received from the client
    def _channelWatcher( self, aData ):
        msg = aData.replace("\0", "")
        arrData = msg.split("|")
        iParametersDict = {}
        if("policy-file-request" not in msg):
            # NOTE(review): msg is always a str here (replace() result),
            # so this check is always true.
            if msg is not None:
                iParametersDict['COMMAND'] = arrData[0] # command name
                printd("Received command " + arrData[0])
                try:
                    self._broadcastMessage( self._iCommands[iParametersDict['COMMAND']]() )
                except KeyError:
                    printd("Invalid Command. Details Error #0001")
        else:
            # Flash asked for the cross-domain policy file.
            self._channel.send( FlyerSettings()["POLICY_FILE"] )
            printd("policy file received")
            #self._channelWatcher( self._channel.recv( FlyerSettings()["BUFFER_SIZE"] ))
            #self._channel.recv(FlyerSettings()["BUFFER_SIZE"], cb=self._channelWatcher)
    ## Listen interface for incomming connection. Handle incomming connection request.
    # @param self: The object pointer.
    def _connectionHandler( self ):
        printd("Wait for incoming connection...")
        while self._running:
            # accept() blocks; each client is served one recv at a time.
            channel, details = self._connector.accept()
            self._channel = channel
            self._details = details
            if self._running:
                printd( 'New connection with: ' + str(details) )
                self._channel.setblocking( 1 )
                #self._channel.recv(FlyerSettings()["BUFFER_SIZE"], cb=self._channelWatcher)
                self._channelWatcher( self._channel.recv( FlyerSettings()["BUFFER_SIZE"] ))
                ##printd( 'host: ' + str(details[0]) )
                #printd( 'port: ' + str(details[1]) )
        printd("Closing incoming connection")
    ## Start the handler
    # @param self the object pointer.
    def start( self ):
        printd( "Starting Flyer Framework for Maemo v." + FlyerSettings()["FLYER_VERSION"] )
        self._connectionHandler()
    ## Close server
    # @param self The object pointer.
    # @param aFrame Frame.
    def close( self ):
        # Only clears the flag; an accept() already in progress must
        # still return before the loop notices.
        printd("Closing FlyerFramework")
        self._running = 0
## FlyerFramework singletong
class FlyerFramework( object ):
    """Singleton handle around FlyerFrameworkClass (classic handle/body
    singleton recipe): all instances share one implementation object."""
    ## Stores the unique Singleton instance-
    _iInstance = None
    ## Flyer class declaration
    class FlyerFrameworkClass:
        ## The constructor.
        # @param self: The object pointer.
        def __init__( self ):
            self._iSocketServer = None
        ## Start the server.
        # @param self The object pointer.
        def start(self):
            self._iSocketServer = FlyerServerSocket(
                FlyerSettings()["SERVER"],
                FlyerSettings()["PORT"],
                FlyerSettings()["MAX_CLIENT"])
            # Blocks in the accept loop until close() is called.
            self._iSocketServer.start()
            printd("FlyerFramework started")
        ## Stop the server.
        # @param self The object pointer.
        def stop(self):
            if self._iSocketServer:
                self._iSocketServer.close()
                del self._iSocketServer
                self._iSocketServer = None
                printd("FlyerFramework stopped")
        ## Restart server.
        # @param self The object pointer.
        def restart(self):
            printd("Restarting FlyerFramework")
            self.stop()
            self.start()
        ## The destructor
        # @param self: The object pointer
        def __del__( self ):
            self.stop()
    ###########################################################################
    # Singleton accessors
    ###########################################################################
    ## The constructor
    # @param self The object pointer.
    def __init__( self ):
        # Check whether we already have an instance
        if FlyerFramework._iInstance is None:
            # Create and remember instance
            FlyerFramework._iInstance = FlyerFramework.FlyerFrameworkClass()
        # Store instance reference as the only member in the handle.
        # NOTE(review): the '_EventHandler_instance' key is never read —
        # presumably left over from the recipe this was copied from.
        self.__dict__['_EventHandler_instance'] = FlyerFramework._iInstance
    ## Delegate access to implementation.
    # @param self The object pointer.
    # @param aAttr Attribute wanted.
    # @return Attribute
    def __getattr__(self, aAttr):
        # _iInstance resolves via the class attribute, so no recursion.
        return getattr(self._iInstance, aAttr)
    ## Delegate access to implementation.
    # @param self The object pointer.
    # @param aAttr Attribute wanted.
    # @param aValue Value to be set.
    # @return Result of operation.
    def __setattr__(self, aAttr, aValue):
        return setattr(self._iInstance, aAttr, aValue )
## Flyer daemon server class.
class FlyerDaemon:
    """Thin wrapper that runs the FlyerFramework singleton as a daemon."""
    ## Run the server
    # @param self The object pointer.
    def run(self):
        #self._flyerLock = e32.Ao_lock()
        # Blocks in the framework's accept loop.
        FlyerFramework().start()
        #self._flyerLock.wait()
    ## Destructor. Stops the networking servers
    # @param self The object pointer.
    def close(self):
        printd("Closing Flyer Framework")
        FlyerFramework().stop()
        # NOTE(review): appuifw (PyS60 UI module) is never imported in
        # this file — calling close() will raise NameError here; this
        # looks like a leftover from the Symbian port, like the
        # commented-out e32 lock calls.
        appuifw.app.set_exit()
        #self._flyerLock.signal()
# Start serving immediately on import; run() blocks in the accept loop.
flyerDaemon = FlyerDaemon()
flyerDaemon.run()
| Python |
import socket
debug = True
## Trace debugging messages.
# @param aString String to be printed.
def printd( aString ):
    """Emit *aString* on stdout when the module-level debug flag is set."""
    if debug:
        print(aString)
# FlyerSettings
# define the default settings
class FlyerSettings( dict ):
    """Dictionary of the framework's default settings.

    Every instance carries the same defaults; callers index it like a
    plain dict, e.g. ``FlyerSettings()["PORT"]``.
    """
    ## The constructor
    # @param self: The object pointer.
    def __init__( self ):
        dict.__init__(self)
        self["SERVER"] = "127.0.0.1"
        self["PORT"] = "843"               # Flash socket-policy port
        self["MAX_CLIENT"] = 2000          # listen() backlog
        self["BUFFER_SIZE"] = 1024         # recv() chunk size
        self["FLYER_VERSION"] = "1,0,20"
        # Fix: the original chained assignment also bound a stray local
        # name (_policyFile) that was never used; the value is unchanged
        # (NUL-terminated, as Flash XMLSocket framing expects).
        self["POLICY_FILE"] = '<?xml version="1.0" encoding="UTF-8"?><cross-domain-policy xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="http://www.adobe.com/xml/schemas/PolicyFileSocket.xsd"><allow-access-from domain="*" to-ports="*" secure="false" /><site-control permitted-cross-domain-policies="master-only" /></cross-domain-policy>\0'
# Flyer Maemo
# Define the hello world application
class FlyerMaemo:
    """Demo command provider: greets the caller by name."""
    ## The constructor.
    # @param self: The object pointer.
    # @param aFilePath: file path (stored only when supplied)
    def __init__( self, aFilePath=None ):
        if aFilePath is not None:
            self._filePath = aFilePath
    ## Return a greeting for *aText*, defaulting to "Maemo".
    # @param aCallback: accepted for interface compatibility; unused.
    def _helloMaemo(self, aText=None, aCallback=None):
        # Fix: compare to None with `is`, not `==` (PEP 8; avoids
        # surprises from custom __eq__ implementations).
        if aText is None:
            aText = "Maemo"
        return "Hello " + aText + "!"
# FlyerServerSocket
class FlyerServerSocket:
    """Blocking TCP server: answers Flash policy-file requests and
    dispatches colon-delimited "command:value" messages through
    _iCommands. Handles one client at a time in a sequential loop.
    """
    ## The constructor.
    # @param self: The object pointer.
    # @param aHost: Host name to connect to.
    # @param aPort: Port on the host to bind.
    # @param aMaxClient: Maximum number of concurrent connection.
    def __init__( self, aHost, aPort, aMaxClient):
        self._connector = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
        self._connector.bind ( ( str(aHost), int(aPort) ) )
        self._connector.listen ( int(aMaxClient) )
        self._channel = None            # socket of the most recent client
        self._details = None            # (host, port) of the most recent client
        self._running = 1               # accept-loop flag; cleared by close()
        self._iMaxClient = int(aMaxClient)
        self._setCommands()
        printd("Flyer Interface: (%s:%s) - concurrent connection: %s"%(aHost,
                                                                       aPort,
                                                                       aMaxClient))
    ## Build the command-name -> callable dispatch table.
    def _setCommands( self ):
        self._iFlyerMaemo = FlyerMaemo()
        self._iCommands = {}
        self._iCommands["sayHello"] = self._iFlyerMaemo._helloMaemo
    ## Broadcast the message to connected clients.
    # @param self: The object pointer
    # @param aText: Message for the client; NUL-terminated before sending
    #               (Flash XMLSocket framing).
    def _broadcastMessage(self, aText):
        aText += '\0'
        self._channel.send(aText)
    ## Watch for channel incoming requests.
    # @param self: The object pointer.
    # @param aData: Data received from the client, "command:value" framed
    #               with NULs.
    def _channelWatcher( self, aData ):
        msg = aData.replace("\0", "")
        arrData = msg.split(":")
        iParametersDict = {}
        # printd("\n\nMessage received: " + aData)
        if("policy-file-request" not in msg):
            # NOTE(review): msg is always a str here (replace() result),
            # so this check is always true.
            if msg is not None:
                try:
                    iParametersDict['COMMAND'] = arrData[0] # command name
                    iParametersDict['VALUES'] = arrData[1] # parameters
                    # printd("Received command " + arrData[0])
                except:
                    printd("Invalid command/argument. Details Error #0002")
                try:
                    self._broadcastMessage( self._iCommands[iParametersDict['COMMAND']]( iParametersDict['VALUES'] ) )
                    # Keep reading from the same client (recursively) until
                    # it sends "exit" or the connection goes quiet.
                    block = ""
                    while "exit" not in block:
                        # printd(len(block))
                        block = self._channel.recv( FlyerSettings()["BUFFER_SIZE"] )
                        if(len(block) > 0):
                            self._channelWatcher( block )
                            break
                except KeyError:
                    printd("Invalid command. Details Error #0001")
        else:
            # Flash asked for the cross-domain policy file.
            self._channel.send( FlyerSettings()["POLICY_FILE"] )
            #self._channelWatcher( self._channel.recv( FlyerSettings()["BUFFER_SIZE"] ))
    ## Listen interface for incomming connection. Handle incomming connection request.
    # @param self: The object pointer.
    def _connectionHandler( self ):
        printd("Wait for incoming connection...")
        while self._running:
            # accept() blocks; each client is served sequentially.
            channel, details = self._connector.accept()
            self._channel = channel
            self._details = details
            if self._running:
                printd( 'New connection with: ' + str(details) )
                self._channel.setblocking( 1 )
                #self._channel.recv(FlyerSettings()["BUFFER_SIZE"], cb=self._channelWatcher)
                self._channelWatcher( self._channel.recv( FlyerSettings()["BUFFER_SIZE"] ))
                ##printd( 'host: ' + str(details[0]) )
                #printd( 'port: ' + str(details[1]) )
        printd("Closing incoming connection")
    ## Start the handler
    # @param self the object pointer.
    def start( self ):
        printd( "Starting Flyer Framework for Maemo v." + FlyerSettings()["FLYER_VERSION"] )
        self._connectionHandler()
    ## Close server
    # @param self The object pointer.
    # @param aFrame Frame.
    def close( self ):
        # Only clears the flag; an accept() already in progress must
        # still return before the loop notices.
        printd("Closing FlyerFramework")
        self._running = 0
## FlyerFramework singletong
class FlyerFramework( object ):
    """Singleton handle around FlyerFrameworkClass (classic handle/body
    singleton recipe): all instances share one implementation object."""
    ## Stores the unique Singleton instance-
    _iInstance = None
    ## Flyer class declaration
    class FlyerFrameworkClass:
        ## The constructor.
        # @param self: The object pointer.
        def __init__( self ):
            self._iSocketServer = None
        ## Start the server.
        # @param self The object pointer.
        def start(self):
            self._iSocketServer = FlyerServerSocket(
                FlyerSettings()["SERVER"],
                FlyerSettings()["PORT"],
                FlyerSettings()["MAX_CLIENT"])
            # Blocks in the accept loop until close() is called.
            self._iSocketServer.start()
            printd("FlyerFramework started")
        ## Stop the server.
        # @param self The object pointer.
        def stop(self):
            if self._iSocketServer:
                self._iSocketServer.close()
                del self._iSocketServer
                self._iSocketServer = None
                printd("FlyerFramework stopped")
        ## Restart server.
        # @param self The object pointer.
        def restart(self):
            printd("Restarting FlyerFramework")
            self.stop()
            self.start()
        ## The destructor
        # @param self: The object pointer
        def __del__( self ):
            self.stop()
    ###########################################################################
    # Singleton accessors
    ###########################################################################
    ## The constructor
    # @param self The object pointer.
    def __init__( self ):
        # Check whether we already have an instance
        if FlyerFramework._iInstance is None:
            # Create and remember instance
            FlyerFramework._iInstance = FlyerFramework.FlyerFrameworkClass()
        # Store instance reference as the only member in the handle.
        # NOTE(review): the '_EventHandler_instance' key is never read —
        # presumably left over from the recipe this was copied from.
        self.__dict__['_EventHandler_instance'] = FlyerFramework._iInstance
    ## Delegate access to implementation.
    # @param self The object pointer.
    # @param aAttr Attribute wanted.
    # @return Attribute
    def __getattr__(self, aAttr):
        # _iInstance resolves via the class attribute, so no recursion.
        return getattr(self._iInstance, aAttr)
    ## Delegate access to implementation.
    # @param self The object pointer.
    # @param aAttr Attribute wanted.
    # @param aValue Value to be set.
    # @return Result of operation.
    def __setattr__(self, aAttr, aValue):
        return setattr(self._iInstance, aAttr, aValue )
## Flyer daemon server class.
#  Thin application wrapper that runs the FlyerFramework singleton in the
#  PyS60/Maemo appuifw environment.
class FlyerDaemon:
    ## Run the server (blocks in the framework's accept loop).
    # @param self The object pointer.
    def run(self):
        #self._flyerLock = e32.Ao_lock()
        FlyerFramework().start()
        #self._flyerLock.wait()
    ## Destructor. Stops the networking servers and exits the application.
    # @param self The object pointer.
    def close(self):
        printd("Closing Flyer Framework")
        FlyerFramework().stop()
        # Ask the S60 UI framework to terminate the application.
        appuifw.app.set_exit()
        #self._flyerLock.signal()
# Module-level entry point: construct the daemon and start serving
# immediately when this module is loaded.
# NOTE(review): consider guarding with `if __name__ == '__main__':` so that
# importing this module does not start the server as a side effect.
flyerDaemon = FlyerDaemon()
flyerDaemon.run()
| Python |
#!/usr/bin/python
"""Download the latest WebKit nightly build and install it into appFolder."""
import os
import os.path
import sys
# you're going to want to change this location.
appFolder = "/Volumes/srv/Users/gus/Applications"

if not os.path.exists(appFolder):
    print("The folder " + appFolder + " does not exist.")
    sys.exit(1)

# Fetch the nightly-build RSS feed; the first <guid> element holds the
# download URL of the newest build.
s = os.popen("curl http://nightly.webkit.org/builds/trunk/mac/rss").read()
sloc = s.find('<guid>')
eloc = s.find('</guid>')
# Bug fix: bail out instead of slicing garbage when the feed has no <guid>
# (e.g. network failure returns an empty/error page and find() gives -1).
if sloc == -1 or eloc == -1:
    print("Could not find a <guid> entry in the RSS feed.")
    sys.exit(1)
#url = "http://builds.nightly.webkit.org/" + s[sloc+10:eloc + 4]
url = s[sloc + 6:eloc]  # 6 == len('<guid>')
print(url)

# Download the disk image, swap the new WebKit.app into place, clean up.
os.chdir("/tmp")
os.popen("curl -O '" + url + "'")  # quoted: the URL comes from an external feed
os.popen("/usr/bin/hdiutil mount WebKit-SVN*.dmg")
os.popen("killall Safari")
os.popen("rm -r '" + appFolder + "/WebKit.app'")
os.popen("cp -rp /Volumes/WebKit/WebKit.app '" + appFolder + "/'")
os.popen("hdiutil detach /Volumes/WebKit")
os.popen("rm /tmp/WebKit-SVN*.dmg")
os.popen("open -a WebKit")
| Python |
#!/usr/bin/env python3.0
'''
HELLO,
This is Gus's command line interface for Time Machine, because he can't stand the built in way to restore.
There are probably alllllllll kinds of problems with this script, so use at your own risk.
BUILT WITH BBEDIT
'''
import os
import os.path
import sys
import shutil
baseDir = "/Volumes/Drobo/Backups.backupdb/srv/"
def main(argv=None):
if argv is None:
argv = sys.argv
#opts, args = getopt.getopt(sys.argv[1:], "d:")
file = sys.argv[1]
fullPath = os.path.abspath(file)
if fullPath.startswith("/Volumes"):
fullPath = fullPath[8:]
fileLocations = []
print("Please be patient danielson. Looking for copies of:\n" + fullPath)
print()
print(" * <-- imagine that's spinning or something, like a good mac os app would do")
print()
for base in os.listdir(baseDir):
if base.endswith(".inProgress"):
continue
tmPath = baseDir + base + fullPath
if os.path.exists(tmPath):
fileLocations.append(tmPath)
fileLocations.sort()
command = "l"
while True:
if command.startswith("k"):
print("Goodbye!")
sys.exit(0)
elif command == "l":
idx = 0
for l in fileLocations:
idx = idx + 1
l = l[len(baseDir):]
print("%d %s" % (idx, l))
elif command.startswith("q"):
(junk, index) = command.split()
v = int(index) - 1
os.popen("/usr/bin/qlmanage -p '" + fileLocations[v] + "' 2>&1").read()
elif command.startswith("r"):
(junk, index) = command.split()
v = int(index) - 1
print("Restore " + fileLocations[v] + "? (y/n)")
if sys.stdin.readline().strip() == "y":
print("Copying...")
shutil.copy(fileLocations[v], sys.argv[1])
print("All done.")
else:
print("Ok, I thought you were just kidding. I had to make sure though.")
else:
print("Unknown command '%s'" % (command))
print("q [num] to quicklook, k to quit, r [num] to restore")
command = sys.stdin.readline().strip()
if len(command) == 0:
command = "k"
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/env python3.0
'''
HELLO,
This is Gus's command line interface for Time Machine, because he can't stand the built in way to restore.
There are probably alllllllll kinds of problems with this script, so use at your own risk.
BUILT WITH BBEDIT
'''
import os
import os.path
import sys
import shutil
baseDir = "/Volumes/Drobo/Backups.backupdb/srv/"
def main(argv=None):
if argv is None:
argv = sys.argv
#opts, args = getopt.getopt(sys.argv[1:], "d:")
file = sys.argv[1]
fullPath = os.path.abspath(file)
if fullPath.startswith("/Volumes"):
fullPath = fullPath[8:]
fileLocations = []
print("Please be patient danielson. Looking for copies of:\n" + fullPath)
print()
print(" * <-- imagine that's spinning or something, like a good mac os app would do")
print()
for base in os.listdir(baseDir):
if base.endswith(".inProgress"):
continue
tmPath = baseDir + base + fullPath
if os.path.exists(tmPath):
fileLocations.append(tmPath)
fileLocations.sort()
command = "l"
while True:
if command.startswith("k"):
print("Goodbye!")
sys.exit(0)
elif command == "l":
idx = 0
for l in fileLocations:
idx = idx + 1
l = l[len(baseDir):]
print("%d %s" % (idx, l))
elif command.startswith("q"):
(junk, index) = command.split()
v = int(index) - 1
os.popen("/usr/bin/qlmanage -p '" + fileLocations[v] + "' 2>&1").read()
elif command.startswith("r"):
(junk, index) = command.split()
v = int(index) - 1
print("Restore " + fileLocations[v] + "? (y/n)")
if sys.stdin.readline().strip() == "y":
print("Copying...")
shutil.copy(fileLocations[v], sys.argv[1])
print("All done.")
else:
print("Ok, I thought you were just kidding. I had to make sure though.")
else:
print("Unknown command '%s'" % (command))
print("q [num] to quicklook, k to quit, r [num] to restore")
command = sys.stdin.readline().strip()
if len(command) == 0:
command = "k"
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python
"""Download the latest WebKit nightly build and install it into appFolder."""
import os
import os.path
import sys
# you're going to want to change this location.
appFolder = "/Volumes/srv/Users/gus/Applications"

if not os.path.exists(appFolder):
    print("The folder " + appFolder + " does not exist.")
    sys.exit(1)

# Fetch the nightly-build RSS feed; the first <guid> element holds the
# download URL of the newest build.
s = os.popen("curl http://nightly.webkit.org/builds/trunk/mac/rss").read()
sloc = s.find('<guid>')
eloc = s.find('</guid>')
# Bug fix: bail out instead of slicing garbage when the feed has no <guid>
# (e.g. network failure returns an empty/error page and find() gives -1).
if sloc == -1 or eloc == -1:
    print("Could not find a <guid> entry in the RSS feed.")
    sys.exit(1)
#url = "http://builds.nightly.webkit.org/" + s[sloc+10:eloc + 4]
url = s[sloc + 6:eloc]  # 6 == len('<guid>')
print(url)

# Download the disk image, swap the new WebKit.app into place, clean up.
os.chdir("/tmp")
os.popen("curl -O '" + url + "'")  # quoted: the URL comes from an external feed
os.popen("/usr/bin/hdiutil mount WebKit-SVN*.dmg")
os.popen("killall Safari")
os.popen("rm -r '" + appFolder + "/WebKit.app'")
os.popen("cp -rp /Volumes/WebKit/WebKit.app '" + appFolder + "/'")
os.popen("hdiutil detach /Volumes/WebKit")
os.popen("rm /tmp/WebKit-SVN*.dmg")
os.popen("open -a WebKit")
| Python |
#!/usr/bin/python -u
# -*- coding: utf-8 -*-
""" Python Highlighter Version: 0.8
py2html.py [options] files...
options:
-h print help
- read from stdin, write to stdout
-stdout read from files, write to stdout
-files read from files, write to filename+'.html' (default)
-format:
html output XHTML page (default)
rawhtml output pure XHTML (without headers, titles, etc.)
-mode:
color output in color (default)
mono output b/w (for printing)
-title:Title use 'Title' as title of the generated page
-bgcolor:color use color as background-color for page
-header:file use contents of file as header
-footer:file use contents of file as footer
-URL replace all occurances of 'URL: link' with
'<a href="link">link</a>'; this is always enabled
in CGI mode
-v verbose
Takes the input, assuming it is Python code and formats it into
colored XHTML. When called without parameters the script tries to
work in CGI mode. It looks for a field 'script=URL' and tries to
use that URL as input file. If it can't find this field, the path
info (the part of the URL following the CGI script name) is
tried. In case no host is given, the host where the CGI script
lives and HTTP are used.
* Uses Just van Rossum's PyFontify version 0.3 to tag Python scripts.
You can get it via his homepage on starship:
URL: http://starship.python.net/crew/just
"""
__comments__ = """
The following snippet is a small shell script I use for viewing
Python scripts via less on Unix:
pyless:
#!/bin/sh
# Browse pretty printed Python code using ANSI codes for highlighting
py2html -stdout -format:ansi -mode:color $* | less -r
History:
0.8: Added patch by Patrick Lynch to have py2html.py use style
sheets for markup
0.7: Added patch by Ville Skytt‰ to make py2html.py output
valid XHTML.
0.6: Fixed a bug in .escape_html(); thanks to Vespe Savikko for
finding this one.
0.5: Added a few suggestions by Kevin Ng to make the CGI version
a little more robust.
"""
__copyright__ = """\
Copyright (c) 1998-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com
Copyright (c) 2000-2002, eGenix.com Software GmbH; mailto:info@egenix.com
Distributed under the terms and conditions of the eGenix.com Public
License. See http://www.egenix.com/files/python/mxLicense.html for
details, or contact the author. All Rights Reserved.\
"""
__version__ = '0.8'
__cgifooter__ = ('\n<pre># code highlighted using <a href='
'"http://www.lemburg.com/files/python/">py2html.py</a> '
'version %s</pre>\n' % __version__)
import sys,string,re
# Adjust path so that PyFontify is found...
sys.path.append('.')
### Constants
# URL of the input form the user is redirected to in case no script=xxx
# form field is given. The URL *must* be absolute. Leave blank to
# have the script issue an error instead.
INPUT_FORM = 'http://www.lemburg.com/files/python/SoftwareDescriptions.html#py2html.py'
# HTML DOCTYPE and XML namespace
HTML_DOCTYPE = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">'
HTML_XMLNS = ' xmlns="http://www.w3.org/1999/xhtml"'
### Helpers
def fileio(file, mode='rb', data=None, close=0):
    """Read from or write to *file*.

    *file* may be a filename or an open file object; filenames are
    opened with *mode* and closed afterwards.  When *data* is given it
    is written and returned; otherwise the file's contents are read and
    returned.
    """
    if isinstance(file, str):
        f = open(file, mode)
        close = 1
    else:
        f = file
    # Bug fix: test against None, not truthiness -- writing an empty
    # string is a valid request and must not fall through to the read
    # branch (which fails on a write-mode file).
    if data is not None:
        f.write(data)
    else:
        data = f.read()
    if close:
        f.close()
    return data
### Converter class

class PrettyPrint:

    """ generic Pretty Printer class

    * supports tagging Python scripts in the following ways:

      # format/mode |  color  mono
      # --------------------------
      # rawhtml     |    x     x    (HTML without headers, etc.)
      # html        |    x     x    (a HTML page with HEAD&BODY:)
      # ansi        |    x     x    (with Ansi-escape sequences)

    * interfaces:
       file_filter -- takes two files: input & output (may be stdin/stdout)
       filter -- takes a string and returns the highlighted version

    * to create an instance use:

      c = PrettyPrint(tagfct,format,mode)

      where format and mode must be strings according to the
      above table if you plan to use PyFontify.fontify as
      tagfct

    * the tagfct has to take one argument, text, and return a taglist
      (format: [(id,left,right,sublist),...], where id is the
      "name" given to the slice left:right in text and sublist is a
      taglist for tags inside the slice or None)

    """
    # misc settings
    title = ''
    bgcolor = '#FFFFFF'
    css = ''
    header = ''
    footer = ''
    replace_URLs = 0
    # formats to be used
    formats = {}

    def __init__(self,tagfct=None,format='html',mode='color'):
        # Resolve the mode-setup and output-filter methods by name once;
        # an unsupported format/mode combination fails fast here with
        # AttributeError instead of later inside filter().
        self.tag = tagfct
        self.set_mode = getattr(self,'set_mode_%s_%s' % (format, mode))
        self.filter = getattr(self,'filter_%s' % format)

    def file_filter(self,infile,outfile):
        """Convert *infile* and write the result to *outfile*.

        Both may be filenames or open file objects; a filename input
        also serves as the default page title.
        """
        self.set_mode()
        text = fileio(infile,'r')
        if type(infile) == type('') and self.title == '':
            self.title = infile
        fileio(outfile,'w',self.filter(text))

    ### Set pre- and postfixes for formats & modes
    #
    # These methods must set self.formats to a dictionary having
    # an entry for every tag returned by the tagging function.
    #
    # The format used is simple:
    #  tag:(prefix,postfix)
    # where prefix and postfix are either strings or callable objects,
    # that return a string (they are called with the matching tag text
    # as only parameter). prefix is inserted in front of the tag, postfix
    # is inserted right after the tag.

    def set_mode_html_color(self):
        """Colored HTML output via a small embedded CSS style sheet."""
        self.css = """
<STYLE TYPE="text/css">
<!--
body{ background: %s; }
.PY_KEYWORD{ color: #0000C0; font-weight: bold; }
.PY_COMMENT{ color: #000080; }
.PY_PARAMETER{ color: #C00000; }
.PY_IDENTIFIER{ color: #C00000; font-weight: bold; }
.PY_STRING{ color: #008000; }
-->
</STYLE> """ % self.bgcolor
        self.formats = {
            'all':('<pre>','</pre>'),
            'comment':('<span class="PY_COMMENT">','</span>'),
            'keyword':('<span class="PY_KEYWORD">','</span>'),
            'parameter':('<span class="PY_PARAMETER">','</span>'),
            # identifiers also become named anchors so they can be linked to
            'identifier':( lambda x,strip=string.strip:
                           '<a name="%s"><span class="PY_IDENTIFIER">' % (strip(x)),
                           '</span></a>'),
            'string':('<span class="PY_STRING">','</span>')
            }

    set_mode_rawhtml_color = set_mode_html_color

    def set_mode_html_mono(self):
        """Black & white HTML output (for printing)."""
        self.css = """
<STYLE TYPE="text/css">
<!--
body{ background-color: %s }
.PY_KEYWORD{ text-decoration: underline }
.PY_COMMENT{ }
.PY_PARAMETER{ }
.PY_IDENTIFIER{ font-weight: bold}
.PY_STRING{ font-style: italic}
-->
</STYLE> """ % self.bgcolor
        self.formats = {
            'all':('<pre>','</pre>'),
            'comment':('<span class="PY_COMMENT">','</span>'),
            'keyword':( '<span class="PY_KEYWORD">','</span>'),
            'parameter':('<span class="PY_PARAMETER">','</span>'),
            'identifier':( lambda x,strip=string.strip:
                           '<a name="%s"><span class="PY_IDENTIFIER">' % (strip(x)),
                           '</span></a>'),
            'string':('<span class="PY_STRING">','</span>')
            }

    set_mode_rawhtml_mono = set_mode_html_mono

    def set_mode_ansi_mono(self):
        """B/w terminal output using ANSI escape sequences."""
        self.formats = {
            'all':('',''),
            'comment':('\033[2m','\033[m'),
            'keyword':('\033[4m','\033[m'),
            'parameter':('',''),
            'identifier':('\033[1m','\033[m'),
            'string':('','')
            }

    def set_mode_ansi_color(self):
        """Colored terminal output using ANSI escape sequences."""
        self.formats = {
            'all':('',''),
            'comment':('\033[34;2m','\033[m'),
            'keyword':('\033[1;34m','\033[m'),
            'parameter':('',''),
            'identifier':('\033[1;31m','\033[m'),
            'string':('\033[32;2m','\033[m')
            }

    ### Filters for Python scripts given as string

    def escape_html(self,text):
        """Escape the HTML metacharacters '&', '<' and '>' in *text*.

        Bug fix: the replacement table had degenerated into identity
        mappings (each character replaced by itself), so no escaping
        happened at all.  '&' is processed first so the entities
        inserted for '<' and '>' are not escaped a second time.
        """
        t = (('&','&amp;'),('<','&lt;'),('>','&gt;'))
        for x,y in t:
            text = text.replace(x,y)
        return text

    def filter_html(self,text):
        """Return *text* highlighted as a complete XHTML page."""
        output = self.fontify(self.escape_html(text))
        if self.replace_URLs:
            output = re.sub('URL:([ \t]+)([^ \n\r<]+)',
                            'URL:\\1<a href="\\2">\\2</a>',output)
        html = """%s<html%s>
<head>
<title>%s</title>
<!--css-->
%s
</head>
<body>
<!--header-->
%s
<!--script-->
%s
<!--footer-->
%s
</body></html>\n"""%(HTML_DOCTYPE,
                     HTML_XMLNS,
                     self.title,
                     self.css,
                     self.header,
                     output,
                     self.footer)
        return html

    def filter_rawhtml(self,text):
        """Return *text* highlighted as an XHTML fragment (no page skeleton)."""
        output = self.fontify(self.escape_html(text))
        if self.replace_URLs:
            output = re.sub('URL:([ \t]+)([^ \n\r<]+)',
                            'URL:\\1<a href="\\2">\\2</a>',output)
        return self.header + output + self.footer

    def filter_ansi(self,text):
        """Return *text* with ANSI escape markup (no HTML escaping needed)."""
        output = self.fontify(text)
        return self.header + output + self.footer

    ### Fontify engine

    def fontify(self,pytext):
        """Run the tagging function over *pytext* and splice the configured
        prefix/postfix markup into the text at the tagged positions."""
        # parse
        taglist = self.tag(pytext)
        # prepend special 'all' tag:
        taglist[:0] = [('all',0,len(pytext),None)]
        # prepare splitting
        splits = []
        addsplits(splits,pytext,self.formats,taglist)
        # do splitting & inserting
        splits.sort()
        l = []
        li = 0
        for ri,dummy,insert in splits:
            if ri > li: l.append(pytext[li:ri])
            l.append(insert)
            li = ri
        if li < len(pytext): l.append(pytext[li:])
        return string.join(l,'')
def addsplits(splits, text, formats, taglist):
    """ Helper for .fontify()

    Recursively walks *taglist* and appends (position, sequence, markup)
    triples to *splits*; the sequence number keeps the later sort stable.
    """
    for tagname, left, right, sublist in taglist:
        # Unknown tags simply get empty markup rather than raising.
        pre, post = formats.get(tagname, ('', ''))
        # Prefix/postfix entries may be callables taking the tagged text.
        if not isinstance(pre, str):
            pre = pre(text[left:right])
        if not isinstance(post, str):
            post = post(text[left:right])
        splits.append((left, len(splits), pre))
        if sublist:
            addsplits(splits, text, formats, sublist)
        splits.append((right, len(splits), post))
def write_html_error(titel, text):
    """Emit a minimal XHTML error page (title + message) to stdout."""
    page = """\
%s<html%s><head><title>%s</title></head>
<body>
<h2>%s</h2>
%s
</body></html>
""" % (HTML_DOCTYPE, HTML_XMLNS, titel, titel, text)
    sys.stdout.write(page + '\n')
def redirect_to(url):
    """Send an HTTP 302 redirect (CGI headers plus HTML body) to stdout."""
    for header in ('Content-Type: text/html\r\n',
                   'Status: 302\r\n',
                   'Location: %s\r\n\r\n' % url):
        sys.stdout.write(header)
    body = """
%s<html%s><head>
<title>302 Moved Temporarily</title>
</head><body>
<h1>302 Moved Temporarily</h1>
The document has moved to <a href="%s">%s</a>.<p></p>
</body></html>
""" % (HTML_DOCTYPE, HTML_XMLNS, url, url)
    sys.stdout.write(body + '\n')
def main(cmdline):
""" main(cmdline) -- process cmdline as if it were sys.argv
"""
# parse options/files
options = []
optvalues = {}
for o in cmdline[1:]:
if o[0] == '-':
if ':' in o:
k,v = tuple(string.split(o,':'))
optvalues[k] = v
options.append(k)
else:
options.append(o)
else:
break
files = cmdline[len(options)+1:]
### create converting object
# load fontifier
if '-marcs' in options:
# use mxTextTool's tagging engine as fontifier
from mx.TextTools import tag
from mx.TextTools.Examples.Python import python_script
tagfct = lambda text,tag=tag,pytable=python_script: \
tag(text,pytable)[1]
print "Py2HTML: using Marc's tagging engine"
else:
# load Just's fontifier
try:
import PyFontify
if PyFontify.__version__ < '0.3': raise ValueError
tagfct = PyFontify.fontify
except:
print """
Sorry, but this script needs the PyFontify.py module version 0.3;
You can download it from Just's homepage at
URL: http://starship.python.net/crew/just
"""
sys.exit()
if '-format' in options:
format = optvalues['-format']
else:
# use default
format = 'html'
if '-mode' in options:
mode = optvalues['-mode']
else:
# use default
mode = 'color'
c = PrettyPrint(tagfct,format,mode)
convert = c.file_filter
### start working
if '-title' in options:
c.title = optvalues['-title']
if '-bgcolor' in options:
c.bgcolor = optvalues['-bgcolor']
if '-header' in options:
try:
f = open(optvalues['-header'])
c.header = f.read()
f.close()
except IOError:
if verbose: print 'IOError: header file not found'
if '-footer' in options:
try:
f = open(optvalues['-footer'])
c.footer = f.read()
f.close()
except IOError:
if verbose: print 'IOError: footer file not found'
if '-URL' in options:
c.replace_URLs = 1
if '-' in options:
convert(sys.stdin,sys.stdout)
sys.exit()
if '-h' in options:
print __doc__
sys.exit()
if len(files) == 0:
# Turn URL processing on
c.replace_URLs = 1
# Try CGI processing...
import cgi,urllib,urlparse,os
form = cgi.FieldStorage()
if not form.has_key('script'):
# Ok, then try pathinfo
if not os.environ.has_key('PATH_INFO'):
if INPUT_FORM:
redirect_to(INPUT_FORM)
else:
sys.stdout.write('Content-Type: text/html\r\n\r\n')
write_html_error('Missing Parameter',
'Missing script=URL field in request')
sys.exit(1)
url = os.environ['PATH_INFO'][1:] # skip the leading slash
else:
url = form['script'].value
sys.stdout.write('Content-Type: text/html\r\n\r\n')
scheme, host, path, params, query, frag = urlparse.urlparse(url)
if not host:
scheme = 'http'
if os.environ.has_key('HTTP_HOST'):
host = os.environ['HTTP_HOST']
else:
host = 'localhost'
url = urlparse.urlunparse((scheme, host, path, params, query, frag))
#print url; sys.exit()
network = urllib.URLopener()
try:
tempfile,headers = network.retrieve(url)
except IOError,reason:
write_html_error('Error opening "%s"' % url,
'The given URL could not be opened. Reason: %s' %\
str(reason))
sys.exit(1)
f = open(tempfile,'rb')
c.title = url
c.footer = __cgifooter__
convert(f,sys.stdout)
f.close()
network.close()
sys.exit()
if '-stdout' in options:
filebreak = '-'*72
for f in files:
try:
if len(files) > 1:
print filebreak
print 'File:',f
print filebreak
convert(f,sys.stdout)
except IOError:
pass
else:
verbose = ('-v' in options)
if verbose:
print 'Py2HTML: working on',
for f in files:
try:
if verbose: print f,
convert(f,f+'.html')
except IOError:
if verbose: print '(IOError!)',
if verbose:
print
print 'Done.'
if __name__=='__main__':
main(sys.argv)
| Python |
#!/usr/bin/python -u
# -*- coding: utf-8 -*-
""" Python Highlighter Version: 0.8
py2html.py [options] files...
options:
-h print help
- read from stdin, write to stdout
-stdout read from files, write to stdout
-files read from files, write to filename+'.html' (default)
-format:
html output XHTML page (default)
rawhtml output pure XHTML (without headers, titles, etc.)
-mode:
color output in color (default)
mono output b/w (for printing)
-title:Title use 'Title' as title of the generated page
-bgcolor:color use color as background-color for page
-header:file use contents of file as header
-footer:file use contents of file as footer
-URL replace all occurances of 'URL: link' with
'<a href="link">link</a>'; this is always enabled
in CGI mode
-v verbose
Takes the input, assuming it is Python code and formats it into
colored XHTML. When called without parameters the script tries to
work in CGI mode. It looks for a field 'script=URL' and tries to
use that URL as input file. If it can't find this field, the path
info (the part of the URL following the CGI script name) is
tried. In case no host is given, the host where the CGI script
lives and HTTP are used.
* Uses Just van Rossum's PyFontify version 0.3 to tag Python scripts.
You can get it via his homepage on starship:
URL: http://starship.python.net/crew/just
"""
__comments__ = """
The following snippet is a small shell script I use for viewing
Python scripts via less on Unix:
pyless:
#!/bin/sh
# Browse pretty printed Python code using ANSI codes for highlighting
py2html -stdout -format:ansi -mode:color $* | less -r
History:
0.8: Added patch by Patrick Lynch to have py2html.py use style
sheets for markup
0.7: Added patch by Ville Skytt‰ to make py2html.py output
valid XHTML.
0.6: Fixed a bug in .escape_html(); thanks to Vespe Savikko for
finding this one.
0.5: Added a few suggestions by Kevin Ng to make the CGI version
a little more robust.
"""
__copyright__ = """\
Copyright (c) 1998-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com
Copyright (c) 2000-2002, eGenix.com Software GmbH; mailto:info@egenix.com
Distributed under the terms and conditions of the eGenix.com Public
License. See http://www.egenix.com/files/python/mxLicense.html for
details, or contact the author. All Rights Reserved.\
"""
__version__ = '0.8'
__cgifooter__ = ('\n<pre># code highlighted using <a href='
'"http://www.lemburg.com/files/python/">py2html.py</a> '
'version %s</pre>\n' % __version__)
import sys,string,re
# Adjust path so that PyFontify is found...
sys.path.append('.')
### Constants
# URL of the input form the user is redirected to in case no script=xxx
# form field is given. The URL *must* be absolute. Leave blank to
# have the script issue an error instead.
INPUT_FORM = 'http://www.lemburg.com/files/python/SoftwareDescriptions.html#py2html.py'
# HTML DOCTYPE and XML namespace
HTML_DOCTYPE = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">'
HTML_XMLNS = ' xmlns="http://www.w3.org/1999/xhtml"'
### Helpers
def fileio(file, mode='rb', data=None, close=0):
    """Read from or write to *file*.

    *file* may be a filename or an open file object; filenames are
    opened with *mode* and closed afterwards.  When *data* is given it
    is written and returned; otherwise the file's contents are read and
    returned.
    """
    if isinstance(file, str):
        f = open(file, mode)
        close = 1
    else:
        f = file
    # Bug fix: test against None, not truthiness -- writing an empty
    # string is a valid request and must not fall through to the read
    # branch (which fails on a write-mode file).
    if data is not None:
        f.write(data)
    else:
        data = f.read()
    if close:
        f.close()
    return data
### Converter class

class PrettyPrint:

    """ generic Pretty Printer class

    * supports tagging Python scripts in the following ways:

      # format/mode |  color  mono
      # --------------------------
      # rawhtml     |    x     x    (HTML without headers, etc.)
      # html        |    x     x    (a HTML page with HEAD&BODY:)
      # ansi        |    x     x    (with Ansi-escape sequences)

    * interfaces:
       file_filter -- takes two files: input & output (may be stdin/stdout)
       filter -- takes a string and returns the highlighted version

    * to create an instance use:

      c = PrettyPrint(tagfct,format,mode)

      where format and mode must be strings according to the
      above table if you plan to use PyFontify.fontify as
      tagfct

    * the tagfct has to take one argument, text, and return a taglist
      (format: [(id,left,right,sublist),...], where id is the
      "name" given to the slice left:right in text and sublist is a
      taglist for tags inside the slice or None)

    """
    # misc settings
    title = ''
    bgcolor = '#FFFFFF'
    css = ''
    header = ''
    footer = ''
    replace_URLs = 0
    # formats to be used
    formats = {}

    def __init__(self,tagfct=None,format='html',mode='color'):
        # Resolve the mode-setup and output-filter methods by name once;
        # an unsupported format/mode combination fails fast here with
        # AttributeError instead of later inside filter().
        self.tag = tagfct
        self.set_mode = getattr(self,'set_mode_%s_%s' % (format, mode))
        self.filter = getattr(self,'filter_%s' % format)

    def file_filter(self,infile,outfile):
        """Convert *infile* and write the result to *outfile*.

        Both may be filenames or open file objects; a filename input
        also serves as the default page title.
        """
        self.set_mode()
        text = fileio(infile,'r')
        if type(infile) == type('') and self.title == '':
            self.title = infile
        fileio(outfile,'w',self.filter(text))

    ### Set pre- and postfixes for formats & modes
    #
    # These methods must set self.formats to a dictionary having
    # an entry for every tag returned by the tagging function.
    #
    # The format used is simple:
    #  tag:(prefix,postfix)
    # where prefix and postfix are either strings or callable objects,
    # that return a string (they are called with the matching tag text
    # as only parameter). prefix is inserted in front of the tag, postfix
    # is inserted right after the tag.

    def set_mode_html_color(self):
        """Colored HTML output via a small embedded CSS style sheet."""
        self.css = """
<STYLE TYPE="text/css">
<!--
body{ background: %s; }
.PY_KEYWORD{ color: #0000C0; font-weight: bold; }
.PY_COMMENT{ color: #000080; }
.PY_PARAMETER{ color: #C00000; }
.PY_IDENTIFIER{ color: #C00000; font-weight: bold; }
.PY_STRING{ color: #008000; }
-->
</STYLE> """ % self.bgcolor
        self.formats = {
            'all':('<pre>','</pre>'),
            'comment':('<span class="PY_COMMENT">','</span>'),
            'keyword':('<span class="PY_KEYWORD">','</span>'),
            'parameter':('<span class="PY_PARAMETER">','</span>'),
            # identifiers also become named anchors so they can be linked to
            'identifier':( lambda x,strip=string.strip:
                           '<a name="%s"><span class="PY_IDENTIFIER">' % (strip(x)),
                           '</span></a>'),
            'string':('<span class="PY_STRING">','</span>')
            }

    set_mode_rawhtml_color = set_mode_html_color

    def set_mode_html_mono(self):
        """Black & white HTML output (for printing)."""
        self.css = """
<STYLE TYPE="text/css">
<!--
body{ background-color: %s }
.PY_KEYWORD{ text-decoration: underline }
.PY_COMMENT{ }
.PY_PARAMETER{ }
.PY_IDENTIFIER{ font-weight: bold}
.PY_STRING{ font-style: italic}
-->
</STYLE> """ % self.bgcolor
        self.formats = {
            'all':('<pre>','</pre>'),
            'comment':('<span class="PY_COMMENT">','</span>'),
            'keyword':( '<span class="PY_KEYWORD">','</span>'),
            'parameter':('<span class="PY_PARAMETER">','</span>'),
            'identifier':( lambda x,strip=string.strip:
                           '<a name="%s"><span class="PY_IDENTIFIER">' % (strip(x)),
                           '</span></a>'),
            'string':('<span class="PY_STRING">','</span>')
            }

    set_mode_rawhtml_mono = set_mode_html_mono

    def set_mode_ansi_mono(self):
        """B/w terminal output using ANSI escape sequences."""
        self.formats = {
            'all':('',''),
            'comment':('\033[2m','\033[m'),
            'keyword':('\033[4m','\033[m'),
            'parameter':('',''),
            'identifier':('\033[1m','\033[m'),
            'string':('','')
            }

    def set_mode_ansi_color(self):
        """Colored terminal output using ANSI escape sequences."""
        self.formats = {
            'all':('',''),
            'comment':('\033[34;2m','\033[m'),
            'keyword':('\033[1;34m','\033[m'),
            'parameter':('',''),
            'identifier':('\033[1;31m','\033[m'),
            'string':('\033[32;2m','\033[m')
            }

    ### Filters for Python scripts given as string

    def escape_html(self,text):
        """Escape the HTML metacharacters '&', '<' and '>' in *text*.

        Bug fix: the replacement table had degenerated into identity
        mappings (each character replaced by itself), so no escaping
        happened at all.  '&' is processed first so the entities
        inserted for '<' and '>' are not escaped a second time.
        """
        t = (('&','&amp;'),('<','&lt;'),('>','&gt;'))
        for x,y in t:
            text = text.replace(x,y)
        return text

    def filter_html(self,text):
        """Return *text* highlighted as a complete XHTML page."""
        output = self.fontify(self.escape_html(text))
        if self.replace_URLs:
            output = re.sub('URL:([ \t]+)([^ \n\r<]+)',
                            'URL:\\1<a href="\\2">\\2</a>',output)
        html = """%s<html%s>
<head>
<title>%s</title>
<!--css-->
%s
</head>
<body>
<!--header-->
%s
<!--script-->
%s
<!--footer-->
%s
</body></html>\n"""%(HTML_DOCTYPE,
                     HTML_XMLNS,
                     self.title,
                     self.css,
                     self.header,
                     output,
                     self.footer)
        return html

    def filter_rawhtml(self,text):
        """Return *text* highlighted as an XHTML fragment (no page skeleton)."""
        output = self.fontify(self.escape_html(text))
        if self.replace_URLs:
            output = re.sub('URL:([ \t]+)([^ \n\r<]+)',
                            'URL:\\1<a href="\\2">\\2</a>',output)
        return self.header + output + self.footer

    def filter_ansi(self,text):
        """Return *text* with ANSI escape markup (no HTML escaping needed)."""
        output = self.fontify(text)
        return self.header + output + self.footer

    ### Fontify engine

    def fontify(self,pytext):
        """Run the tagging function over *pytext* and splice the configured
        prefix/postfix markup into the text at the tagged positions."""
        # parse
        taglist = self.tag(pytext)
        # prepend special 'all' tag:
        taglist[:0] = [('all',0,len(pytext),None)]
        # prepare splitting
        splits = []
        addsplits(splits,pytext,self.formats,taglist)
        # do splitting & inserting
        splits.sort()
        l = []
        li = 0
        for ri,dummy,insert in splits:
            if ri > li: l.append(pytext[li:ri])
            l.append(insert)
            li = ri
        if li < len(pytext): l.append(pytext[li:])
        return string.join(l,'')
def addsplits(splits, text, formats, taglist):
    """ Helper for .fontify()

    Recursively walks *taglist* and appends (position, sequence, markup)
    triples to *splits*; the sequence number keeps the later sort stable.
    """
    for tagname, left, right, sublist in taglist:
        # Unknown tags simply get empty markup rather than raising.
        pre, post = formats.get(tagname, ('', ''))
        # Prefix/postfix entries may be callables taking the tagged text.
        if not isinstance(pre, str):
            pre = pre(text[left:right])
        if not isinstance(post, str):
            post = post(text[left:right])
        splits.append((left, len(splits), pre))
        if sublist:
            addsplits(splits, text, formats, sublist)
        splits.append((right, len(splits), post))
def write_html_error(titel, text):
    """Emit a minimal XHTML error page (title + message) to stdout."""
    page = """\
%s<html%s><head><title>%s</title></head>
<body>
<h2>%s</h2>
%s
</body></html>
""" % (HTML_DOCTYPE, HTML_XMLNS, titel, titel, text)
    sys.stdout.write(page + '\n')
def redirect_to(url):
    """Send an HTTP 302 redirect (CGI headers plus HTML body) to stdout."""
    for header in ('Content-Type: text/html\r\n',
                   'Status: 302\r\n',
                   'Location: %s\r\n\r\n' % url):
        sys.stdout.write(header)
    body = """
%s<html%s><head>
<title>302 Moved Temporarily</title>
</head><body>
<h1>302 Moved Temporarily</h1>
The document has moved to <a href="%s">%s</a>.<p></p>
</body></html>
""" % (HTML_DOCTYPE, HTML_XMLNS, url, url)
    sys.stdout.write(body + '\n')
def main(cmdline):
""" main(cmdline) -- process cmdline as if it were sys.argv
"""
# parse options/files
options = []
optvalues = {}
for o in cmdline[1:]:
if o[0] == '-':
if ':' in o:
k,v = tuple(string.split(o,':'))
optvalues[k] = v
options.append(k)
else:
options.append(o)
else:
break
files = cmdline[len(options)+1:]
### create converting object
# load fontifier
if '-marcs' in options:
# use mxTextTool's tagging engine as fontifier
from mx.TextTools import tag
from mx.TextTools.Examples.Python import python_script
tagfct = lambda text,tag=tag,pytable=python_script: \
tag(text,pytable)[1]
print "Py2HTML: using Marc's tagging engine"
else:
# load Just's fontifier
try:
import PyFontify
if PyFontify.__version__ < '0.3': raise ValueError
tagfct = PyFontify.fontify
except:
print """
Sorry, but this script needs the PyFontify.py module version 0.3;
You can download it from Just's homepage at
URL: http://starship.python.net/crew/just
"""
sys.exit()
if '-format' in options:
format = optvalues['-format']
else:
# use default
format = 'html'
if '-mode' in options:
mode = optvalues['-mode']
else:
# use default
mode = 'color'
c = PrettyPrint(tagfct,format,mode)
convert = c.file_filter
### start working
if '-title' in options:
c.title = optvalues['-title']
if '-bgcolor' in options:
c.bgcolor = optvalues['-bgcolor']
if '-header' in options:
try:
f = open(optvalues['-header'])
c.header = f.read()
f.close()
except IOError:
if verbose: print 'IOError: header file not found'
if '-footer' in options:
try:
f = open(optvalues['-footer'])
c.footer = f.read()
f.close()
except IOError:
if verbose: print 'IOError: footer file not found'
if '-URL' in options:
c.replace_URLs = 1
if '-' in options:
convert(sys.stdin,sys.stdout)
sys.exit()
if '-h' in options:
print __doc__
sys.exit()
if len(files) == 0:
# Turn URL processing on
c.replace_URLs = 1
# Try CGI processing...
import cgi,urllib,urlparse,os
form = cgi.FieldStorage()
if not form.has_key('script'):
# Ok, then try pathinfo
if not os.environ.has_key('PATH_INFO'):
if INPUT_FORM:
redirect_to(INPUT_FORM)
else:
sys.stdout.write('Content-Type: text/html\r\n\r\n')
write_html_error('Missing Parameter',
'Missing script=URL field in request')
sys.exit(1)
url = os.environ['PATH_INFO'][1:] # skip the leading slash
else:
url = form['script'].value
sys.stdout.write('Content-Type: text/html\r\n\r\n')
scheme, host, path, params, query, frag = urlparse.urlparse(url)
if not host:
scheme = 'http'
if os.environ.has_key('HTTP_HOST'):
host = os.environ['HTTP_HOST']
else:
host = 'localhost'
url = urlparse.urlunparse((scheme, host, path, params, query, frag))
#print url; sys.exit()
network = urllib.URLopener()
try:
tempfile,headers = network.retrieve(url)
except IOError,reason:
write_html_error('Error opening "%s"' % url,
'The given URL could not be opened. Reason: %s' %\
str(reason))
sys.exit(1)
f = open(tempfile,'rb')
c.title = url
c.footer = __cgifooter__
convert(f,sys.stdout)
f.close()
network.close()
sys.exit()
if '-stdout' in options:
filebreak = '-'*72
for f in files:
try:
if len(files) > 1:
print filebreak
print 'File:',f
print filebreak
convert(f,sys.stdout)
except IOError:
pass
else:
verbose = ('-v' in options)
if verbose:
print 'Py2HTML: working on',
for f in files:
try:
if verbose: print f,
convert(f,f+'.html')
except IOError:
if verbose: print '(IOError!)',
if verbose:
print
print 'Done.'
# Script entry point: hand the full argv (including the program name) to main().
if __name__=='__main__':
    main(sys.argv)
| Python |
"""Module to analyze Python source code; for syntax coloring tools.
Interface:
tags = fontify(pytext, searchfrom, searchto)
The 'pytext' argument is a string containing Python source code.
The (optional) arguments 'searchfrom' and 'searchto' may contain a slice in pytext.
The returned value is a list of tuples, formatted like this:
[('keyword', 0, 6, None), ('keyword', 11, 17, None), ('comment', 23, 53, None), etc. ]
The tuple contents are always like this:
(tag, startindex, endindex, sublist)
tag is one of ('comment', 'string', 'keyword', 'function', 'class')
sublist is not used, hence always None.
"""
# Based on FontText.py by Mitchell S. Chapman,
# which was modified by Zachary Roadhouse,
# then un-Tk'd by Just van Rossum.
# Many thanks for regular expression debugging & authoring are due to:
# Tim (the-incredib-ly y'rs) Peters and Cristian Tismer
# So, who owns the copyright? ;-) How about this:
# Copyright 1996-1997:
# Mitchell S. Chapman,
# Zachary Roadhouse,
# Tim Peters,
# Just van Rossum
#
# Version 0.4 - changes copyright (C) 2001 Mark Pilgrim (f8dy@diveintopython.org)
# 2001/02/05 - MAP - distinguish between class and function identifiers
# 2001/03/21 - MAP - get keywords from keyword module (instead of hard-coded list)
# 2001/03/22 - MAP - use re module instead of deprecated regex module
# Scanner version; importers (e.g. py2html) require at least 0.3.
__version__ = "0.4"

import string, re, keyword

# Build up a regular expression which will match anything
# interesting, including multi-line triple-quoted strings.
commentPat = "#.*"

# Single-line string pattern; the placeholder "q" is substituted with ' and "
# below to produce the two quote variants.
pat = "q[^\q\n]*(\\\\[\000-\377][^\q\n]*)*q"
quotePat = string.replace(pat, "q", "'") + "|" + string.replace(pat, 'q', '"')

# Way to go, Tim!
# Triple-quoted string pattern; the whitespace/newlines below are purely for
# readability and are stripped out before use.
pat = """
qqq
[^\\q]*
(
( \\\\[\000-\377]
| q
( \\\\[\000-\377]
| [^\\q]
| q
( \\\\[\000-\377]
| [^\\q]
)
)
)
[^\\q]*
)*
qqq
"""
pat = string.join(string.split(pat), '') # get rid of whitespace
tripleQuotePat = string.replace(pat, "q", "'") + "|" + string.replace(pat, 'q', '"')

# Build up a regular expression which matches all and only
# Python keywords. This will let us skip the uninteresting
# identifier references.
# nonKeyPat identifies characters which may legally precede
# a keyword pattern.
nonKeyPat = "(^|[^a-zA-Z0-9_.\"'])"

keywordsPat = string.join(keyword.kwlist, "|")
keyPat = nonKeyPat + "(" + keywordsPat + ")" + nonKeyPat

# One alternation covering everything the scanner cares about.
matchPat = keyPat + "|" + commentPat + "|" + tripleQuotePat + "|" + quotePat
matchRE = re.compile(matchPat)

idKeyPat = "[ \t]*[A-Za-z_][A-Za-z_0-9.]*" # Ident w. leading whitespace.
idRE = re.compile(idKeyPat)
def fontify(pytext, searchfrom=0, searchto=None):
    """Tokenize Python source text for syntax coloring.

    Returns a list of (tag, startindex, endindex, None) tuples, where tag is
    one of 'comment', 'string', 'keyword', 'function', 'class'.
    NOTE(review): searchto is computed but never consulted below; scanning
    always runs to the end of pytext."""
    if searchto is None:
        searchto = len(pytext)
    tags = []
    # Tag names bound to locals once, outside the scan loop:
    commentTag = 'comment'
    stringTag = 'string'
    keywordTag = 'keyword'
    functionTag = 'function'
    classTag = 'class'
    start = 0
    end = searchfrom
    while 1:
        matchObject = matchRE.search(pytext, end)
        if not matchObject: break
        (start, end) = matchObject.span()
        match = matchObject.group(0)
        c = match[0]
        if c not in "#'\"":
            # Must have matched a keyword.
            # keyPat surrounds the keyword with one context char on each side,
            # so the span must be trimmed back to the keyword itself:
            if start <> searchfrom:
                # there's still a redundant char before and after it, strip!
                match = match[1:-1]
                start = start + 1
            else:
                # this is the first keyword in the text.
                # Only a space at the end.
                match = match[:-1]
                end = end - 1
            tags.append((keywordTag, start, end, None))
            # If this was a defining keyword, look ahead to the
            # following identifier.
            if match in ["def", "class"]:
                idMatchObject = idRE.search(pytext, end)
                if idMatchObject:
                    (start, end) = idMatchObject.span()
                    match = idMatchObject.group(0)
                    tags.append(((match=='def') and functionTag or classTag, start, end, None))
        elif c == "#":
            tags.append((commentTag, start, end, None))
        else:
            tags.append((stringTag, start, end, None))
    return tags
def test(path):
    "Fontify the file at *path* and print each tag with its text and span."
    f = open(path)
    text = f.read()
    f.close()
    tags = fontify(text)
    for tag, start, end, sublist in tags:
        print tag, `text[start:end]`, start, end
#!/usr/bin/env python
# encoding: utf-8
"""
BLIPConnectionTest.py
Created by Jens Alfke on 2008-06-04.
This source file is test/example code, and is in the public domain.
"""
from BLIP import Connection, OutgoingRequest, kOpening
import asyncore
from cStringIO import StringIO
from datetime import datetime
import logging
import random
import unittest
# Seconds between batches of test requests.
kSendInterval = 0.2
# Number of requests sent per batch.
kNBatchedMessages = 10
# Roughly one request in this many is flagged urgent.
kUrgentEvery = 4
def randbool():
    "Fair coin flip: True or False with equal probability."
    return random.randint(0, 1) != 0
class BLIPConnectionTest(unittest.TestCase):
    "Client-side stress test: opens a BLIP connection and fires batches of random echo requests."

    def setUp(self):
        self.connection = Connection( ('localhost',46353) )
        self.nRepliesPending = 0

    def sendRequest(self):
        "Build and send one request with a random-sized patterned body; returns the pending response."
        size = random.randint(0,32767)
        io = StringIO()
        for i in xrange(0,size):
            io.write( chr(i % 256) )
        body = io.getvalue()
        io.close()  # BUG FIX: was `io.close` (bare attribute access, never called)
        req = OutgoingRequest(self.connection, body,{'Content-Type': 'application/octet-stream',
                                                     'User-Agent': 'PyBLIP',
                                                     'Date': datetime.now(),
                                                     'Size': size})
        req.compressed = randbool()
        # Roughly one request in kUrgentEvery is urgent:
        req.urgent = (random.randint(0,kUrgentEvery-1)==0)
        req.response.onComplete = self.gotResponse
        return req.send()

    def gotResponse(self, response):
        "Response callback: the listener echoes the body, so it must match what was sent."
        self.nRepliesPending -= 1
        logging.info("Got response!: %s (%i pending)",response,self.nRepliesPending)
        request = response.request
        assert response.body == request.body

    def testClient(self):
        "Drive the asyncore loop, sending one batch of requests every kSendInterval seconds."
        lastReqTime = None
        nIterations = 0
        while nIterations < 10:
            asyncore.loop(timeout=kSendInterval,count=1)
            now = datetime.now()
            if self.connection.status!=kOpening and (not lastReqTime or (now-lastReqTime).microseconds >= kSendInterval*1.0e6):
                lastReqTime = now
                for i in xrange(0,kNBatchedMessages):
                    if not self.sendRequest():
                        logging.warn("Couldn't send request (connection is probably closed)")
                        break
                    self.nRepliesPending += 1
                nIterations += 1

    def tearDown(self):
        self.connection.close()
if __name__ == '__main__':
    # INFO level so the test's progress messages are visible.
    logging.basicConfig(level=logging.INFO)
    unittest.main()
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
BLIPListenerTest.py
Created by Jens Alfke on 2008-06-04.
This source file is test/example code, and is in the public domain.
"""
from BLIP import Listener
import asyncore
import logging
import unittest
class BLIPListenerTest(unittest.TestCase):
    "Server-side test: validates and echoes every incoming BLIP request."

    def testListener(self):
        def handleRequest(request):
            # Validate against what BLIPConnectionTest.sendRequest produces:
            logging.info("Got request!: %r",request)
            body = request.body
            assert len(body)<32768
            assert request.contentType == 'application/octet-stream'
            assert int(request['Size']) == len(body)
            assert request['User-Agent'] != None
            # Body bytes follow the pattern chr(i % 256):
            for i in xrange(0,len(request.body)):
                assert ord(body[i]) == i%256
            # Echo the body back with the same content type:
            response = request.response
            response.body = request.body
            response['Content-Type'] = request.contentType
            response.send()
        listener = Listener(46353)
        listener.onRequest = handleRequest
        logging.info("Listener is waiting...")
        try:
            asyncore.loop()     # runs until Ctrl-C
        except KeyboardInterrupt:
            logging.info("KeyboardInterrupt")
if __name__ == '__main__':
    # INFO level so the listener's progress messages are visible.
    logging.basicConfig(level=logging.INFO)
    unittest.main()
| Python |
# encoding: utf-8
"""
BLIP.py
Created by Jens Alfke on 2008-06-03.
Copyright notice and BSD license at end of file.
"""
import asynchat
import asyncore
from cStringIO import StringIO
import logging
import socket
import struct
import sys
import traceback
import zlib
# Connection status enumeration:
kDisconnected = -1      # dropped unexpectedly / after an error
kClosed = 0             # closed cleanly
kOpening = 1            # outgoing connect in progress
kOpen = 2               # ready for traffic
kClosing = 3            # close() called, teardown pending

# INTERNAL CONSTANTS -- NO TOUCHIES!
kFrameMagicNumber = 0x9B34F205      # sanity marker starting every frame
kFrameHeaderFormat = '!LLHH'        # big-endian: magic, requestNo, flags, frameLen
kFrameHeaderSize = 12               # byte size of kFrameHeaderFormat

# Flag word layout: low nibble = message type, upper bits = options.
kMsgFlag_TypeMask = 0x000F
kMsgFlag_Compressed = 0x0010
kMsgFlag_Urgent = 0x0020
kMsgFlag_NoReply = 0x0040
kMsgFlag_MoreComing = 0x0080

kMsgType_Request = 0
kMsgType_Response = 1
kMsgType_Error = 2

log = logging.getLogger('BLIP')
log.propagate = True

class MessageException(Exception):
    "Raised when an incoming message is malformed."
    pass

class ConnectionException(Exception):
    "Raised on protocol-level framing errors."
    pass
### LISTENER AND CONNECTION CLASSES:
class Listener (asyncore.dispatcher):
    "BLIP listener/server class"

    def __init__(self, port, sslKeyFile=None, sslCertFile=None):
        "Create a listener on a port"
        asyncore.dispatcher.__init__(self)
        self.onConnected = self.onRequest = None    # callbacks, set by the client code
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.bind( ('',port) )
        self.listen(5)
        self.sslKeyFile=sslKeyFile
        self.sslCertFile=sslCertFile
        log.info("Listening on port %u", port)

    def handle_accept( self ):
        "Accept an incoming TCP connection and wrap it in a Connection."
        # BUG FIX: the accepted socket was bound to a local named `socket`,
        # shadowing the socket module, so `socket.ssl(...)` became an attribute
        # lookup on the socket object and raised AttributeError whenever SSL
        # was configured. Renamed the local to `sock`.
        sock, address = self.accept()
        if self.sslKeyFile:
            socket.ssl(sock, self.sslKeyFile, self.sslCertFile)
        conn = Connection(address, sock=sock, listener=self)
        conn.onRequest = self.onRequest
        if self.onConnected:
            self.onConnected(conn)

    def handle_error(self):
        "Log any exception raised during accept handling, then shut down."
        (typ,val,trace) = sys.exc_info()
        log.error("Listener caught: %s %s\n%s", typ,val,traceback.format_exc())
        self.close()
class Connection (asynchat.async_chat):
    "A BLIP connection (incoming or outgoing) multiplexing messages over one TCP socket."

    def __init__( self, address, sock=None, listener=None, ssl=None ):
        "Opens a connection with the given address. If a connection/socket object is provided it'll use that,"
        "otherwise it'll open a new outgoing socket."
        # NOTE(review): only the first string above is the docstring; the second
        # is a no-op expression statement (kept as-is).
        if sock:
            # Server side: the Listener already accepted the socket.
            asynchat.async_chat.__init__(self,sock)
            log.info("Accepted connection from %s",address)
            self.status = kOpen
        else:
            # Client side: create a socket and start an async connect.
            asynchat.async_chat.__init__(self)
            log.info("Opening connection to %s",address)
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            self.status = kOpening
            if ssl:
                ssl(self.socket)
            self.connect(address)
        self.address = address
        self.listener = listener
        self.onRequest = None           # callback invoked with each completed IncomingRequest
        self.pendingRequests = {}       # requestNo -> partially-received incoming request
        self.pendingResponses = {}      # requestNo -> response awaiting incoming frames
        self.outBox = []                # outgoing messages with frames left to send
        self.inMessage = None           # message whose frame payload is currently being read
        self.inNumRequests = self.outNumRequests = 0
        self.sending = False
        self._endOfFrame()              # primes the parser to expect a frame header

    def close(self):
        "Politely close the connection (no-op unless currently open/opening)."
        if self.status > kClosed:
            self.status = kClosing
            log.info("Connection closing...")
            asynchat.async_chat.close(self)

    def handle_connect(self):
        log.info("Connection open!")
        self.status = kOpen

    def handle_error(self):
        "Any exception in the async loop tears the connection down."
        (typ,val,trace) = sys.exc_info()
        log.error("Connection caught: %s %s\n%s", typ,val,traceback.format_exc())
        self.discard_buffers()
        self.status = kDisconnected
        self.close()

    def handle_close(self):
        log.info("Connection closed!")
        self.pendingRequests = self.pendingResponses = None
        self.outBox = None
        # kClosing means we initiated the close; anything else was unexpected.
        if self.status == kClosing:
            self.status = kClosed
        else:
            self.status = kDisconnected
        asynchat.async_chat.handle_close(self)

    ### SENDING:

    @property
    def canSend(self):
        "True while new outgoing messages may be queued."
        return self.status==kOpening or self.status==kOpen

    def _sendMessage(self, msg):
        "Queue an encoded message and wake the output producer; returns success."
        if self.canSend:
            self._outQueueMessage(msg,True)
            if not self.sending:
                log.debug("Waking up the output stream")
                self.sending = True
                # The connection itself is the producer (see more()).
                self.push_with_producer(self)
            return True
        else:
            return False

    def _sendRequest(self, req):
        "Assign the next request number, register its response, then queue it."
        if self.canSend:
            requestNo = req.requestNo = self.outNumRequests = self.outNumRequests + 1
            response = req.response
            if response:
                response.requestNo = requestNo
                self.pendingResponses[requestNo] = response
                log.debug("pendingResponses[%i] := %s",requestNo,response)
            return self._sendMessage(req)
        else:
            return False

    def _outQueueMessage(self, msg,isNew=True):
        "Insert msg into outBox; urgent messages jump ahead of non-urgent ones."
        n = len(self.outBox)
        index = n
        if msg.urgent and n>1:
            # Scan backwards for the last urgent message and slot in after it.
            while index > 0:
                otherMsg = self.outBox[index-1]
                if otherMsg.urgent:
                    if index<n:
                        index += 1
                    break
                elif isNew and otherMsg.bytesSent==0:
                    # A new urgent message may overtake one that hasn't started sending.
                    break
                index -= 1
            else:
                index = 1
        self.outBox.insert(index,msg)
        if isNew:
            log.info("Queuing %s at index %i",msg,index)
        else:
            log.debug("Re-queueing outgoing message at index %i of %i",index,len(self.outBox))

    def more(self):
        "Producer protocol: return the next frame's bytes, or None when idle."
        n = len(self.outBox)
        if n > 0:
            msg = self.outBox.pop(0)
            frameSize = 4096
            # Urgent (or uncontested) messages get 4x larger frames.
            if msg.urgent or n==1 or not self.outBox[0].urgent:
                frameSize *= 4
            data = msg._sendNextFrame(frameSize)
            if msg._moreComing:
                # Round-robin: put it back in the queue for its next frame.
                self._outQueueMessage(msg,isNew=False)
            else:
                log.info("Finished sending %s",msg)
            return data
        else:
            log.debug("Nothing more to send")
            self.sending = False
            return None

    ### RECEIVING:

    def collect_incoming_data(self, data):
        "async_chat callback: accumulate header bytes or frame payload."
        if self.expectingHeader:
            if self.inHeader==None:
                self.inHeader = data
            else:
                self.inHeader += data
        elif self.inMessage:
            self.inMessage._receivedData(data)

    def found_terminator(self):
        "async_chat callback: a complete header or a complete payload arrived."
        if self.expectingHeader:
            # Got a header:
            (magic, requestNo, flags, frameLen) = struct.unpack(kFrameHeaderFormat,self.inHeader)
            self.inHeader = None
            if magic!=kFrameMagicNumber: raise ConnectionException, "Incorrect frame magic number %x" %magic
            if frameLen < kFrameHeaderSize: raise ConnectionException,"Invalid frame length %u" %frameLen
            frameLen -= kFrameHeaderSize
            log.debug("Incoming frame: type=%i, number=%i, flags=%x, length=%i",
                      (flags&kMsgFlag_TypeMask),requestNo,flags,frameLen)
            self.inMessage = self._inMessageForFrame(requestNo,flags)
            if frameLen > 0:
                self.expectingHeader = False
                self.set_terminator(frameLen)
            else:
                # Zero-length payload: this frame is already complete.
                self._endOfFrame()
        else:
            # Got the frame's payload:
            self._endOfFrame()

    def _inMessageForFrame(self, requestNo,flags):
        "Find or create the IncomingMessage this frame belongs to (None if unexpected)."
        message = None
        msgType = flags & kMsgFlag_TypeMask
        if msgType==kMsgType_Request:
            message = self.pendingRequests.get(requestNo)
            if message==None and requestNo == self.inNumRequests+1:
                # First frame of the next expected request number.
                message = IncomingRequest(self,requestNo,flags)
                assert message!=None
                self.pendingRequests[requestNo] = message
                self.inNumRequests += 1
        elif msgType==kMsgType_Response or msgType==kMsgType_Error:
            message = self.pendingResponses.get(requestNo)
        if message != None:
            message._beginFrame(flags)
        else:
            log.warning("Ignoring unexpected frame with type %u, request #%u", msgType,requestNo)
        return message

    def _endOfFrame(self):
        "Reset the parser to expect the next header; dispatch finished messages."
        msg = self.inMessage
        self.inMessage = None
        self.expectingHeader = True
        self.inHeader = None
        self.set_terminator(kFrameHeaderSize) # wait for binary header
        if msg:
            log.debug("End of frame of %s",msg)
            if not msg._moreComing:
                self._receivedMessage(msg)

    def _receivedMessage(self, msg):
        "All frames of msg arrived: decode it and invoke the request handler."
        log.info("Received: %s",msg)
        # Remove from pending:
        if msg.isResponse:
            del self.pendingResponses[msg.requestNo]
        else:
            del self.pendingRequests[msg.requestNo]
        # Decode:
        try:
            msg._finished()
            if not msg.isResponse:
                self.onRequest(msg)
        except Exception, x:
            log.error("Exception handling incoming message: %s", traceback.format_exc())
            #FIX: Send an error reply
### MESSAGE CLASSES:
class Message (object):
    "Abstract superclass of all request/response objects"

    def __init__(self, connection, body=None, properties=None):
        self.connection = connection
        self.body = body
        self.properties = properties or {}
        self.requestNo = None   # assigned when the message is sent/received

    @property
    def flags(self):
        "Pack the message type and option bits into the frame-header flag word."
        if self.isResponse:
            flags = kMsgType_Response
        else:
            flags = kMsgType_Request
        if self.urgent: flags |= kMsgFlag_Urgent
        if self.compressed: flags |= kMsgFlag_Compressed
        if self.noReply: flags |= kMsgFlag_NoReply
        if self._moreComing:flags |= kMsgFlag_MoreComing
        return flags

    def __str__(self):
        # e.g. "OutgoingRequest[#3 URG CMP 512 bytes]"
        s = "%s[" %(type(self).__name__)
        if self.requestNo != None:
            s += "#%i" %self.requestNo
        if self.urgent: s += " URG"
        if self.compressed: s += " CMP"
        if self.noReply: s += " NOR"
        if self._moreComing:s += " MOR"
        if self.body: s += " %i bytes" %len(self.body)
        return s+"]"

    def __repr__(self):
        s = str(self)
        if len(self.properties): s += repr(self.properties)
        return s

    @property
    def isResponse(self):
        "Is this message a response?"
        return False

    @property
    def contentType(self):
        return self.properties.get('Content-Type')

    # Dict-style access delegates to the property map:
    def __getitem__(self, key): return self.properties.get(key)
    def __contains__(self, key): return key in self.properties
    def __len__(self): return len(self.properties)
    def __nonzero__(self): return True      # a message is truthy even with no properties
    def __iter__(self): return self.properties.__iter__()
class IncomingMessage (Message):
"Abstract superclass of incoming messages."
def __init__(self, connection, requestNo, flags):
super(IncomingMessage,self).__init__(connection)
self.requestNo = requestNo
self.urgent = (flags & kMsgFlag_Urgent) != 0
self.compressed = (flags & kMsgFlag_Compressed) != 0
self.noReply = (flags & kMsgFlag_NoReply) != 0
self._moreComing= (flags & kMsgFlag_MoreComing) != 0
self.frames = []
def _beginFrame(self, flags):
"""Received a frame header."""
self._moreComing = (flags & kMsgFlag_MoreComing)!=0
def _receivedData(self, data):
"""Received data from a frame."""
self.frames.append(data)
def _finished(self):
"""The entire message has been received; now decode it."""
encoded = "".join(self.frames)
self.frames = None
# Decode the properties:
if len(encoded) < 2: raise MessageException, "missing properties length"
propSize = 2 + struct.unpack('!H',encoded[0:2])[0]
if propSize>len(encoded): raise MessageException, "properties too long to fit"
if propSize>2 and encoded[propSize-1] != '\000': raise MessageException, "properties are not nul-terminated"
proplist = encoded[2:propSize-1].split('\000')
encoded = encoded[propSize:]
if len(proplist) & 1: raise MessageException, "odd number of property strings"
for i in xrange(0,len(proplist),2):
def expand(str):
if len(str)==1:
str = IncomingMessage.__expandDict.get(str,str)
return str
self.properties[ expand(proplist[i])] = expand(proplist[i+1])
# Decode the body:
if self.compressed and len(encoded)>0:
try:
encoded = zlib.decompress(encoded,31) # window size of 31 needed for gzip format
except zlib.error:
raise MessageException, sys.exc_info()[1]
self.body = encoded
__expandDict= {'\x01' : "Content-Type",
'\x02' : "Profile",
'\x03' : "application/octet-stream",
'\x04' : "text/plain; charset=UTF-8",
'\x05' : "text/xml",
'\x06' : "text/yaml",
'\x07' : "Channel",
'\x08' : "Error-Code",
'\x09' : "Error-Domain"}
class OutgoingMessage (Message):
    "Abstract superclass of outgoing requests/responses."

    def __init__(self, connection, body=None, properties=None):
        Message.__init__(self,connection,body,properties)
        self.urgent = self.compressed = self.noReply = False
        self._moreComing = True

    def __setitem__(self, key,val):
        self.properties[key] = val

    def __delitem__(self, key):
        del self.properties[key]

    @property
    def sent(self):
        "True once _encode() has run."
        return hasattr(self,'encoded')

    def _encode(self):
        "Generates the message's encoded form, prior to sending it."
        out = StringIO()
        def _writePropString(s):
            # Each property string is written followed by a nul terminator.
            out.write(str(s)) #FIX: Abbreviate
            out.write('\000')
        # PERF FIX: _writePropString was defined inside the loop, re-creating
        # the closure per property pair; hoisted out (behavior unchanged).
        for (key,value) in self.properties.iteritems():
            _writePropString(key)
            _writePropString(value)
        propertiesSize = out.tell()
        assert propertiesSize<65536 #FIX: Return an error instead
        body = self.body
        if self.compressed:
            z = zlib.compressobj(6,zlib.DEFLATED,31) # window size of 31 needed for gzip format
            out.write(z.compress(body))
            body = z.flush()
        out.write(body)
        # Prefix with the 2-byte properties length; frames slice self.encoded.
        self.encoded = struct.pack('!H',propertiesSize) + out.getvalue()
        out.close()
        log.debug("Encoded %s into %u bytes", self,len(self.encoded))
        self.bytesSent = 0

    def _sendNextFrame(self, maxLen):
        "Return the next frame (header + up to maxLen payload bytes)."
        pos = self.bytesSent
        payload = self.encoded[pos:pos+maxLen]
        pos += len(payload)
        self._moreComing = (pos < len(self.encoded))
        if not self._moreComing:
            self.encoded = None     # release the buffer once fully consumed
        log.debug("Sending frame of %s; bytes %i--%i", self,pos-len(payload),pos)
        header = struct.pack(kFrameHeaderFormat, kFrameMagicNumber,
                             self.requestNo,
                             self.flags,
                             kFrameHeaderSize+len(payload))
        self.bytesSent = pos
        return header + payload
class Request (object):
    "Mixin providing lazy access to a request's paired response object."

    @property
    def response(self):
        "The response object for this request."
        if self.noReply:
            return None     # fire-and-forget requests have no response
        r = self.__dict__.get('_response')
        if r==None:
            # Created lazily; the concrete subclass decides the type.
            r = self._response = self._createResponse()
        return r
class Response (Message):
    "Mixin/base for response messages; mirrors key fields of its request."

    def _setRequest(self, request):
        assert not request.noReply      # no-reply requests must never get a response
        self.request = request
        self.requestNo = request.requestNo
        self.urgent = request.urgent    # responses inherit the request's urgency

    @property
    def isResponse(self):
        return True
class IncomingRequest (IncomingMessage, Request):
    "A request received from the peer; answered with an OutgoingResponse."
    def _createResponse(self):
        return OutgoingResponse(self)
class OutgoingRequest (OutgoingMessage, Request):
    "A request being sent to the peer; its reply arrives as an IncomingResponse."
    def _createResponse(self):
        return IncomingResponse(self)

    def send(self):
        "Encode and queue this request; returns its response object on success.

        NOTE(review): for noReply requests self.response is None, so this
        returns None even when queueing succeeded."
        self._encode()
        return self.connection._sendRequest(self) and self.response
class IncomingResponse (IncomingMessage, Response):
    "A response arriving from the peer; fires onComplete when fully received."
    def __init__(self, request):
        # flags=0: the real flag bits arrive with the first frame via _beginFrame().
        IncomingMessage.__init__(self,request.connection,None,0)
        self._setRequest(request)
        self.onComplete = None      # optional callback: fn(response)

    def _finished(self):
        super(IncomingResponse,self)._finished()
        if self.onComplete:
            try:
                self.onComplete(self)
            except Exception, x:
                # A broken callback must not kill the connection loop.
                log.error("Exception dispatching response: %s", traceback.format_exc())
class OutgoingResponse (OutgoingMessage, Response):
    "A response being sent back for an IncomingRequest."
    def __init__(self, request):
        OutgoingMessage.__init__(self,request.connection)
        self._setRequest(request)

    def send(self):
        "Encode and queue this response; returns True on success."
        self._encode()
        return self.connection._sendMessage(self)
"""
Copyright (c) 2008, Jens Alfke <jens@mooseyard.com>. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions
and the following disclaimer in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRI-
BUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
BLIPListenerTest.py
Created by Jens Alfke on 2008-06-04.
This source file is test/example code, and is in the public domain.
"""
from BLIP import Listener
import asyncore
import logging
import unittest
class BLIPListenerTest(unittest.TestCase):
    "Server-side test: validates and echoes every incoming BLIP request."

    def testListener(self):
        def handleRequest(request):
            # Validate against what BLIPConnectionTest.sendRequest produces:
            logging.info("Got request!: %r",request)
            body = request.body
            assert len(body)<32768
            assert request.contentType == 'application/octet-stream'
            assert int(request['Size']) == len(body)
            assert request['User-Agent'] != None
            # Body bytes follow the pattern chr(i % 256):
            for i in xrange(0,len(request.body)):
                assert ord(body[i]) == i%256
            # Echo the body back with the same content type:
            response = request.response
            response.body = request.body
            response['Content-Type'] = request.contentType
            response.send()
        listener = Listener(46353)
        listener.onRequest = handleRequest
        logging.info("Listener is waiting...")
        try:
            asyncore.loop()     # runs until Ctrl-C
        except KeyboardInterrupt:
            logging.info("KeyboardInterrupt")
if __name__ == '__main__':
    # INFO level so the listener's progress messages are visible.
    logging.basicConfig(level=logging.INFO)
    unittest.main()
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
BLIPConnectionTest.py
Created by Jens Alfke on 2008-06-04.
This source file is test/example code, and is in the public domain.
"""
from BLIP import Connection, OutgoingRequest, kOpening
import asyncore
from cStringIO import StringIO
from datetime import datetime
import logging
import random
import unittest
# Seconds between batches of test requests.
kSendInterval = 0.2
# Number of requests sent per batch.
kNBatchedMessages = 10
# Roughly one request in this many is flagged urgent.
kUrgentEvery = 4
def randbool():
    "Fair coin flip: True or False with equal probability."
    return random.randint(0, 1) != 0
class BLIPConnectionTest(unittest.TestCase):
    "Client-side stress test: opens a BLIP connection and fires batches of random echo requests."

    def setUp(self):
        self.connection = Connection( ('localhost',46353) )
        self.nRepliesPending = 0

    def sendRequest(self):
        "Build and send one request with a random-sized patterned body; returns the pending response."
        size = random.randint(0,32767)
        io = StringIO()
        for i in xrange(0,size):
            io.write( chr(i % 256) )
        body = io.getvalue()
        io.close()  # BUG FIX: was `io.close` (bare attribute access, never called)
        req = OutgoingRequest(self.connection, body,{'Content-Type': 'application/octet-stream',
                                                     'User-Agent': 'PyBLIP',
                                                     'Date': datetime.now(),
                                                     'Size': size})
        req.compressed = randbool()
        # Roughly one request in kUrgentEvery is urgent:
        req.urgent = (random.randint(0,kUrgentEvery-1)==0)
        req.response.onComplete = self.gotResponse
        return req.send()

    def gotResponse(self, response):
        "Response callback: the listener echoes the body, so it must match what was sent."
        self.nRepliesPending -= 1
        logging.info("Got response!: %s (%i pending)",response,self.nRepliesPending)
        request = response.request
        assert response.body == request.body

    def testClient(self):
        "Drive the asyncore loop, sending one batch of requests every kSendInterval seconds."
        lastReqTime = None
        nIterations = 0
        while nIterations < 10:
            asyncore.loop(timeout=kSendInterval,count=1)
            now = datetime.now()
            if self.connection.status!=kOpening and (not lastReqTime or (now-lastReqTime).microseconds >= kSendInterval*1.0e6):
                lastReqTime = now
                for i in xrange(0,kNBatchedMessages):
                    if not self.sendRequest():
                        logging.warn("Couldn't send request (connection is probably closed)")
                        break
                    self.nRepliesPending += 1
                nIterations += 1

    def tearDown(self):
        self.connection.close()
if __name__ == '__main__':
    # INFO level so the test's progress messages are visible.
    logging.basicConfig(level=logging.INFO)
    unittest.main()
| Python |
import JSTalk

# Attach to VoodooPad Pro via JSTalk and dump the text of every plain page
# in the first document.
vp = JSTalk.application("VoodooPad Pro")
print(vp)
firstDoc = vp.orderedDocuments().objectAtIndex_(0)
for pageKey in firstDoc.keys():
    print(pageKey)
    page = firstDoc.pageForKey_(pageKey)
    # Only textual pages ("com.fm.page") carry an attributed string to print.
    if (page.uti() == "com.fm.page"):
        pageText = page.dataAsAttributedString().string()
        print(pageText)
| Python |
from Foundation import *
from AppKit import *
import time
def application(appName):
    "Launch *appName* in the background and return its JSTalk root proxy, or None on failure."
    appPath = NSWorkspace.sharedWorkspace().fullPathForApplication_(appName);
    if (not appPath):
        print("Could not find application '" + appName + "'")
        return None
    appBundle = NSBundle.bundleWithPath_(appPath)
    bundleId = appBundle.bundleIdentifier()
    # Launch without activating so the current app keeps focus.
    NSWorkspace.sharedWorkspace().launchAppWithBundleIdentifier_options_additionalEventParamDescriptor_launchIdentifier_(bundleId, NSWorkspaceLaunchWithoutActivation | NSWorkspaceLaunchAsync, None, None)
    # The app registers a connection named "<bundle id>.JSTalk".
    port = bundleId + ".JSTalk"
    conn = None
    tries = 0
    # Poll for up to ~10 seconds while the app starts up.
    # NOTE(review): sleeps one extra second after the final failed attempt.
    while ((conn is None) and (tries < 10)):
        conn = NSConnection.connectionWithRegisteredName_host_(port, None)
        tries = tries + 1;
        if (not conn):
            time.sleep(1)
    if (not conn):
        print("Could not find a JSTalk connection to " + appName)
        return None
    return conn.rootProxy()
def proxyForApp(appName):
    "Alias for application(); kept for API compatibility."
    return application(appName)
#!/usr/bin/python
import sys,string
from socket import *
import hashlib
import random
server_host_port=[
["127.0.0.1",12324],
["127.0.0.1",12322],
["127.0.0.1",12323]
]
sock_con_fd=[]
def store_key(key):
    "Pick a server by hashing the key, then issue a memcached-style 'set' for it."
    # Consistent mapping: md5(key) as an int, modulo the server count.
    host_index = int(hashlib.md5(key).hexdigest(),16) % len(server_host_port)
    print host_index
    now_host_hostname = server_host_port[host_index][0]
    now_host_port = server_host_port[host_index][1]
    print now_host_hostname
    print now_host_port
    # connect to server on the port
    # NOTE(review): the memcached text protocol expects the data block on its
    # own \r\n-terminated line ('set <key> 0 0 3\r\n123\r\n'); this inline
    # form may be rejected by a real server -- verify against the server used.
    request ='set ' + key + ' 0 0 3 123'
    print request
    sock_con_fd[host_index][0].send(request)
def connect_host():
    "Open one TCP socket per configured server and cache it in sock_con_fd."
    for host_index in range(len(server_host_port)):
        sock_con_fd.append([])
        sock_fd = socket(AF_INET, SOCK_STREAM)
        print sock_fd
        # NOTE(review): socket.connect() always returns None, so conn_fd is
        # stored purely as a placeholder.
        conn_fd = sock_fd.connect((server_host_port[host_index][0],server_host_port[host_index][1]))
        print conn_fd
        sock_con_fd[host_index].insert(0,sock_fd)
        sock_con_fd[host_index].insert(1,conn_fd)
    for item in sock_con_fd:
        print item
# Entry point: connect to all servers, then store 999 random 5-letter keys.
if __name__=="__main__":
    print("now in main")
    connect_host()
    for j in range(1,1000):
        # sample() picks 5 distinct letters; join+replace yields a 5-char key.
        key = string.join(random.sample(['z','y','x','w','v','u','t','s','r','a'], 5)).replace(' ','')
        print(key)
        store_key(key)
| Python |
#!/usr/bin/python
import sys,string
from socket import *
import hashlib
import random
server_host_port=[
["127.0.0.1",12324],
["127.0.0.1",12322],
["127.0.0.1",12323]
]
sock_con_fd=[]
def store_key(key):
    "Pick a server by hashing the key, then issue a memcached-style 'set' for it."
    # Consistent mapping: md5(key) as an int, modulo the server count.
    host_index = int(hashlib.md5(key).hexdigest(),16) % len(server_host_port)
    print host_index
    now_host_hostname = server_host_port[host_index][0]
    now_host_port = server_host_port[host_index][1]
    print now_host_hostname
    print now_host_port
    # connect to server on the port
    # NOTE(review): real memcached expects the data block on its own
    # \r\n-terminated line; this inline form may be rejected -- verify.
    request ='set ' + key + ' 0 0 3 123'
    print request
    sock_con_fd[host_index][0].send(request)
def connect_host():
    "Open one TCP socket per configured server and cache it in sock_con_fd."
    for host_index in range(len(server_host_port)):
        sock_con_fd.append([])
        sock_fd = socket(AF_INET, SOCK_STREAM)
        print sock_fd
        # NOTE(review): socket.connect() always returns None; conn_fd is a placeholder.
        conn_fd = sock_fd.connect((server_host_port[host_index][0],server_host_port[host_index][1]))
        print conn_fd
        sock_con_fd[host_index].insert(0,sock_fd)
        sock_con_fd[host_index].insert(1,conn_fd)
    for item in sock_con_fd:
        print item
# Entry point: connect to all servers, then store 999 random 5-letter keys.
if __name__=="__main__":
    print("now in main")
    connect_host()
    for j in range(1,1000):
        # sample() picks 5 distinct letters; join+replace yields a 5-char key.
        key = string.join(random.sample(['z','y','x','w','v','u','t','s','r','a'], 5)).replace(' ','')
        print(key)
        store_key(key)
| Python |
#!/usr/bin/python
import sys
from socket import *
serverHost = 'localhost'         # servername is localhost
serverPort = 12321# use arbitrary port > 1024

s = socket(AF_INET, SOCK_STREAM)     # create a TCP socket
s.connect((serverHost, serverPort))  # connect to server on the port
# NOTE(review): memcached-style 'set' without the \r\n framing the real
# protocol requires -- this targets whatever server listens on 12321.
s.send('set xguru 0 0 3 123');
data = s.recv(1024)                  # receive up to 1K bytes
print data
| Python |
#!/usr/bin/python
import sys
from socket import *
serverHost = 'localhost'         # servername is localhost
serverPort = 12321# use arbitrary port > 1024

s = socket(AF_INET, SOCK_STREAM)     # create a TCP socket
s.connect((serverHost, serverPort))  # connect to server on the port
# NOTE(review): memcached-style 'set' without the \r\n framing the real
# protocol requires -- this targets whatever server listens on 12321.
s.send('set xguru 0 0 3 123');
data = s.recv(1024)                  # receive up to 1K bytes
print data
| Python |
from libflycached import *
# Smoke test for libflycached: connect, set one key, read it back, disconnect.
if __name__=="__main__":
    print("now in main")
    connect_host()
    store_item('xguru',0,0,3,123)
    get_item('xguru')
    close_connection()
| Python |
#!/usr/bin/python
import sys,string
import hashlib
from socket import *
server_host_port=[
["127.0.0.1",12324],
["127.0.0.1",12322],
["127.0.0.1",12325]
]
sock_con_fd=[]
def store_item(key,flags,exptime,length,data):
    "Send a memcached 'set' for key to the server chosen by its hash; print the reply."
    host_index = get_host_index(key)
    now_host_hostname = server_host_port[host_index][0]
    now_host_port = server_host_port[host_index][1]
    print now_host_hostname
    print now_host_port
    # connect to server on the port
    # NOTE(review): real memcached expects the data block on a separate
    # \r\n-terminated line; verify against the server in use.
    request = "set %s %s %s %s %s" % (key,flags,exptime,length,data)
    #request ='set ' + key + ' ' + flags + ' '+ exptime + '' + length + '' + data
    print request
    sock_con_fd[host_index][0].send(request)
    reply = sock_con_fd[host_index][0].recv(1024) # receive up to 1K bytes
    print reply
def get_item(key):
    "Send a memcached 'get' for key to the server chosen by its hash; print the reply."
    host_index = get_host_index(key)
    now_host_hostname = server_host_port[host_index][0]
    now_host_port = server_host_port[host_index][1]
    print now_host_hostname
    print now_host_port
    # connect to server on the port
    request ='get %s' % (key)
    sock_con_fd[host_index][0].send(request)
    reply = sock_con_fd[host_index][0].recv(1024)
    print reply
def get_host_index(key):
host_index = int(hashlib.md5(key).hexdigest(),16) % len(server_host_port)
print host_index
return host_index
def connect_host():
    "Open one TCP socket per configured server and cache it in sock_con_fd."
    for host_index in range(len(server_host_port)):
        sock_con_fd.append([])
        sock_fd = socket(AF_INET, SOCK_STREAM)
        print sock_fd
        # NOTE(review): socket.connect() always returns None; conn_fd is a placeholder.
        conn_fd = sock_fd.connect((server_host_port[host_index][0],server_host_port[host_index][1]))
        print conn_fd
        sock_con_fd[host_index].insert(0,sock_fd)
        sock_con_fd[host_index].insert(1,conn_fd)
    for item in sock_con_fd:
        print item
def close_connection():
    "Close the cached socket for every configured server."
    for idx in range(len(server_host_port)):
        sock_con_fd[idx][0].close()
#if __name__=="__main__":
# print("now in main")
# connect_host()
# for j in range(1,1000):
# key = string.join(random.sample(['z','y','x','w','v','u','t','s','r','a'], 5)).replace(' ','')
# print(key)
# store_item(key)
# close_connection()
| Python |
#!/usr/bin/python
import sys,string
import hashlib
from socket import *
server_host_port=[
["127.0.0.1",12324],
["127.0.0.1",12322],
["127.0.0.1",12325]
]
sock_con_fd=[]
def store_item(key,flags,exptime,length,data):
    "Send a memcached 'set' for key to the server chosen by its hash; print the reply."
    host_index = get_host_index(key)
    now_host_hostname = server_host_port[host_index][0]
    now_host_port = server_host_port[host_index][1]
    print now_host_hostname
    print now_host_port
    # connect to server on the port
    # NOTE(review): real memcached expects the data block on a separate
    # \r\n-terminated line; verify against the server in use.
    request = "set %s %s %s %s %s" % (key,flags,exptime,length,data)
    #request ='set ' + key + ' ' + flags + ' '+ exptime + '' + length + '' + data
    print request
    sock_con_fd[host_index][0].send(request)
    reply = sock_con_fd[host_index][0].recv(1024) # receive up to 1K bytes
    print reply
def get_item(key):
    "Send a memcached 'get' for key to the server chosen by its hash; print the reply."
    host_index = get_host_index(key)
    now_host_hostname = server_host_port[host_index][0]
    now_host_port = server_host_port[host_index][1]
    print now_host_hostname
    print now_host_port
    # connect to server on the port
    request ='get %s' % (key)
    sock_con_fd[host_index][0].send(request)
    reply = sock_con_fd[host_index][0].recv(1024)
    print reply
def get_host_index(key):
host_index = int(hashlib.md5(key).hexdigest(),16) % len(server_host_port)
print host_index
return host_index
def connect_host():
    # Open one TCP connection per configured server and cache it in the
    # module-level `sock_con_fd` table, indexed like `server_host_port`.
    for host_index in range(len(server_host_port)):
        sock_con_fd.append([])
        sock_fd = socket(AF_INET, SOCK_STREAM)
        print sock_fd
        # socket.connect() returns None; its result is stored anyway at slot 1.
        conn_fd = sock_fd.connect((server_host_port[host_index][0],server_host_port[host_index][1]))
        print conn_fd
        sock_con_fd[host_index].insert(0,sock_fd)
        sock_con_fd[host_index].insert(1,conn_fd)
    # Debug dump of the connection table.
    for item in sock_con_fd:
        print item
def close_connection():
    # Close every cached server socket opened by connect_host().
    for host_index in range(len(server_host_port)):
        sock_con_fd[host_index][0].close()
#if __name__=="__main__":
# print("now in main")
# connect_host()
# for j in range(1,1000):
# key = string.join(random.sample(['z','y','x','w','v','u','t','s','r','a'], 5)).replace(' ','')
# print(key)
# store_item(key)
# close_connection()
| Python |
#!/usr/bin/python
import sys,string
from socket import *
import hashlib
import random
server_host_port=[
["127.0.0.1",12324],
["127.0.0.1",12322],
["127.0.0.1",12323]
]
sock_con_fd=[]
def store_key(key):
host_index = int(hashlib.md5(key).hexdigest(),16) % len(server_host_port)
print host_index
now_host_hostname = server_host_port[host_index][0]
now_host_port = server_host_port[host_index][1]
print now_host_hostname
print now_host_port
# connect to server on the port
request ='set ' + key + ' 0 0 3 123'
print request
sock_con_fd[host_index][0].send(request)
def connect_host():
    # Open one TCP connection per configured server and cache it in the
    # module-level `sock_con_fd` table, indexed like `server_host_port`.
    for host_index in range(len(server_host_port)):
        sock_con_fd.append([])
        sock_fd = socket(AF_INET, SOCK_STREAM)
        print sock_fd
        # socket.connect() returns None; its result is stored anyway at slot 1.
        conn_fd = sock_fd.connect((server_host_port[host_index][0],server_host_port[host_index][1]))
        print conn_fd
        sock_con_fd[host_index].insert(0,sock_fd)
        sock_con_fd[host_index].insert(1,conn_fd)
    # Debug dump of the connection table.
    for item in sock_con_fd:
        print item
if __name__=="__main__":
    print("now in main")
    connect_host()
    # Store 999 random 5-letter keys drawn from a fixed alphabet.
    for j in range(1,1000):
        # string.join inserts spaces between the sampled letters; strip them.
        key = string.join(random.sample(['z','y','x','w','v','u','t','s','r','a'], 5)).replace(' ','')
        print(key)
        store_key(key)
| Python |
#!/usr/bin/python
import sys,string
from socket import *
import hashlib
import random
server_host_port=[
["127.0.0.1",12324],
["127.0.0.1",12322],
["127.0.0.1",12323]
]
sock_con_fd=[]
def store_key(key):
host_index = int(hashlib.md5(key).hexdigest(),16) % len(server_host_port)
print host_index
now_host_hostname = server_host_port[host_index][0]
now_host_port = server_host_port[host_index][1]
print now_host_hostname
print now_host_port
# connect to server on the port
request ='set ' + key + ' 0 0 3 123'
print request
sock_con_fd[host_index][0].send(request)
def connect_host():
    # Open one TCP connection per configured server and cache it in the
    # module-level `sock_con_fd` table, indexed like `server_host_port`.
    for host_index in range(len(server_host_port)):
        sock_con_fd.append([])
        sock_fd = socket(AF_INET, SOCK_STREAM)
        print sock_fd
        # socket.connect() returns None; its result is stored anyway at slot 1.
        conn_fd = sock_fd.connect((server_host_port[host_index][0],server_host_port[host_index][1]))
        print conn_fd
        sock_con_fd[host_index].insert(0,sock_fd)
        sock_con_fd[host_index].insert(1,conn_fd)
    # Debug dump of the connection table.
    for item in sock_con_fd:
        print item
if __name__=="__main__":
    print("now in main")
    connect_host()
    # Store 999 random 5-letter keys drawn from a fixed alphabet.
    for j in range(1,1000):
        # string.join inserts spaces between the sampled letters; strip them.
        key = string.join(random.sample(['z','y','x','w','v','u','t','s','r','a'], 5)).replace(' ','')
        print(key)
        store_key(key)
| Python |
import os
# My os is Windows 7, change this to your os.name.
# A Windows box (os.name == 'nt') is treated as the development machine.
if os.name == 'nt':
    DEBUG = True
else:
    DEBUG = False
TEMPLATE_DEBUG = DEBUG
TIME_ZONE = 'Asia/Shanghai'
#TIME_ZONE = 'PRC'
LANGUAGE_CODE = 'zh-cn'
SITE_ID = 1
USE_I18N = True
# Sessions and the page cache are both backed by App Engine memcache.
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
DATABASE_ENGINE = 'appengine'
CACHE_BACKEND = 'memcached://'
CACHE_MIDDLEWARE_KEY_PREFIX = '_cache_'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '0#lg)78qn3povbco0w1vgfbgmpw)h%beop)ytch_5c=r%em9k7'
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
#     'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
    #'django.middleware.cache.CacheMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
#    'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'urls'
# Templates live next to this settings file.
ROOT_PATH = os.path.dirname(__file__)
TEMPLATE_DIRS = (
    ROOT_PATH + '/templates',
)
INSTALLED_APPS = (
#     'django.contrib.auth',
#     'django.contrib.contenttypes',
#     'django.contrib.sessions',
#     'django.contrib.sites',
    'django.contrib.sitemaps',
    'django.contrib.humanize',
    'appengine_django',
    'blog',
)
| Python |
#!/usr/bin/env python
'''
GAEUnit: Google App Engine Unit Test Framework
Usage:
1. Put gaeunit.py into your application directory. Modify 'app.yaml' by
adding the following mapping below the 'handlers:' section:
- url: /test.*
script: gaeunit.py
2. Write your own test cases by extending unittest.TestCase.
3. Launch the development web server. To run all tests, point your browser to:
http://localhost:8080/test (Modify the port if necessary.)
For plain text output add '?format=plain' to the above URL.
See README.TXT for information on how to run specific tests.
4. The results are displayed as the tests are run.
Visit http://code.google.com/p/gaeunit for more information and updates.
------------------------------------------------------------------------------
Copyright (c) 2008-2009, George Lei and Steven R. Farley. All rights reserved.
Distributed under the following BSD license:
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
------------------------------------------------------------------------------
'''
__author__ = "George Lei and Steven R. Farley"
__email__ = "George.Z.Lei@Gmail.com"
__version__ = "#Revision: 1.2.8 $"[11:-2]
__copyright__= "Copyright (c) 2008-2009, George Lei and Steven R. Farley"
__license__ = "BSD"
__url__ = "http://code.google.com/p/gaeunit"
import sys
import os
import unittest
import time
import logging
import cgi
import django.utils.simplejson
from google.appengine.ext import webapp
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_file_stub
from google.appengine.ext.webapp.util import run_wsgi_app
_LOCAL_TEST_DIR = 'test' # location of files
_WEB_TEST_DIR = '/test' # how you want to refer to tests on your web server
# or:
# _WEB_TEST_DIR = '/u/test'
# then in app.yaml:
# - url: /u/test.*
# script: gaeunit.py
##############################################################################
# Main request handler
##############################################################################
class MainTestPageHandler(webapp.RequestHandler):
    """Serves the main test page: AJAX-driven HTML or a plain-text run."""
    def get(self):
        # Reject any query parameter other than the three we understand.
        unknown_args = [arg for arg in self.request.arguments()
                        if arg not in ("format", "package", "name")]
        if len(unknown_args) > 0:
            errors = []
            for arg in unknown_args:
                errors.append(_log_error("The request parameter '%s' is not valid." % arg))
            self.error(404)
            self.response.out.write(" ".join(errors))
            return
        format = self.request.get("format", "html")
        if format == "html":
            self._render_html()
        elif format == "plain":
            self._render_plain()
        else:
            # cgi.escape prevents reflected HTML in the error message.
            error = _log_error("The format '%s' is not valid." % cgi.escape(format))
            self.error(404)
            self.response.out.write(error)
    def _render_html(self):
        # Embed the JSON test map, test URL prefix and version into the page.
        suite, error = _create_suite(self.request)
        if not error:
            self.response.out.write(_MAIN_PAGE_CONTENT % (_test_suite_to_json(suite), _WEB_TEST_DIR, __version__))
        else:
            self.error(404)
            self.response.out.write(error)
    def _render_plain(self):
        # Run the whole suite synchronously and stream text results.
        self.response.headers["Content-Type"] = "text/plain"
        runner = unittest.TextTestRunner(self.response.out)
        suite, error = _create_suite(self.request)
        if not error:
            self.response.out.write("====================\n" \
                                    "GAEUnit Test Results\n" \
                                    "====================\n\n")
            _run_test_suite(runner, suite)
        else:
            self.error(404)
            self.response.out.write(error)
##############################################################################
# JSON test classes
##############################################################################
class JsonTestResult(unittest.TestResult):
    """TestResult that can serialize its errors/failures summary to JSON."""
    def __init__(self):
        unittest.TestResult.__init__(self)
        # Total number of tests expected to run (set by JsonTestRunner).
        self.testNumber = 0
    def render_to(self, stream):
        # Write a JSON summary; the newline after '},' keeps output readable.
        result = {
            'runs': self.testsRun,
            'total': self.testNumber,
            'errors': self._list(self.errors),
            'failures': self._list(self.failures),
        }
        stream.write(django.utils.simplejson.dumps(result).replace('},', '},\n'))
    def _list(self, list):
        # Convert (test, traceback) pairs into JSON-friendly dicts.
        # NOTE: the parameter shadows the builtin `list`, and `dict` below
        # shadows the builtin `dict`; kept as-is.
        dict = []
        for test, err in list:
            d = {
                'desc': test.shortDescription() or str(test),
                'detail': err,
            }
            dict.append(d)
        return dict
class JsonTestRunner:
    """Minimal runner that collects results into a JsonTestResult."""
    def run(self, test):
        self.result = JsonTestResult()
        self.result.testNumber = test.countTestCases()
        startTime = time.time()
        test(self.result)
        stopTime = time.time()
        # Elapsed time is computed but not currently reported anywhere.
        timeTaken = stopTime - startTime
        return self.result
class JsonTestRunHandler(webapp.RequestHandler):
    """AJAX endpoint: run the single test named by `?name=` and emit JSON."""
    def get(self):
        self.response.headers["Content-Type"] = "text/javascript"
        test_name = self.request.get("name")
        # Reload test modules so edits are picked up without a server restart.
        _load_default_test_modules()
        suite = unittest.defaultTestLoader.loadTestsFromName(test_name)
        runner = JsonTestRunner()
        _run_test_suite(runner, suite)
        runner.result.render_to(self.response.out)
# This is not used by the HTML page, but it may be useful for other client test runners.
class JsonTestListHandler(webapp.RequestHandler):
    """Emit the JSON map of available tests (module -> class -> methods)."""
    def get(self):
        self.response.headers["Content-Type"] = "text/javascript"
        suite, error = _create_suite(self.request)
        if not error:
            self.response.out.write(_test_suite_to_json(suite))
        else:
            self.error(404)
            self.response.out.write(error)
##############################################################################
# Module helper functions
##############################################################################
def _create_suite(request):
    """Build a TestSuite from the request's `package`/`name` parameters.

    Returns (suite, error): error is None on success, otherwise a message
    string (which has also been logged).
    """
    package_name = request.get("package")
    test_name = request.get("name")
    loader = unittest.defaultTestLoader
    suite = unittest.TestSuite()
    error = None
    try:
        if not package_name and not test_name:
            # Default: load every test module found in the local test dir.
            modules = _load_default_test_modules()
            for module in modules:
                suite.addTest(loader.loadTestsFromModule(module))
        elif test_name:
            _load_default_test_modules()
            suite.addTest(loader.loadTestsFromName(test_name))
        elif package_name:
            # Load each module listed in the package's __all__.
            package = reload(__import__(package_name))
            module_names = package.__all__
            for module_name in module_names:
                suite.addTest(loader.loadTestsFromName('%s.%s' % (package_name, module_name)))
        if suite.countTestCases() == 0:
            raise Exception("'%s' is not found or does not contain any tests." % \
                            (test_name or package_name or 'local directory: \"%s\"' % _LOCAL_TEST_DIR))
    except Exception, e:
        error = str(e)
        _log_error(error)
    return (suite, error)
def _load_default_test_modules():
    """Import (or re-import) every *.py module in the local test directory."""
    if not _LOCAL_TEST_DIR in sys.path:
        sys.path.append(_LOCAL_TEST_DIR)
    # Strip the ".py" suffix to get importable module names.
    module_names = [mf[0:-3] for mf in os.listdir(_LOCAL_TEST_DIR) if mf.endswith(".py")]
    # reload() picks up edits made while the dev server keeps running.
    return [reload(__import__(name)) for name in module_names]
def _get_tests_from_suite(suite, tests):
for test in suite:
if isinstance(test, unittest.TestSuite):
_get_tests_from_suite(test, tests)
else:
tests.append(test)
def _test_suite_to_json(suite):
    """Serialize *suite* as JSON: {module: {class: [method, ...]}}.

    Simplified: the original duplicated the list/dict creation logic across
    three nested if/else branches; setdefault collapses them while building
    exactly the same structure in the same order.
    """
    tests = []
    _get_tests_from_suite(suite, tests)
    test_dict = {}
    for test in tests:
        module_name = type(test).__module__
        class_name = type(test).__name__
        method_name = test._testMethodName
        mod_dict = test_dict.setdefault(module_name, {})
        mod_dict.setdefault(class_name, []).append(method_name)
    return django.utils.simplejson.dumps(test_dict)
def _run_test_suite(runner, suite):
    """Run the test suite.

    Preserve the current development apiproxy, create a new apiproxy and
    replace the datastore with a temporary one that will be used for this
    test suite, run the test suite, and restore the development apiproxy.
    This isolates the test datastore from the development datastore.
    """
    original_apiproxy = apiproxy_stub_map.apiproxy
    try:
        apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
        temp_stub = datastore_file_stub.DatastoreFileStub('GAEUnitDataStore', None, None, trusted=True)
        apiproxy_stub_map.apiproxy.RegisterStub('datastore', temp_stub)
        # Allow the other services to be used as-is for tests.
        for name in ['user', 'urlfetch', 'mail', 'memcache', 'images']:
            apiproxy_stub_map.apiproxy.RegisterStub(name, original_apiproxy.GetStub(name))
        runner.run(suite)
    finally:
        # Always restore the development apiproxy, even if the run raised.
        apiproxy_stub_map.apiproxy = original_apiproxy
def _log_error(s):
logging.warn(s)
return s
################################################
# Browser HTML, CSS, and Javascript
################################################
# This string uses Python string formatting, so be sure to escape percents as %%.
_MAIN_PAGE_CONTENT = """
<html>
<head>
<style>
body {font-family:arial,sans-serif; text-align:center}
#title {font-family:"Times New Roman","Times Roman",TimesNR,times,serif; font-size:28px; font-weight:bold; text-align:center}
#version {font-size:87%%; text-align:center;}
#weblink {font-style:italic; text-align:center; padding-top:7px; padding-bottom:7px}
#results {padding-top:20px; margin:0pt auto; text-align:center; font-weight:bold}
#testindicator {width:750px; height:16px; border-style:solid; border-width:2px 1px 1px 2px; background-color:#f8f8f8;}
#footerarea {text-align:center; font-size:83%%; padding-top:25px}
#errorarea {padding-top:25px}
.error {border-color: #c3d9ff; border-style: solid; border-width: 2px 1px 2px 1px; width:750px; padding:1px; margin:0pt auto; text-align:left}
.errtitle {background-color:#c3d9ff; font-weight:bold}
</style>
<script language="javascript" type="text/javascript">
var testsToRun = %s;
var totalRuns = 0;
var totalErrors = 0;
var totalFailures = 0;
function newXmlHttp() {
try { return new XMLHttpRequest(); } catch(e) {}
try { return new ActiveXObject("Msxml2.XMLHTTP"); } catch (e) {}
try { return new ActiveXObject("Microsoft.XMLHTTP"); } catch (e) {}
alert("XMLHttpRequest not supported");
return null;
}
function requestTestRun(moduleName, className, methodName) {
var methodSuffix = "";
if (methodName) {
methodSuffix = "." + methodName;
}
var xmlHttp = newXmlHttp();
xmlHttp.open("GET", "%s/run?name=" + moduleName + "." + className + methodSuffix, true);
xmlHttp.onreadystatechange = function() {
if (xmlHttp.readyState != 4) {
return;
}
if (xmlHttp.status == 200) {
var result = eval("(" + xmlHttp.responseText + ")");
totalRuns += parseInt(result.runs);
totalErrors += result.errors.length;
totalFailures += result.failures.length;
document.getElementById("testran").innerHTML = totalRuns;
document.getElementById("testerror").innerHTML = totalErrors;
document.getElementById("testfailure").innerHTML = totalFailures;
if (totalErrors == 0 && totalFailures == 0) {
testSucceed();
} else {
testFailed();
}
var errors = result.errors;
var failures = result.failures;
var details = "";
for(var i=0; i<errors.length; i++) {
details += '<p><div class="error"><div class="errtitle">ERROR ' +
errors[i].desc +
'</div><div class="errdetail"><pre>'+errors[i].detail +
'</pre></div></div></p>';
}
for(var i=0; i<failures.length; i++) {
details += '<p><div class="error"><div class="errtitle">FAILURE ' +
failures[i].desc +
'</div><div class="errdetail"><pre>' +
failures[i].detail +
'</pre></div></div></p>';
}
var errorArea = document.getElementById("errorarea");
errorArea.innerHTML += details;
} else {
document.getElementById("errorarea").innerHTML = xmlHttp.responseText;
testFailed();
}
};
xmlHttp.send(null);
}
function testFailed() {
document.getElementById("testindicator").style.backgroundColor="red";
}
function testSucceed() {
document.getElementById("testindicator").style.backgroundColor="green";
}
function runTests() {
// Run each test asynchronously (concurrently).
var totalTests = 0;
for (var moduleName in testsToRun) {
var classes = testsToRun[moduleName];
for (var className in classes) {
// TODO: Optimize for the case where tests are run by class so we don't
// have to always execute each method separately. This should be
// possible when we have a UI that allows the user to select tests
// by module, class, and method.
//requestTestRun(moduleName, className);
methods = classes[className];
for (var i = 0; i < methods.length; i++) {
totalTests += 1;
var methodName = methods[i];
requestTestRun(moduleName, className, methodName);
}
}
}
document.getElementById("testtotal").innerHTML = totalTests;
}
</script>
<title>GAEUnit: Google App Engine Unit Test Framework</title>
</head>
<body onload="runTests()">
<div id="headerarea">
<div id="title">GAEUnit: Google App Engine Unit Test Framework</div>
<div id="version">Version %s</div>
</div>
<div id="resultarea">
<table id="results"><tbody>
<tr><td colspan="3"><div id="testindicator"> </div></td</tr>
<tr>
<td>Runs: <span id="testran">0</span>/<span id="testtotal">0</span></td>
<td>Errors: <span id="testerror">0</span></td>
<td>Failures: <span id="testfailure">0</span></td>
</tr>
</tbody></table>
</div>
<div id="errorarea"></div>
<div id="footerarea">
<div id="weblink">
<p>
Please visit the <a href="http://code.google.com/p/gaeunit">project home page</a>
for the latest version or to report problems.
</p>
<p>
Copyright 2008-2009 <a href="mailto:George.Z.Lei@Gmail.com">George Lei</a>
and <a href="mailto:srfarley@gmail.com>Steven R. Farley</a>
</p>
</div>
</div>
</body>
</html>
"""
##############################################################################
# Script setup and execution
##############################################################################
# URL routes: the main test page, the AJAX run endpoint, and the JSON list.
application = webapp.WSGIApplication([('%s' % _WEB_TEST_DIR, MainTestPageHandler),
                                      ('%s/run' % _WEB_TEST_DIR, JsonTestRunHandler),
                                      ('%s/list' % _WEB_TEST_DIR, JsonTestListHandler)],
                                     debug=True)
def main():
    # Entry point used by the App Engine runtime.
    run_wsgi_app(application)
if __name__ == '__main__':
    main()
| Python |
# -*- coding:utf-8 -*-
#from django.db import models
import os
import datetime
import datastore_cache
datastore_cache.DatastoreCachingShim.Install()
from appengine_django.models import BaseModel
from google.appengine.ext import db
from django.utils.encoding import iri_to_uri
#themes = os.listdir(os.path.dirname(__file__) + '/../themes')
class User(BaseModel):
    """Blog user account; password is stored as a raw blob.

    Verbose names on the properties are Chinese UI labels.
    """
    name = db.StringProperty('用户')
    password = db.BlobProperty('密码')
    google_user = db.UserProperty()
class Blog(BaseModel):
    """Site-wide blog settings (title, description, feed URL, sidebar HTML)."""
    user = db.ReferenceProperty(User)
    name = db.StringProperty('博客标题',default=u'博客标题')
    description = db.TextProperty('博客描述')
    rss = db.StringProperty('订阅地址',default='/feeds/latest')
    #theme = db.StringProperty('主题',choices=themes)
    sidebar = db.TextProperty('自定义主页边栏(html)')
class Author(BaseModel):
    """Blog author profile (name, avatar, bio, email)."""
    name = db.StringProperty('作者',default='Author')
    avatar = db.StringProperty('头像',default='/static/avatars/default.jpg')
    description = db.TextProperty('介绍',default=u'更改你的个人说明')
    email = db.StringProperty('Email')
class Tag(BaseModel):
    """Tag entity keyed by tag name; `archives` holds keys of tagged posts."""
    name = db.StringProperty('名字')
    count = db.IntegerProperty(default=0)
    archives = db.ListProperty(db.Key)
    def get_absolute_url(self):
        # URL uses the tag's key_name (the tag text itself).
        return iri_to_uri('/tag/%s' % self.key().name())
class MBlog(BaseModel):
    """Micro-blog (short status) entry."""
    user = db.ReferenceProperty(User,collection_name='mblogs')
    content = db.StringProperty('内容')
    pub_date = db.DateTimeProperty('时间')
class Category(BaseModel):
    """Post category; `posts` is a denormalized post counter."""
    name = db.StringProperty()
    posts = db.IntegerProperty(default=0)
    def query_news(self):
        # This category's archives, newest first.
        return self.archives.order('-pub_date')
    def __unicode__(self):
        return self.name + ' - %d' % self.posts
    def get_absolute_url(self):
        # URL uses the category's key_name (name with spaces replaced).
        return iri_to_uri('/category/%s' % self.key().name())
class Archive(BaseModel):
    """A blog post, with a denormalized view counter and comma-separated tags."""
    title = db.StringProperty('标题',required=True)
    category = db.ReferenceProperty(Category,
                                    collection_name='archives',
                                    required=True,
                                    verbose_name='分类'
                                    )
    content = db.TextProperty('正文',required=True)
    view_nums = db.IntegerProperty(default=0)
    pub_date = db.DateTimeProperty( auto_now_add =True)
    update_date = db.DateTimeProperty( auto_now = True )
    tags = db.StringProperty('标签',default=' ')
    def process_tags(self):
        """Normalize the tag string and link this post into each Tag entity."""
        #tags = self.tags.replace(',' , ' ')
        #tags = tags.replace(';' , ' ')
        #self.tags = tags
        #self.put()
        if not self.tags or len(self.tags.lstrip().rstrip()) == 0:
            return
        # Split on commas and strip surrounding whitespace from each tag.
        tags = self.tags.split(',')
        tags = [ tag.rstrip().lstrip() for tag in tags ]
        self.tags = ','.join(tags)
        self.put()
        # Create missing Tag entities, then append this post's key to every
        # tag that does not already reference it.
        tag_model = [ Tag.get_or_insert(key_name=tag,name=tag) for tag in tags ]
        tag_model = [tag for tag in tag_model if not self.key() in tag.archives ]
        process = lambda model: model.archives.append(self.key())
        map( process , tag_model )
        map( lambda m: m.put() , tag_model )
    def query_comments(self):
        # Comments on this post, oldest first.
        return self.comments.order('date')
    def get_absolute_url(self):
        return iri_to_uri('/archive/%d' % self.key().id())
class Comment(BaseModel):
    """Visitor comment, attached to either an Archive or an MBlog."""
    archive = db.ReferenceProperty( Archive ,default=None,collection_name='comments' )
    mblog = db.ReferenceProperty( MBlog ,default=None,collection_name='comments')
    name = db.StringProperty(required=True)
    avatar = db.StringProperty(default='/static/avatars/default.jpg')
    email = db.EmailProperty(required=False)
    web = db.StringProperty()
    source = db.StringProperty()
    content = db.TextProperty(required=True)
    date = db.DateTimeProperty( auto_now_add = True )
    def get_absolute_url(self):
        # NOTE(review): assumes `archive` is set; mblog-only comments
        # would raise here -- confirm callers.
        return self.archive.get_absolute_url()
class Image(BaseModel):
    """Uploaded image blob, optionally attached to an Archive."""
    name = db.StringProperty()
    content_type = db.StringProperty()
    data = db.BlobProperty()
    archive = db.ReferenceProperty(Archive,collection_name='images')
    def get_absolute_url(self):
        # Served by datastore key, not by name.
        return '/image/%s' % str(self.key())
| Python |
# -*- coding:utf-8 -*-
import datetime
import md5
from google.appengine.api import users
from google.appengine.api import memcache
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.utils import simplejson
from django.conf import settings
def login_required( func ):
    """Decorator: redirect to the Google login page when nobody is signed in."""
    def wrap(request,*args,**kwargs):
        if not users.get_current_user():
            # Check the user via the Google Users API; after login come back
            # to the URL given in the 'continue' query parameter.
            return HttpResponseRedirect(
                users.create_login_url(request.GET.get('continue'))
            )
        return func(request,*args,**kwargs)
    return wrap
##############################
#
# 未使用
#def master_required( view ):
# """需要管理员身份"""
# def wrap(request,*args,**kwargs):
# if not request.user == request.master:
# return HttpResponseForbidden()
# else:
# return view(request,*args,**kwargs)
# return wrap
#
##############################
def admin_required( func ):
    """Decorator: require GAE admin rights.

    GET requests are redirected to the login page; other methods get a
    plain 'Permission Denied' response.
    """
    def wrap(request,*args,**kwargs):
        if not users.is_current_user_admin():
            if request.method == 'GET':
                return HttpResponseRedirect(users.create_login_url(request.path))
            else:
                return HttpResponse('Permission Denied')
        return func(request,*args,**kwargs)
    return wrap
def expose( type,html = '',error='error.html',check_ajax=True,redirect=None):
    """Decorator factory: serialize a view's context dict as JSON, or redirect.

    Bug fixes:
    - assigning to the closed-over `redirect` inside `render` made it a
      local variable (Python 2 has no `nonlocal`), raising UnboundLocalError
      on every call; a separate local `target` is used instead.
    - `HttpRedirect` was a NameError; the real class is HttpResponseRedirect.
    """
    def wrap(func):
        def render(request,**kwargs):
            if kwargs:
                context = func( request ,kwargs )
            else:
                context = func( request )
            if type == 'json':
                return HttpResponse(simplejson.dumps(context))
            if check_ajax and request.is_ajax():
                return HttpResponse(simplejson.dumps(context))
            # Prefer the decorator-level redirect, else one from the context.
            target = redirect or context.get('redirect')
            if target:
                return HttpResponseRedirect(target)
            # NOTE(review): the plain-HTML path still returns None and the
            # `html`/`error` templates are never rendered -- confirm intent.
        return render
    return wrap
return wrap
def _cntime():
"""根据GAE的UTC时间计算中国时区的时间"""
return datetime.datetime.utcnow() + datetime.timedelta(hours=+8)
def cache_page(seconds,vary_user=False):
    """Cache a view's response in memcache for *seconds*.

    @param vary_user: cache per signed-in user instead of globally
    """
    def deco(view):
        def wrap(request, *args):
            if request.method == 'GET':
                # Cache key: view name plus the URL arguments.
                key = view.__name__ + '_' + '_'.join([ str(argu.encode('utf-8')) for argu in args])
                # Fold HTTP_ACCEPT into the key, mainly to distinguish
                # WAP browsers from PC browsers.
                # NOTE(review): a missing Accept header yields md5(None),
                # which raises -- confirm all callers send one.
                key += md5.md5(request.META.get('HTTP_ACCEPT')).hexdigest()
                if vary_user:
                    user = users.get_current_user()
                    if user:
                        key = "%s_%s" %( user.user_id() , key )
                resp = memcache.get(key)
                if resp:
                    return resp
                else:
                    resp = view(request , *args )
                    memcache.set( key , resp ,seconds)
                    return resp
            # Non-GET requests are never cached.
            return view(request,*args)
        return wrap
    return deco
def clear_cache(key='',key_start=''):
    """
    Clear cache whose key is key or starts with key_start
    @note: Only 'POST' request will clear the cache
    """
    def deco(view):
        def wrap( request , * args , **kwargs ):
            if request.method == 'POST':
                resp = view( request , *args ,**kwargs)
                if not key:
                    # No specific key: drop the entire cache.
                    # NOTE(review): keyed deletion (key / key_start) is not
                    # implemented -- a non-empty key clears nothing.
                    memcache.flush_all()
                return resp
            return view( request ,*args,**kwargs)
        return wrap
    return deco
def blank(*args,**kwargs):
    """No-op decorator factory used in place of caching while DEBUG is on."""
    def deco(view):
        def passthrough(request, *vargs, **vkwargs):
            # Forward straight to the wrapped view; no caching at all.
            return view(request, *vargs, **vkwargs)
        return passthrough
    return deco
# While developing (DEBUG=True), replace the caching decorators with no-ops.
if settings.DEBUG:
    cache_page = blank
    clear_cache = blank
| Python |
from django.shortcuts import render_to_response
from blog.models import *
def index(request):
    """Render sitemap.xml listing every archive, newest first."""
    archives = Archive.all().order('-pub_date')
    host = request.get_host()
    return render_to_response('sitemap.xml',dict(archives=archives,
                                                 host = host))
| Python |
from google.appengine.ext.db import djangoforms
from models import *
class CommentForm(djangoforms.ModelForm):
    """Visitor comment form; server-side-only fields are excluded."""
    class Meta:
        model = Comment
        exclude = ['mblog','source','date','archive','avatar']
class ArchiveForm(djangoforms.ModelForm):
    """Create/edit form for Archive; auto-managed fields are excluded."""
    class Meta:
        model = Archive
        exclude = ['pub_date','update_date','view_nums']
class BlogForm(djangoforms.ModelForm):
    """Full edit form for the Blog settings entity."""
    class Meta:
        model = Blog
class AuthorForm(djangoforms.ModelForm):
    """Full edit form for the Author profile entity."""
    class Meta:
        model = Author
| Python |
"""
"""
#from django.test import TestCase
import unittest
from django.test.client import Client
from blog.models import *
# Ensure a default Author entity exists before any test touches it.
Author.get_or_insert(key_name='author',name='test')
class LoginTest(unittest.TestCase):
    """Base class: creates a test User and logs the test client in."""
    def setUp(self):
        self.user = User(name='test',password='password',google_user=None)
        self.user.put()
        self.client = Client()
        self.client.post('/login',{'user':'test','password':'password'})
class SimpleTest(unittest.TestCase):
    """Sanity check that the test harness itself works."""
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # failUnlessEqual is a deprecated alias (removed in modern Python);
        # assertEqual is the supported spelling with identical behavior.
        self.assertEqual(1 + 1, 2)
class MblogTest(LoginTest):
    """Placeholder for micro-blog tests (inherits LoginTest.setUp)."""
    def test_(self):
        pass
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
>>> from blog.models import *
>>> category = Category(name='abc')
>>> category.put()
datastore...
>>> archive = Archive(title='Test',category=category,content='For test',tags='Abc,Hello,ABC')
>>> archive.put()
datastore...
>>> archive.process_tags()
>>> tags = Tag.all().order('name')
>>> [ tag.name for tag in tags ]
[u'ABC', u'Abc'...
>>>
"""}
| Python |
# -*- coding:utf-8 -*-
from google.appengine.api import users
from django import template
from blog.models import *
register = template.Library()
class BlogInit(template.Node):
    """Template node that injects common blog objects into the context."""
    def __init__(self):
        pass
    def render(self,context):
        # get_or_insert guarantees the singleton Blog/Author entities exist.
        context['env_blog'] = Blog.get_or_insert('blog')
        context['env_author'] = Author.get_or_insert('author')
        context['env_is_admin'] = users.is_current_user_admin()
        context['env_tags'] = Tag.all()
        # Nodes must return a string; this one only mutates the context.
        return ''
@register.tag(name='blog_init')
def blog(parser,token):
    """Template tag `{% blog_init %}`: see BlogInit for the injected vars."""
    return BlogInit()
| Python |
| Python |
from blog.models import *
def q_new_comments(num):
    """Return num comments order by date desc"""
    # Previously an empty stub that returned None; implemented to match the
    # docstring, using the same query style as the rest of the app.
    return Comment.all().order('-date')[:num]
from django.contrib.syndication.feeds import Feed
from models import *
class LatestArchives(Feed):
    """RSS feed of the 20 most recent archives.

    NOTE(review): title/description/item_author_name are class attributes,
    evaluated once at import time; later edits to the Blog/Author entities
    won't appear until the module is reloaded.
    """
    title = Blog.get_or_insert(key_name='blog').name
    link = "/"
    description = Blog.get_or_insert(key_name='blog').description
    def item_pubdate(self,item):
        return item.pub_date
    def item_categories(self,item):
        return [item.category.name]
    item_author_name = Author.get_or_insert(key_name='author').name
    def items(self):
        return Archive.all().order('-pub_date')[:20]
| Python |
# -*- coding:utf-8 -*-
import random
import datetime
from google.appengine.api import users
from google.appengine.ext.webapp.util import login_required
from google.appengine.api import memcache
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import HttpResponseForbidden
from django.http import HttpResponseNotFound
from django.shortcuts import render_to_response
from django.template.loader import render_to_string
from blog.models import *
from blog.utils import *
from blog.forms import *
from blog.utils import _cntime
@cache_page(60*10)
def index(request):
    """
    Show the index page
    request -- the HttpRequest object
    """
    # 'VND.WAP.WML' in the Accept header marks a WAP browser.
    # Bug fix: default to '' so a request without an Accept header no longer
    # raises AttributeError on None.upper().
    if 'VND.WAP.WML' in request.META.get('HTTP_ACCEPT', '').upper():
        return HttpResponseRedirect('/wap')
    categories = Category.all()
    new_archives = Archive.all().order('-pub_date')[:5]
    new_comments = Comment.all().order('-date')[:6]
    top_archives = Archive.all().order('-view_nums')[:5]
    new_mblogs = MBlog.all().order('-pub_date')[:5]
    mblog_count = MBlog.all().count()
    mblog_comment_count = Comment.all().filter('mblog !=',None).count()
    context =dict(categories=categories,
                  new_archives=new_archives,
                  top_archives=top_archives,
                  new_mblogs = new_mblogs,
                  new_comments = new_comments,
                  mblog_count = mblog_count,
                  mblog_comment_count = mblog_comment_count,
                  )
    return render_to_response('index.html',context)
@admin_required
@clear_cache()
def add_mblog(request):
    """Post a new micro-blog entry (admin only)."""
    if request.method == 'POST':
        content = request.POST.get('content')
        if not content:
            return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
        mblog = MBlog(content=content,pub_date=_cntime())
        mblog.put()
    # Always bounce back to the referring page.
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
@admin_required
@clear_cache()
def add_category(request):
    """Create a new category (admin only); key_name derives from the name."""
    if request.method == 'GET':
        return render_to_response('add_category.html')
    elif request.method == 'POST':
        name = request.POST.get('name')
        if not name:
            return render_to_response('add_category.html', {'error':'必填'})
        # Replace spaces with '_' to form the datastore key name.
        key_name = name.replace(' ', '_')
        if Category.get_by_key_name(key_name):
            return render_to_response('add_category.html', dict(error=u'已存在'))
        category = Category(name=name, key_name=key_name)
        category.put()
        return HttpResponseRedirect(category.get_absolute_url().encode('utf-8'))
@admin_required
@clear_cache()
def edit_category(request,name=''):
    """Rename a category (admin only). GET shows the form; POST applies it."""
    category = Category.get_by_key_name(name)
    if not category:
        return HttpResponseRedirect('/not_found')
    if request.method == 'GET':
        return render_to_response('edit_category.html',dict(category=category))
    elif request.method == 'POST':
        name = request.POST.get('name')
        if not name:
            # Bug fix: `category` was passed as a stray extra argument to
            # render_to_response; it belongs inside the context dict.
            return render_to_response('edit_category.html',dict(error=u'required',category=category))
        category.name = name
        category.put()
        return HttpResponseRedirect(category.get_absolute_url())
@clear_cache()
def delete_category(request,name=''):
    """Delete a category (admin + same-site referer required)."""
    if not users.is_current_user_admin():
        return HttpResponseForbidden()
    if not request.META.get('HTTP_REFERER', '').startswith('http://'+request.get_host()):
        # Refuse requests that do not originate from this site (CSRF guard).
        return HttpResponseForbidden();
    category = Category.get_by_key_name(name)
    if not category:
        return HttpResponseRedirect('/not_found')
    # NOTE(review): archives referencing this category keep dangling refs.
    category.delete()
    return HttpResponseRedirect('/')
@cache_page(10*60 ,vary_user=True)
def category(request, name=''):
    """
    Show a category's archives (GET) or delete the category (DELETE).

    @param name: category key_name
    """
    if request.method == 'GET':
        # /category/<key_name>
        category = Category.get_by_key_name(name)
        if not category:
            return HttpResponseRedirect('/not_found')
        archives = category.archives.order('-pub_date')
        context = dict(category = category,
                       archives = archives
                       )
        return render_to_response('category.html', context )
    elif request.method == 'DELETE':
        if not users.is_current_user_admin():
            return HttpResponseForbidden()
        category = Category.get_by_key_name(name)
        if not category:
            return HttpResponseRedirect('/not_found')
        if category.archives.count() > 0:
            # NOTE(review): non-empty categories fall through and are
            # deleted anyway -- confirm whether this should refuse instead.
            pass
        category.delete()
        # NOTE(review): the DELETE branch returns no HttpResponse (None).
def tag(request, name=''):
    """Render the page for one tag, listing every archive carrying it."""
    tag = Tag.get_by_key_name(name)
    if not tag:
        return HttpResponseRedirect('/not_found')
    posts = [Archive.get(archive_key) for archive_key in tag.archives]
    return render_to_response('tag.html', dict(tag=tag, archives=posts))
@admin_required
@clear_cache()
def add_archive(request):
    """Create a new archive (blog post): GET shows the form, POST saves it."""
    if request.method == 'GET':
        return render_to_response('edit_archive.html', dict(form=ArchiveForm()))
    elif request.method == 'POST':
        form = ArchiveForm(request.POST)
        if not form.is_valid():
            return render_to_response('edit_archive.html', dict(form=form))
        archive = form.save()
        archive.pub_date = _cntime()
        archive.put()
        archive.process_tags()
        # Keep the per-category post counter in sync.
        archive.category.posts += 1
        archive.category.put()
        return HttpResponseRedirect(archive.get_absolute_url())
@admin_required
@clear_cache()
def edit_archive(request, id):
    """Edit an existing archive: GET shows the form, POST applies the changes."""
    archive = Archive.get_by_id(int(id))
    if not archive:
        return HttpResponseRedirect('/not_found')
    if request.method == 'GET':
        form = ArchiveForm(instance=archive)
        context = dict(form=form, edit=True, id=archive.key().id())
        return render_to_response('edit_archive.html', context)
    elif request.method == 'POST':
        form = ArchiveForm(request.POST)
        if not form.is_valid():
            # Bug fix: was ``edit=Ture`` -- a NameError raised whenever a
            # submitted form failed validation.
            context = dict(form=form, edit=True, id=archive.key().id())
            return render_to_response('edit_archive.html', context)
        archive.title = form.cleaned_data['title']
        # Bug fix: was ``archive.categorie`` (typo), which silently set a
        # nonexistent attribute instead of updating the category.
        archive.category = form.cleaned_data['category']
        archive.content = form.cleaned_data['content']
        archive.tags = form.cleaned_data['tags']
        archive.save()
        archive.process_tags()
        return HttpResponseRedirect(archive.get_absolute_url())
@clear_cache()
def delete_archive(request, id):
    """Delete one archive (admin only; same-site referer required)."""
    if not users.is_current_user_admin():
        return HttpResponseForbidden()
    referer = request.META.get('HTTP_REFERER', 'None')
    if not referer.startswith('http://' + request.get_host()):
        return HttpResponseForbidden()
    archive = Archive.get_by_id(int(id))
    if not archive:
        return HttpResponseRedirect('/not_found')
    archive.delete()
    # The in-memory entity still resolves its category reference after delete.
    return HttpResponseRedirect(archive.category.get_absolute_url())
@cache_page(60*10, vary_user=True)
def archive(request, id=-1):
    """
    The Archive RESTful handler.

    id -- archive id, from /archive/<id>.
    Renders the archive page and bumps its view counter.
    """
    archive = Archive.get_by_id(int(id))
    if not archive:
        return HttpResponseRedirect('/not_found')
    archive.view_nums += 1
    archive.put()
    user = users.get_current_user()
    if user:
        # Pre-fill the comment form for logged-in users.
        comment_form = CommentForm(instance=Comment(
            name=user.nickname(),
            email=user.email(),
            content=u'说点啥呗',
        ))
    else:
        comment_form = CommentForm()
    # Bug fix: removed a duplicated ``if not archive`` check here -- the same
    # condition already returned at the top, so it was unreachable.
    context = dict(archive=archive,
                   comment_form=comment_form,
                   is_admin=users.is_current_user_admin(),
                   )
    if archive.tags:
        context['tags'] = archive.tags.split(',')
    return render_to_response('archive.html', context)
@cache_page(60)
def sidebar(request):
    """Render the shared sidebar: categories, popular and recent posts,
    latest comments, tags, plus the blog/author singletons."""
    context = dict(
        categories=Category.all().order('posts'),
        populars=Archive.all().order('-view_nums')[:7],
        archives=Archive.all().order('-pub_date')[:10],
        comments=Comment.all().order('-date')[:10],
        author=Author.get_or_insert('author'),
        blog=Blog.get_or_insert('blog'),
        tags=Tag.all(),
    )
    return render_to_response('sidebar.html', context)
@clear_cache()
def add_comment(request, archive_id):
    """Attach a new comment to an archive (POST only)."""
    if request.method != 'POST':
        return HttpResponseForbidden()
    archive = Archive.get_by_id(int(archive_id))
    if not archive:
        return HttpResponseRedirect('/not_found')
    form = CommentForm(request.POST)
    if not form.is_valid():
        context = dict(archive=archive, comment_form=form)
        return render_to_response('archive.html', context)
    comment = form.save()
    # Assign one of the 35 stock avatar images at random.
    avatar_urls = ['/static/avatars/avatar_%d.jpg' % n for n in range(1, 36)]
    comment.avatar = random.choice(avatar_urls)
    comment.archive = archive
    comment.date = _cntime()
    comment.put()
    return HttpResponseRedirect(archive.get_absolute_url())
@admin_required
@expose(type='json')
def upload(request):
    """
    @description: upload an image (admin only, POST, JSON response)
    @return: dict with a ``success`` flag and either the image URL or an
        error message in ``data``
    """
    if not request.method == 'POST':
        return dict(success=False, data='Error')
    image = request.FILES.get('image')
    if not image:
        return dict(success=False, data='必须上传个图片')
    if image.size > 1024 * 1024 * 1:  # hard limit: 1 MB
        return dict(success=False, data='图片太大啦')
    # Bug fix: the original tested only the last three characters of the
    # name, so 'photo.jpeg' was rejected while a dot-less name that merely
    # ends in 'png' slipped through. Check the real extension instead.
    name = image.name or ''
    ext = name.rsplit('.', 1)[-1].lower() if '.' in name else ''
    if ext not in ('jpg', 'jpeg', 'gif', 'png'):
        return dict(success=False, data='不支持的图片类型')
    model = Image(name=image.name,
                  content_type=image.content_type,
                  data=image.read(),
                  )
    model.put()
    return dict(success=True,
                data='http://%s%s' % (request.get_host(),
                                      model.get_absolute_url())
                )
@cache_page(60*60*6)
def image(request, key):
    """Serve a stored image identified by its datastore key."""
    img = Image.get(db.Key(key))
    if img:
        return HttpResponse(img.data, img.content_type)
    return HttpResponseNotFound()
def login(request):
    """Send the user to the Google login page, returning to the referrer."""
    back_to = request.META.get('HTTP_REFERER', '/')
    return HttpResponseRedirect(users.create_login_url(back_to))
@clear_cache()
@admin_required
def admin(request, tab=''):
    """Admin settings page.

    GET renders both the blog and author forms; POST saves whichever tab
    ``tab`` names ('blog' or 'author') and redirects back to /admin.
    """
    if request.method == 'GET':
        blog = BlogForm(instance=Blog.get_or_insert('blog'))
        author = AuthorForm(instance=Author.get_or_insert('author'))
        return render_to_response('admin.html', dict(blog=blog, author=author))
    elif request.method == 'POST':
        if tab == 'blog':
            blog = BlogForm(request.POST)
            if not blog.is_valid():
                # Re-render with the invalid blog form plus a fresh author form.
                author = AuthorForm(instance=Author.get_or_insert('author'))
                return render_to_response('admin.html',
                                          dict(blog=blog, author=author))
            Blog(key_name='blog',
                 name=blog.cleaned_data['name'],
                 description=blog.cleaned_data['description'],
                 rss=blog.cleaned_data['rss'],
                 sidebar=blog.cleaned_data['sidebar'],
                 ).put()
        elif tab == 'author':
            author = AuthorForm(request.POST)
            if not author.is_valid():
                blog = BlogForm(instance=Blog.get_or_insert('blog'))
                return render_to_response('admin.html',
                                          dict(blog=blog, author=author))
            Author(key_name='author',
                   name=author.cleaned_data['name'],
                   description=author.cleaned_data['description'],
                   avatar=author.cleaned_data['avatar'],
                   email=author.cleaned_data['email'],
                   ).put()
        else:
            # Bug fix: the response body was the typo 'nof_found'.
            return HttpResponse('not_found')
        return HttpResponseRedirect('/admin')
def not_found(request):
    """Render the 404 page, passing along the referring URL."""
    referer = request.META.get('HTTP_REFERER')
    return render_to_response('not_found.html', dict(referer=referer))
#def wap(request):
# archive = Archive.all().order('-pub_date')[0]
# info = str(request.META)
# return render_to_response('wap.html',dict(info=info))
# return render_to_response('wap.html',dict(archive=archive),mimetype='text/vnd.wap.wml')
#
def archive_nav(request, id, direction):
    """Redirect to the adjacent archive; used by mouse-gesture navigation."""
    archive = Archive.get_by_id(int(id))
    if not archive:
        return HttpResponseRedirect('/not_found')
    if direction == 'next':
        # 'next' walks backwards in publication time: the newest older post.
        comparison, ordering = '<', '-pub_date'
    else:
        comparison, ordering = '>', 'pub_date'
    neighbour = (Archive.all()
                 .filter('pub_date ' + comparison, archive.pub_date)
                 .order(ordering)
                 .get())
    if not neighbour:
        return HttpResponseRedirect('/not_found')
    return HttpResponseRedirect(neighbour.get_absolute_url())
| Python |
# coding:utf-8
from django.http import HttpResponseRedirect,HttpResponse
from django.shortcuts import render_to_response
from django.template.loader import render_to_string
from blog.models import *
from blog.utils import *
def index(request):
    """Render the micro-blog landing page."""
    return render_to_response('mblog/index.html')
@master_required
@expose()
def add_mblog(request):
    """Create a micro-blog entry (POST only; content capped at 200 chars).

    Non-POST requests fall through and return None, matching the original
    behaviour (the @expose decorator is expected to cope with that).
    """
    if request.method == 'POST':
        content = request.POST.get('content')
        if not content or len(content) > 200:
            return dict(success=False, msg='内容不能为空')
        entry = MBlog(user=request.master, content=content, pub_date=_cntime())
        entry.put()
        return dict(success=True, redirect='/%s/mblog' % request.master.name)
@master_required
@expose()
def del_mblog(request):
    """Delete a micro-blog entry by id (POST only).

    NOTE(review): this function appears truncated at the end of the file --
    when the entry IS found nothing is deleted and nothing is returned;
    confirm against the complete source.
    """
    if request.method == 'POST':
        id = request.POST.get('id')
        # NOTE(review): get_by_key_name with an int looks wrong -- key names
        # are strings; presumably get_by_id was intended. Verify.
        mb = MBlog.get_by_key_name(int(id))
        if not mb:
            return dict(success=False,msg='无此条记录')
| Python |
from google.appengine.api import users
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect,HttpResponse
from django.core.paginator import Paginator
from blog.models import *
from blog.utils import *
from blog.utils import _cntime
def index(request):
    """WAP front page: the ten most recent archives."""
    recent = Archive.all().order('-pub_date')[:10]
    return render_to_response('wap/index.html', dict(archives=recent))
def archives(request):
    """WAP list of all archives, paginated five per page."""
    paginator = Paginator(Archive.all().order('-pub_date'), 5)
    page = paginator.page(request.GET.get('page', 1))
    return render_to_response('wap/archives.html', dict(archives=page))
def categories(request):
    """WAP list of every category."""
    return render_to_response('wap/categories.html',
                              dict(categories=Category.all()))
def comments(request):
    """WAP list of the latest comments, ten per page."""
    paginator = Paginator(Comment.all().order('-date'), 10)
    page = paginator.page(request.GET.get('page', 1))
    return render_to_response('wap/comments.html', dict(comments=page))
def archive(request, id):
    """WAP single-archive page; bumps the archive's view counter."""
    archive = Archive.get_by_id(int(id))
    # Bug fix: the missing-archive check must run BEFORE touching
    # ``archive.view_nums`` -- the original raised AttributeError on None
    # instead of redirecting to the not-found page.
    if not archive:
        return HttpResponseRedirect('/wap/not_found')
    archive.view_nums += 1
    archive.put()
    return render_to_response('wap/archive.html', dict(archive=archive))
def category(request, name):
    """WAP single-category page."""
    cat = Category.get_by_key_name(name)
    if not cat:
        return HttpResponseRedirect('/wap/not_found')
    return render_to_response('wap/category.html', dict(category=cat))
@clear_cache()
def add_comment(request, archive_id):
    """Add a comment to an archive from the WAP interface (POST only)."""
    if not request.method == 'POST':
        return HttpResponseRedirect('/wap')
    archive = Archive.get_by_id(int(archive_id))
    if not archive:
        # Unknown archive id.
        return HttpResponseRedirect('/wap/not_found')
    name = request.POST.get('name')
    content = request.POST.get('content')
    # Bug fix: a missing name crashed on ``name + '@mobile.me'``
    # (TypeError); bounce incomplete submissions back to the article.
    if not name or not content:
        return HttpResponseRedirect('/wap/archive/%s' % archive_id)
    comment = Comment(archive=archive,
                      name=name,
                      # WAP users have no account; synthesise an address.
                      email=name + '@mobile.me',
                      content=content,
                      date=_cntime(),  # converted to Beijing local time
                      source='wap',
                      )
    comment.put()
    return HttpResponseRedirect('/wap%s#comment_%d' % (
        archive.get_absolute_url(), comment.key().id())
    )
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bootstrap for running a Django app under Google App Engine.
The site-specific code is all in other files: settings.py, urls.py,
models.py, views.py. And in fact, only 'settings' is referenced here
directly -- everything else is controlled from there.
"""
# Standard Python imports.
import os
import sys
import logging
from appengine_django import InstallAppengineHelperForDjango
InstallAppengineHelperForDjango()
from appengine_django import have_django_zip
from appengine_django import django_zip_path
# Google App Engine imports.
from google.appengine.ext.webapp import util
# Import the part of Django that we use here.
import django.core.handlers.wsgi
def main():
    """WSGI entry point: wire Django's handler into the App Engine runtime."""
    # Make sure the zipped Django distribution is importable when present.
    if have_django_zip and django_zip_path not in sys.path:
        sys.path.insert(1, django_zip_path)
    # Hand every request over to Django's WSGI handler via the CGI adapter.
    util.run_wsgi_app(django.core.handlers.wsgi.WSGIHandler())

if __name__ == '__main__':
    main()
| Python |
import os

from django.conf.urls.defaults import *

from blog.feeds import LatestArchives
from blog.sitemaps import *
from blog.models import *
# Named feed classes exposed under /feeds/<name>.
feeds = {
    'latest': LatestArchives,
}
#
#archive_map = {
#    'queryset': Archive.all(),
#    'date_field': 'pub_date',
#}
#
#sitemaps = {
#    'blog': GenericSitemap( archive_map ,priority=0.5),
#    'archive': ArchiveSitemap,
#}
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# Directory containing this file; used to serve static theme assets below.
# (Bug fix companion: requires ``import os`` at the top of the file, which
# was missing.)
__ROOT__ = os.path.dirname(__file__)
urlpatterns = patterns('',
    # Example:
    # (r'^waylybaye/', include('waylybaye.foo.urls')),
    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # (r'^admin/', include(admin.site.urls)),
    (r'^themes/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': __ROOT__ + '/themes'}),
    (r'^feeds/(?P<url>.*)$', 'django.contrib.syndication.views.feed',
        {'feed_dict': feeds}),
    (r'^sitemap.xml$', 'blog.sitemaps.index'),
    #(r'^sitemap.xml$', 'django.contrib.sitemaps.views.index',
    #    {'sitemaps' :sitemaps}),
    #(r'^sitemap-(?P<section>.+).xml$',
    #    'django.contrib.sitemaps.views.sitemap',
    #    {'sitemaps' :sitemaps}),
    (r'^$', 'blog.views.index'),
    (r'^category/(\w+)/edit$', 'blog.views.edit_category'),
    (r'^category/(\w+)$', 'blog.views.category'),
    (r'^category$', 'blog.views.add_category'),
    # Bug fix: this pattern lacked the '^' anchor; Django matches URL
    # patterns with re.search, so the unanchored regex could hijack any
    # path containing 'tag/<word>' (e.g. /wap/tag/x).
    (r'^tag/(\w+)$', 'blog.views.tag'),
    (r'^mblog$', 'blog.views.add_mblog'),
    (r'^archive/(\d+)/(next|prev)$', 'blog.views.archive_nav'),
    (r'^archive/(\d+)/comment$', 'blog.views.add_comment'),
    (r'^archive/(\d+)/edit$', 'blog.views.edit_archive'),
    (r'^archive/(\d+)/delete$', 'blog.views.delete_archive'),
    (r'^archive/(\d+)$', 'blog.views.archive'),
    (r'^archive$', 'blog.views.add_archive'),
    (r'^sidebar$', 'blog.views.sidebar'),
    (r'^upload$', 'blog.views.upload'),
    (r'^image/(.+)$', 'blog.views.image'),
    (r'^login$', 'blog.views.login'),
    (r'^admin$', 'blog.views.admin'),
    (r'^admin/(?P<tab>blog|author)$', 'blog.views.admin'),
    (r'^not_found$', 'blog.views.not_found'),
    (r'^wap$', 'blog.wap.index'),
    (r'^wap/archives$', 'blog.wap.archives'),
    (r'^wap/comments$', 'blog.wap.comments'),
    (r'^wap/archive/(\d+)$', 'blog.wap.archive'),
    (r'^wap/archive/(\d+)/comment$', 'blog.wap.add_comment'),
    (r'^wap/category/(\w+)$', 'blog.wap.category'),
    # NOTE(review): 'blog.wap.not_found' and 'blog.views.test' are not
    # defined in the visible modules -- confirm they exist.
    (r'^wap/not_found$', 'blog.wap.not_found'),
    (r'^test$', 'blog.views.test'),
)
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
datastore_cache.py
Created by Alkis Evlogimenos on 2009-04-19.
"""
import itertools
import logging
import threading
from google.appengine.api import memcache
from google.appengine.api import apiproxy_stub_map
from google.appengine.datastore import datastore_pb
"""Provides a shim that caches datastore Get calls.
Example code:
import datastore_cache
datastore_cache.DatastoreCachingShim.Install()
# ...
def main(args):
util.run_wsgi_app(application)
datastore_cache.DatastoreCachingShim.ResetCache()
"""
class APIProxyShim(object):
    """A generic shim class, with methods to install/uninstall it.

    Subclasses of this class can be used to replace the real stub for a
    service, intercepting and possibly passing on calls to the original stub.
    """

    SERVICE_NAME = None  # To be overridden by subclasses
    _instance = None

    def __init__(self, wrapped_stub):
        """Constructor. Internal use only - see Install()."""
        self._wrapped_stub = wrapped_stub

    def CallWrappedStub(self, call, request, response):
        """Allows subclasses to call the wrapped stub."""
        self._wrapped_stub.MakeSyncCall(self.SERVICE_NAME, call, request,
                                        response)

    def MakeSyncCall(self, service, call, request, response):
        """Dispatch a call to a _Dynamic_<call> handler or the wrapped stub.

        Bug fix: the original wrote ``assert (cond, msg)`` -- asserting a
        two-element tuple, which is always truthy -- so the service-name
        guard could never fire.
        """
        assert service == self.SERVICE_NAME, (
            'Got service name "%s", expected "%s"'
            % (service, self.SERVICE_NAME))
        messages = []
        assert request.IsInitialized(messages), messages
        method = getattr(self, '_Dynamic_' + call, None)
        if method:
            method(request, response)
        else:
            self.CallWrappedStub(call, request, response)
        assert response.IsInitialized(messages), messages

    def __getattr__(self, name):
        """Pass-through to the wrapped stub."""
        return getattr(self._wrapped_stub, name)

    @classmethod
    def Install(cls):
        """Installs the shim. Only needs to be run once at import time.

        Note that this accesses internal members of APIProxyStubMap, so may
        break in future.
        """
        if not cls._instance:
            wrapped_stub = apiproxy_stub_map.apiproxy.GetStub(cls.SERVICE_NAME)
            assert wrapped_stub, "No service '%s' found to wrap." % cls.SERVICE_NAME
            cls._instance = cls(wrapped_stub)
            stub_dict = apiproxy_stub_map.apiproxy._APIProxyStubMap__stub_map
            stub_dict[cls.SERVICE_NAME] = cls._instance

    @classmethod
    def Uninstall(cls):
        """Uninstalls the shim.

        Note that there's no need to uninstall a shim after each request. You
        can install it once at import time and leave it there between
        requests.
        """
        if cls._instance:
            stub_dict = apiproxy_stub_map.apiproxy._APIProxyStubMap__stub_map
            stub_dict[cls.SERVICE_NAME] = cls._instance._wrapped_stub
            cls._instance = None
class DatastoreCachingShim(APIProxyShim):
    """Shim for 'datastore_v3' that caches Get results in memcache.

    Entities are cached keyed by their encoded datastore key. Puts and
    deletes update/invalidate the cache; inside transactions, invalidations
    are deferred until Commit.
    """
    SERVICE_NAME = 'datastore_v3'

    def __init__(self, default_stub):
        super(DatastoreCachingShim, self).__init__(default_stub)
        # Per-thread bookkeeping of keys to purge when a transaction commits,
        # mapping transaction handle -> list of encoded keys.
        # NOTE(review): because threading.local() is initialised only here,
        # ``to_delete`` exists solely on the thread that constructed the
        # shim; other threads would hit AttributeError. Confirm the shim is
        # only used single-threaded.
        self.local = threading.local()
        self.local.to_delete = dict()

    def _Dynamic_Get(self, request, response):
        """Intercepts get requests and returns them from cache if available."""
        # NOTE(review): logs every Get at INFO level -- noisy in production.
        logging.info("Tx: %s, Keys: %s", request.has_transaction(), [str(x) for x in request.key_list()])
        if request.has_transaction():
            # Transactional reads must see a consistent snapshot: bypass cache.
            self.CallWrappedStub('Get', request, response)
            return
        new_request = datastore_pb.GetRequest()
        new_response = datastore_pb.GetResponse()
        encoded_keys = [k.Encode() for k in request.key_list()]
        cached = memcache.get_multi(encoded_keys)
        # Forward only the cache misses to the real datastore.
        for key, encoded_key in itertools.izip(request.key_list(), encoded_keys):
            if encoded_key not in cached:
                new_request.add_key().CopyFrom(key)
        if new_request.key_size() > 0:
            self.CallWrappedStub('Get', new_request, new_response)
        # Merge cached and freshly fetched entities back into the response,
        # preserving the order of the requested keys.
        entity_iter = iter(new_response.entity_list())
        to_put = dict()
        for encoded_key in encoded_keys:
            entity = cached.get(encoded_key, None)
            if entity:
                response.add_entity().mutable_entity().CopyFrom(entity)
            else:
                entity = entity_iter.next()
                if entity.entity().IsInitialized():
                    # self.entity_cache[encoded_key] = entity.entity()
                    to_put[encoded_key] = entity.entity()
                response.add_entity().CopyFrom(entity)
        if to_put:
            memcache.set_multi(to_put)

    def _Dynamic_Put(self, request, response):
        """Intercepts puts and adds them to the cache."""
        self.CallWrappedStub('Put', request, response)
        # If this is in a transaction we mark these entries for deletion
        # when and if the transaction commits.
        if request.has_transaction():
            to_delete = [k.Encode() for k in response.key_list()]
            self.local.to_delete[request.transaction().handle()].extend(to_delete)
            return
        to_put = dict()
        for e, k in itertools.izip(request.entity_list(), response.key_list()):
            # Copy the (possibly newly allocated) key back into the entity
            # before caching it.
            e.key().CopyFrom(k)
            to_put[k.Encode()] = e
        if to_put:
            memcache.set_multi(to_put)

    def _Dynamic_Delete(self, request, response):
        """Intercepts deletes and deletes entries from the cache."""
        self.CallWrappedStub('Delete', request, response)
        to_delete = [k.Encode() for k in request.key_list()]
        # If this is in a transaction we mark these entries for deletion
        # when and if the transaction commits.
        if request.has_transaction():
            self.local.to_delete[request.transaction().handle()].extend(to_delete)
            return
        memcache.delete_multi(to_delete)

    def _Dynamic_Next(self, request, response):
        """Intercepts query results and caches the returned entities."""
        self.CallWrappedStub('Next', request, response)
        to_put = dict([(e.key().Encode(), e) for e in response.result_list()])
        memcache.set_multi(to_put)

    def _Dynamic_BeginTransaction(self, request, transaction):
        """Intercepts the beginning of transactions and creates thread local storage for deletions"""
        self.CallWrappedStub('BeginTransaction', request, transaction)
        self.local.to_delete[transaction.handle()] = []

    def _Dynamic_Commit(self, transaction, transaction_response):
        """Intercepts the commit of transactions and deletes all entities that were modified/delete by this transaction"""
        # We delete from cache before we commit otherwise we have a race condition.
        to_delete = self.local.to_delete[transaction.handle()]
        if to_delete:
            memcache.delete_multi(to_delete)
        del self.local.to_delete[transaction.handle()]
        self.CallWrappedStub('Commit', transaction, transaction_response)

    def _Dynamic_Rollback(self, transaction, transaction_response):
        """Intercepts the rollback of transactions and clears the thread local storage for them"""
        del self.local.to_delete[transaction.handle()]
        self.CallWrappedStub('Rollback', transaction, transaction_response)
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A Python "serializer", based on the default Django python serializer.
The only customisation is in the deserialization process which needs to take
special care to resolve the name and parent attributes of the key for each
entity and also recreate the keys for any references appropriately.
"""
from django.conf import settings
from django.core.serializers import base
from django.core.serializers import python
from django.db import models
from google.appengine.api import datastore_types
from google.appengine.ext import db
from django.utils.encoding import smart_unicode
Serializer = python.Serializer
class FakeParent(object):
    """Stand-in for a parent model instance.

    Lets a child entity be constructed knowing only its parent's key,
    avoiding a datastore fetch of the actual parent instance.
    """
    def __init__(self, parent_key):
        self._entity = parent_key
def Deserializer(object_list, **options):
    """Deserialize simple Python objects back into Model instances.

    It's expected that you pass the Python objects themselves (instead of a
    stream or a string) to the constructor. Yields one DeserializedObject
    per input dict; raises base.DeserializationError on unresolvable
    references.
    """
    # Ensure every app's models are loaded so model lookup below succeeds.
    models.get_apps()
    for d in object_list:
        # Look up the model and starting build a dict of data for it.
        Model = python._get_model(d["model"])
        data = {}
        key = resolve_key(Model._meta.module_name, d["pk"])
        if key.name():
            data["key_name"] = key.name()
        parent = None
        if key.parent():
            # Wrap the parent key so the real parent is not fetched now.
            parent = FakeParent(key.parent())
        m2m_data = {}
        # Handle each field
        for (field_name, field_value) in d["fields"].iteritems():
            if isinstance(field_value, str):
                # Byte strings from the fixture are decoded to unicode using
                # the configured charset.
                field_value = smart_unicode(
                    field_value, options.get("encoding",
                                             settings.DEFAULT_CHARSET),
                    strings_only=True)
            field = Model.properties()[field_name]
            if isinstance(field, db.Reference):
                # Resolve foreign key references.
                data[field.name] = resolve_key(Model._meta.module_name, field_value)
                if not data[field.name].name():
                    raise base.DeserializationError(u"Cannot load Reference with "
                                                    "unnamed key: '%s'" % field_value)
            else:
                data[field.name] = field.validate(field_value)
        # Create the new model instance with all it's data, but no parent.
        object = Model(**data)
        # Now add the parent into the hidden attribute, bypassing the type checks
        # in the Model's __init__ routine.
        object._parent = parent
        # When the deserialized object is saved our replacement DeserializedObject
        # class will set object._parent to force the real parent model to be loaded
        # the first time it is referenced.
        yield base.DeserializedObject(object, m2m_data)
def resolve_key(model, key_data):
    """Creates a Key instance from a some data.

    Args:
        model: The name of the model this key is being resolved for. Only used
            in the fourth case below (a plain key_name string).
        key_data: The data to create a key instance from. May be in four
            formats:
            * The str() output of a key instance. Eg. A base64 encoded string.
            * The repr() output of a key instance. Eg. A string for eval().
            * A list of arguments to pass to db.Key.from_path.
            * A single string value, being the key_name of the instance. When
              this format is used the resulting key has no parent, and is for
              the model named in the model parameter.

    Returns:
        An instance of db.Key. If the data cannot be used to create a Key
        instance an error will be raised.
    """
    if isinstance(key_data, list):
        # The key_data is a from_path sequence.
        return db.Key.from_path(*key_data)
    elif isinstance(key_data, basestring):
        if key_data.find("from_path") != -1:
            # key_data is encoded in repr(key) format.
            # SECURITY NOTE: eval() of fixture data executes arbitrary code;
            # only ever load fixtures from trusted sources.
            return eval(key_data)
        else:
            try:
                # key_data encoded a str(key) format
                return db.Key(key_data)
            except datastore_types.datastore_errors.BadKeyError, e:
                # Final try, assume it's a plain key name for the model.
                return db.Key.from_path(model, key_data)
    else:
        raise base.DeserializationError(u"Invalid key data: '%s'" % key_data)
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Replaces the default Django XML serializer with one that uses the built in
ToXml method for each entity.
"""
import re
from django.conf import settings
from django.core.serializers import base
from django.core.serializers import xml_serializer
from django.db import models
from google.appengine.api import datastore_types
from google.appengine.ext import db
from python import FakeParent
getInnerText = xml_serializer.getInnerText
class Serializer(xml_serializer.Serializer):
    """Serialize datastore models to XML.

    Relies on the ToXml method of the entity behind each model to do the
    heavy lifting; the per-field hooks are therefore no-ops.
    """

    def __init__(self, *args, **kwargs):
        super(Serializer, self).__init__(*args, **kwargs)
        self._objects = []

    def handle_field(self, obj, field):
        """No-op: whole entities are serialized at once, not per field."""
        pass

    def handle_fk_field(self, obj, field):
        """No-op: whole entities are serialized at once, not per field."""
        pass

    def start_object(self, obj):
        """No-op: all of the work happens in end_object."""
        pass

    def end_object(self, obj):
        """Serialize one object to XML and stash it for output.

        The datastore kind in ToXml's output is rewritten to the Django
        model name (which includes the application name) so the dump can
        be re-imported.
        """
        entity = obj._entity
        xml = entity.ToXml().replace(
            u"""kind="%s" """ % entity.kind(),
            u"""kind="%s" """ % unicode(obj._meta))
        self._objects.append(xml)

    def getvalue(self):
        """Return all serialized objects wrapped in a django-objects document."""
        parts = [u"""<?xml version="1.0" encoding="utf-8"?>\n""",
                 u"""<django-objects version="1.0">\n"""]
        parts.extend(self._objects)
        parts.append(u"""</django-objects>""")
        return u"".join(parts)
class Deserializer(xml_serializer.Deserializer):
    """A Django Deserializer class to convert XML to Django objects.

    This is a fairly manual and simplistic XML parser; it supports just
    enough functionality to read the keys and fields for an entity from the
    XML file and construct a model object.
    """

    def next(self):
        """Replacement next method to look for 'entity'.

        The default next implementation expects 'object' nodes, which is not
        what the entity's ToXml output provides.
        """
        for event, node in self.event_stream:
            if event == "START_ELEMENT" and node.nodeName == "entity":
                self.event_stream.expandNode(node)
                return self._handle_object(node)
        raise StopIteration

    def _handle_object(self, node):
        """Convert an <entity> node to a DeserializedObject"""
        Model = self._get_model_from_node(node, "kind")
        data = {}
        key = db.Key(node.getAttribute("key"))
        if key.name():
            data["key_name"] = key.name()
        parent = None
        if key.parent():
            # Wrap the parent key so the real parent is not fetched now.
            parent = FakeParent(key.parent())
        m2m_data = {}
        # Deserialize each field.
        for field_node in node.getElementsByTagName("property"):
            # If the field is missing the name attribute, bail (are you
            # sensing a pattern here?)
            field_name = field_node.getAttribute("name")
            if not field_name:
                raise base.DeserializationError("<field> node is missing the 'name' "
                                                "attribute")
            field = Model.properties()[field_name]
            field_value = getInnerText(field_node).strip()
            if isinstance(field, db.Reference):
                # Reference values look like 'tag:...[<encoded key>]';
                # extract the encoded key between the brackets.
                m = re.match("tag:.*\[(.*)\]", field_value)
                if not m:
                    raise base.DeserializationError(u"Invalid reference value: '%s'" %
                                                    field_value)
                key = m.group(1)
                key_obj = db.Key(key)
                if not key_obj.name():
                    raise base.DeserializationError(u"Cannot load Reference with "
                                                    "unnamed key: '%s'" % field_value)
                data[field.name] = key_obj
            else:
                data[field.name] = field.validate(field_value)
        # Create the new model instance with all it's data, but no parent.
        object = Model(**data)
        # Now add the parent into the hidden attribute, bypassing the type checks
        # in the Model's __init__ routine.
        object._parent = parent
        # When the deserialized object is saved our replacement DeserializedObject
        # class will set object._parent to force the real parent model to be loaded
        # the first time it is referenced.
        return base.DeserializedObject(object, m2m_data)
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import types
from google.appengine.ext import db
from django import VERSION
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.fields import Field
from django.db.models.options import Options
from django.db.models.loading import register_models, get_model
class ModelManager(object):
    """Minimal stand-in for Django's default model manager.

    Every attribute access is forwarded to the wrapped model class.
    """
    def __init__(self, owner):
        self.owner = owner

    def __getattr__(self, name):
        """Delegate any attribute lookup to the owning model class."""
        return getattr(self.owner, name)
class ModelOptions(object):
    """Minimal stand-in for Django's Options class (sits at model._meta).

    Supplies just enough metadata (names, app label, primary key stub) for
    Django's fixture machinery to treat an appengine model as a Django
    model.
    """
    # Django 1.1 compat
    proxy = None

    def __init__(self, cls):
        self.object_name = cls.__name__
        self.module_name = self.object_name.lower()
        # The app label is the package directly containing the models module.
        owning_module = sys.modules[cls.__module__]
        self.app_label = owning_module.__name__.split('.')[-2]
        self.abstract = False

    class pk:
        """Stub the primary key to always be 'key_name'"""
        name = "key_name"

    def __str__(self):
        return "%s.%s" % (self.app_label, self.module_name)

    @property
    def many_to_many(self):
        """The datastore does not support many to many relationships."""
        return []
class Relation(object):
    """Stub relation descriptor: references always point at 'key_name'."""
    def __init__(self, to):
        self.field_name = "key_name"
def PropertyWrapper(prop):
    """Wrapper for db.Property to make it look like a Django model Property."""
    # Reference properties get a stub relation; everything else gets none.
    if isinstance(prop, db.Reference):
        prop.rel = Relation(prop.reference_class)
    else:
        prop.rel = None
    prop.serialize = True

    # NOTE(termie): These are rather useless hacks to get around Django
    #               changing their approach to "fields" and breaking
    #               encapsulation a bit.
    def _get_val_from_obj(obj):
        return getattr(obj, prop.name) if obj else prop.default_value()

    def value_to_string(obj):
        return str(getattr(obj, prop.name)) if obj else str(prop.default_value())

    prop._get_val_from_obj = _get_val_from_obj
    prop.value_to_string = value_to_string
    return prop
class PropertiedClassWithDjango(db.PropertiedClass):
    """Metaclass for the combined Django + App Engine model class.

    This metaclass inherits from db.PropertiedClass in the appengine library.
    This metaclass has two additional purposes:
    1) Register each model class created with Django (the parent class will
       take care of registering it with the appengine libraries).
    2) Add the (minimum number) of attributes and methods to make Django
       believe the class is a normal Django model.

    The resulting classes are still not generally useful as Django classes
    and are intended to be used by Django only in limited situations such as
    loading and dumping fixtures.
    """

    def __new__(cls, name, bases, attrs):
        """Creates a combined appengine and Django model.

        The resulting model will be known to both the appengine libraries and
        Django.
        """
        if name == 'BaseModel':
            # This metaclass only acts on subclasses of BaseModel.
            return super(PropertiedClassWithDjango, cls).__new__(cls, name,
                                                                 bases, attrs)
        new_class = super(PropertiedClassWithDjango, cls).__new__(cls, name,
                                                                  bases, attrs)
        # Graft the minimal Django-compatible metadata onto the class.
        new_class._meta = ModelOptions(new_class)
        new_class.objects = ModelManager(new_class)
        new_class._default_manager = new_class.objects
        # NOTE: types.ClassType is Python 2 only (builds an old-style class).
        new_class.DoesNotExist = types.ClassType('DoesNotExist',
                                                 (ObjectDoesNotExist,), {})
        # If Django has already registered this model, return the registered
        # class so repeated definition is idempotent.
        m = get_model(new_class._meta.app_label, name, False)
        if m:
            return m
        register_models(new_class._meta.app_label, new_class)
        return get_model(new_class._meta.app_label, name, False)

    def __init__(cls, name, bases, attrs):
        """Initialises the list of Django properties.

        This method takes care of wrapping the properties created by the
        superclass so that they look like Django properties and installing
        them into the ._meta object of the class so that Django can find
        them at the appropriate time.
        """
        super(PropertiedClassWithDjango, cls).__init__(name, bases, attrs)
        if name == 'BaseModel':
            # This metaclass only acts on subclasses of BaseModel.
            return
        fields = [PropertyWrapper(p) for p in cls._properties.values()]
        cls._meta.local_fields = fields
class BaseModel(db.Model):
  """Combined appengine and Django model.

  All models used in the application should derive from this class.
  """
  __metaclass__ = PropertiedClassWithDjango

  def __eq__(self, other):
    # Instances are equal when they are of the same class and share the
    # same datastore key.
    if not isinstance(other, self.__class__):
      return False
    return self._get_pk_val() == other._get_pk_val()

  def __ne__(self, other):
    return not self.__eq__(other)

  def _get_pk_val(self):
    """Return the string representation of the model's key"""
    return unicode(self.key())

  def __repr__(self):
    """Create a string that can be used to construct an equivalent object.

    e.g. eval(repr(obj)) == obj
    """
    # First, creates a dictionary of property names and values. Note that
    # property values, not property objects, has to be passed in to
    # constructor.
    def _MakeReprTuple(prop_name):
      # Use the datastore representation of the value so the repr round-trips.
      prop = getattr(self.__class__, prop_name)
      return (prop_name, prop.get_value_for_datastore(self))

    d = dict([_MakeReprTuple(prop_name) for prop_name in self.properties()])
    return "%s(**%s)" % (self.__class__.__name__, repr(d))
class RegistrationTestModel(BaseModel):
  """Used to check registration with Django is working correctly.

  Django 0.96 only recognises models defined within an applications models
  module when get_models() is called so this definition must be here rather
  than within the associated test (tests/model_test.py).
  """
  pass
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.conf import settings
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
  """Maps Django's test-database lifecycle onto the App Engine datastore."""

  def create_test_db(self, *args, **kw):
    """Destroys the test datastore. A new store will be recreated on demand"""
    # The datastore exposes no Django-visible transaction support.
    settings.DATABASE_SUPPORTS_TRANSACTIONS = False
    self.destroy_test_db()
    self.connection.use_test_datastore = True
    # flush() reinstalls the API stubs pointing at the (now empty) test store.
    self.connection.flush()

  def destroy_test_db(self, *args, **kw):
    """Destroys the test datastore files."""
    # Local import: the backend helpers are only needed at teardown time.
    from appengine_django.db.base import destroy_datastore
    from appengine_django.db.base import get_test_datastore_paths
    destroy_datastore(*get_test_datastore_paths())
    logging.debug("Destroyed test datastore")
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module looks after initialising the appengine api stubs."""
import logging
import os
from appengine_django import appid
from appengine_django import have_appserver
from appengine_django.db.creation import DatabaseCreation
from django.db.backends import BaseDatabaseWrapper
from django.db.backends import BaseDatabaseFeatures
from django.db.backends import BaseDatabaseOperations
def get_datastore_paths():
  """Returns a tuple with the path to the datastore and history file.

  The datastore is stored in the same location as dev_appserver uses by
  default, but the name is altered to be unique to this project so multiple
  Django projects can be developed on the same machine in parallel.

  Returns:
    (datastore_path, history_path)
  """
  from google.appengine.tools import dev_appserver_main
  defaults = dev_appserver_main.DEFAULT_ARGS
  project_tag = "django_%s" % appid
  # Rebrand the stock dev_appserver file names with the project's appid.
  return (defaults['datastore_path'].replace("dev_appserver", project_tag),
          defaults['history_path'].replace("dev_appserver", project_tag))
def get_test_datastore_paths(inmemory=True):
  """Returns a tuple with the path to the test datastore and history file.

  If inmemory is true, (None, None) is returned to request an in-memory
  datastore. If inmemory is false the path returned will be similar to the
  path returned by get_datastore_paths but with a different name.

  Returns:
    (datastore_path, history_path)
  """
  if inmemory:
    # (None, None) tells the stubs to keep everything in memory.
    return None, None
  # Derive on-disk test paths from the development datastore paths.
  return tuple(path.replace("datastore", "testdatastore")
               for path in get_datastore_paths())
def destroy_datastore(datastore_path, history_path):
"""Destroys the appengine datastore at the specified paths."""
for path in [datastore_path, history_path]:
if not path: continue
try:
os.remove(path)
except OSError, e:
if e.errno != 2:
logging.error("Failed to clear datastore: %s" % e)
class DatabaseError(Exception):
  """Stub class for database errors. Required by Django"""
  pass


class IntegrityError(Exception):
  """Stub class for database integrity errors. Required by Django"""
  pass


class DatabaseFeatures(BaseDatabaseFeatures):
  """Stub class to provide the features member expected by Django"""
  pass


class DatabaseOperations(BaseDatabaseOperations):
  """Stub class to provide the ops member expected by Django"""
  pass
class DatabaseWrapper(BaseDatabaseWrapper):
  """App Engine database definition for Django.

  This "database" backend does not support any of the standard backend
  operations. The only task that it performs is to setup the api stubs
  required by the appengine libraries if they have not already been
  initialised by an appserver.
  """

  def __init__(self, *args, **kwargs):
    super(DatabaseWrapper, self).__init__(*args, **kwargs)
    self.features = DatabaseFeatures()
    self.ops = DatabaseOperations()
    self.creation = DatabaseCreation(self)
    # Test-datastore options may be supplied by the test runner.
    self.use_test_datastore = kwargs.get("use_test_datastore", False)
    self.test_datastore_inmemory = kwargs.get("test_datastore_inmemory", True)
    if have_appserver:
      # A running appserver has already configured the API stubs.
      return
    self._setup_stubs()

  def _get_paths(self):
    # Select between the development and test datastore locations.
    if self.use_test_datastore:
      return get_test_datastore_paths(self.test_datastore_inmemory)
    else:
      return get_datastore_paths()

  def _setup_stubs(self):
    # If this code is being run without an appserver (eg. via a django
    # commandline flag) then setup a default stub environment.
    from google.appengine.tools import dev_appserver_main
    args = dev_appserver_main.DEFAULT_ARGS.copy()
    args['datastore_path'], args['history_path'] = self._get_paths()
    from google.appengine.tools import dev_appserver
    dev_appserver.SetupStubs(appid, **args)
    if self.use_test_datastore:
      logging.debug("Configured API stubs for the test datastore")
    else:
      logging.debug("Configured API stubs for the development datastore")

  def flush(self):
    """Helper function to remove the current datastore and re-open the stubs"""
    destroy_datastore(*self._get_paths())
    self._setup_stubs()

  # The remaining methods are no-op stubs for Django's backend interface.
  def close(self):
    pass

  def _commit(self):
    pass

  def cursor(self, *args):
    pass
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Explicitly set the name of this package to "appengine".
#
# The rationale for this is so that Django can refer to the database as
# "appengine" even though at a filesystem level it appears as the "db" package
# within the appengine_django package.
#
# NOTE(review): rebinding __name__ at module level is unusual; renaming the
# string here would change the backend name Django sees -- confirm against
# project settings before touching it.
__name__ = "appengine"
| Python |
from appengine_django.models import BaseModel
from google.appengine.ext import db
# Create your models here.
| Python |
# Create your views here.
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.ext import db
class Session(db.Model):
  """Django compatible App Engine Datastore session model."""
  # Encoded session payload (written by SessionStore via self.encode()).
  session_data = db.BlobProperty()
  # Absolute expiry time; expired sessions are deleted lazily on access.
  expire_date = db.DateTimeProperty()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from django.contrib.sessions.backends import base
from django.core.exceptions import SuspiciousOperation
from appengine_django.sessions.models import Session
class SessionStore(base.SessionBase):
  """A key-based session store for Google App Engine."""

  def load(self):
    """Loads the session payload, creating a fresh session on failure."""
    session = self._get_session(self.session_key)
    if session:
      try:
        return self.decode(session.session_data)
      except SuspiciousOperation:
        # Create a new session_key for extra security.
        pass
    # Missing, expired, or tampered session: start over with a new key.
    self.session_key = self._get_new_session_key()
    self._session_cache = {}
    self.save()
    # Ensure the user is notified via a new cookie.
    self.modified = True
    return {}

  def save(self, must_create=False):
    """Persists the session entity.

    Raises base.CreateError when must_create is set and the key already
    exists; create() relies on this to guarantee key uniqueness.
    """
    if must_create and self.exists(self.session_key):
      raise base.CreateError
    session = Session(
        key_name='k:' + self.session_key,
        session_data=self.encode(self._session),
        expire_date=self.get_expiry_date())
    session.put()

  def exists(self, session_key):
    # Key names are prefixed with 'k:' (datastore key names may not start
    # with a digit; the session key might).
    return Session.get_by_key_name('k:' + session_key) is not None

  def delete(self, session_key=None):
    if session_key is None:
      # NOTE(review): reads self._session_key while the other methods use
      # self.session_key -- confirm both resolve on the targeted Django
      # version.
      session_key = self._session_key
    session = self._get_session(session_key=session_key)
    if session:
      session.delete()

  def _get_session(self, session_key):
    """Returns the unexpired Session entity or None, purging stale ones."""
    session = Session.get_by_key_name('k:' + session_key)
    if session:
      if session.expire_date > datetime.now():
        return session
      # Expired sessions are removed lazily as they are encountered.
      session.delete()
    return None

  def create(self):
    """Creates a new session under a guaranteed-unique key."""
    while True:
      self.session_key = self._get_new_session_key()
      try:
        # Save immediately to ensure we have a unique entry in the
        # database.
        self.save(must_create=True)
      except base.CreateError:
        # Key wasn't unique. Try again.
        continue
      self.modified = True
      self._session_cache = {}
      return
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
App Engine compatible models for the Django authentication framework.
"""
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.encoding import smart_str
import urllib
from django.db.models.manager import EmptyManager
from google.appengine.api import users
from google.appengine.ext import db
from appengine_django.models import BaseModel
class User(BaseModel):
  """A model with the same attributes and methods as a Django user model.

  The model has two additions. The first addition is a 'user' attribute which
  references a App Engine user. The second is the 'get_djangouser_for_user'
  classmethod that should be used to retrieve a DjangoUser instance from a App
  Engine user object.
  """
  user = db.UserProperty(required=True)
  username = db.StringProperty(required=True)
  first_name = db.StringProperty()
  last_name = db.StringProperty()
  email = db.EmailProperty()
  password = db.StringProperty()
  is_staff = db.BooleanProperty(default=False, required=True)
  is_active = db.BooleanProperty(default=True, required=True)
  is_superuser = db.BooleanProperty(default=False, required=True)
  last_login = db.DateTimeProperty(auto_now_add=True, required=True)
  date_joined = db.DateTimeProperty(auto_now_add=True, required=True)
  # Groups and permissions are not implemented (requires contenttypes), so
  # empty managers are exposed for API compatibility with django.contrib.auth.
  groups = EmptyManager()
  user_permissions = EmptyManager()

  def __unicode__(self):
    return self.username

  def __str__(self):
    return unicode(self).encode('utf-8')

  @classmethod
  def get_djangouser_for_user(cls, user):
    """Returns the User entity for an App Engine user, creating on demand."""
    query = cls.all().filter("user =", user)
    if query.count() == 0:
      # First visit: seed username/email from the App Engine account.
      django_user = cls(user=user, email=user.email(), username=user.nickname())
      django_user.save()
    else:
      django_user = query.get()
    return django_user

  # Password management is handled by Google Accounts, so the password API
  # is deliberately unsupported.
  def set_password(self, raw_password):
    raise NotImplementedError

  def check_password(self, raw_password):
    raise NotImplementedError

  def set_unusable_password(self):
    raise NotImplementedError

  def has_usable_password(self):
    raise NotImplementedError

  def get_group_permissions(self):
    # Always the empty manager: permissions are not implemented.
    return self.user_permissions

  def get_all_permissions(self):
    return self.user_permissions

  def has_perm(self, perm):
    return False

  def has_perms(self, perm_list):
    return False

  def has_module_perms(self, module):
    return False

  def get_and_delete_messages(self):
    """Gets and deletes messages for this user"""
    msgs = []
    for msg in self.message_set:
      msgs.append(msg)
      msg.delete()
    return msgs

  def is_anonymous(self):
    """Always return False"""
    return False

  def is_authenticated(self):
    """Always return True"""
    return True

  def get_absolute_url(self):
    # URL-quote the (possibly non-ASCII) username.
    return "/users/%s/" % urllib.quote(smart_str(self.username))

  def get_full_name(self):
    full_name = u'%s %s' % (self.first_name, self.last_name)
    return full_name.strip()

  def email_user(self, subject, message, from_email):
    """Sends an email to this user.

    According to the App Engine email API the from_email must be the
    email address of a registered administrator for the application.
    """
    mail.send_mail(subject,
                   message,
                   from_email,
                   [self.email])

  def get_profile(self):
    """
    Returns site-specific profile for this user. Raises
    SiteProfileNotAvailable if this site does not allow profiles.

    When using the App Engine authentication framework, users are created
    automatically.
    """
    from django.contrib.auth.models import SiteProfileNotAvailable
    if not hasattr(self, '_profile_cache'):
      from django.conf import settings
      if not hasattr(settings, "AUTH_PROFILE_MODULE"):
        raise SiteProfileNotAvailable
      try:
        # AUTH_PROFILE_MODULE is "app_label.ModelName".
        app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
        model = models.get_model(app_label, model_name)
        self._profile_cache = model.all().filter("user =", self).get()
        if not self._profile_cache:
          raise model.DoesNotExist
      except (ImportError, ImproperlyConfigured):
        raise SiteProfileNotAvailable
    return self._profile_cache
class Group(BaseModel):
  """Group model not fully implemented yet."""
  # TODO: Implement this model, requires contenttypes
  name = db.StringProperty()
  permissions = EmptyManager()


class Message(BaseModel):
  """User message model"""
  # Reference to the owning User; User.get_and_delete_messages iterates the
  # resulting 'message_set' back-reference.
  user = db.ReferenceProperty(User)
  message = db.TextProperty()


class Permission(BaseModel):
  """Permission model not fully implemented yet."""
  # TODO: Implement this model, requires contenttypes
  name = db.StringProperty()
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
BASIC_TESTS = """
>>> from google.appengine.api import users
>>> from models import User, AnonymousUser
>>> appengine_user = users.User("test@example.com")
>>> django_user = User.get_djangouser_for_user(appengine_user)
>>> django_user.email == appengine_user.email()
True
>>> django_user.username == appengine_user.nickname()
True
>>> django_user.user == appengine_user
True
>>> django_user.username = 'test2'
>>> key = django_user.save()
>>> django_user.username == 'test2'
True
>>> django_user2 = User.get_djangouser_for_user(appengine_user)
>>> django_user2 == django_user
True
>>> django_user.is_authenticated()
True
>>> django_user.is_staff
False
>>> django_user.is_active
True
>>> a = AnonymousUser()
>>> a.is_authenticated()
False
>>> a.is_staff
False
>>> a.is_active
False
>>> a.groups.all()
[]
>>> a.user_permissions.all()
[]
"""
__test__ = {'BASIC_TESTS': BASIC_TESTS}
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Template tags for the auth module. These are inserted into Django as "built-in"
tags so you do not need to use the load statement in your template to get
access to them.
"""
from django.template import Library
from django.template import Node
from google.appengine.api import users
class AuthLoginUrlsNode(Node):
  """Template node that creates an App Engine login or logout URL.

  If create_login_url is True the App Engine's login URL is rendered into
  the template, otherwise the logout URL.
  """

  def __init__(self, create_login_url, redirect):
    self.redirect = redirect
    self.create_login_url = create_login_url

  def render(self, context):
    # Choose the URL builder from the users API based on the tag used.
    if self.create_login_url:
      build_url = users.create_login_url
    else:
      build_url = users.create_logout_url
    return build_url(self.redirect)
def auth_login_urls(parser, token):
  """Template tag registered as 'auth_login_url' and 'auth_logout_url'
  when the module is imported.

  Both tags take an optional argument that specifies the redirect URL and
  defaults to '/'.
  """
  bits = list(token.split_contents())
  # bits[0] is the tag name itself; bits[1], when present, is the redirect.
  redirect = "/"
  if len(bits) == 2:
    redirect = bits[1]
  is_login = (bits[0] == "auth_login_url")
  return AuthLoginUrlsNode(is_login, redirect)
# Register both tag names against the same compile function; the node
# distinguishes login from logout by the tag name it was invoked with.
register = Library()
register.tag("auth_login_url", auth_login_urls)
register.tag("auth_logout_url", auth_login_urls)
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decorators for the authentication framework."""
from django.http import HttpResponseRedirect
from google.appengine.api import users
def login_required(function):
  """Implementation of Django's login_required decorator.

  The login redirect URL is always set to request.path
  """
  def login_required_wrapper(request, *args, **kw):
    # Anonymous users are bounced to the App Engine login page and then
    # returned to the page they originally requested.
    if not request.user.is_authenticated():
      return HttpResponseRedirect(users.create_login_url(request.path))
    return function(request, *args, **kw)
  return login_required_wrapper
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Authentication module that mimics the behavior of Django's authentication
implementation.
Limitations:
- all user permissions methods are not available (requires contenttypes)
"""
from django.template import add_to_builtins
add_to_builtins('appengine_django.auth.templatetags')
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.contrib.auth.models import AnonymousUser
from google.appengine.api import users
from appengine_django.auth.models import User
class LazyUser(object):
  """Descriptor resolving the current user lazily, once per request.

  The resolved user (a User entity, or AnonymousUser when nobody is
  signed in) is cached on the request object.
  """

  def __get__(self, request, obj_type=None):
    try:
      return request._cached_user
    except AttributeError:
      pass
    gae_user = users.get_current_user()
    if gae_user:
      request._cached_user = User.get_djangouser_for_user(gae_user)
    else:
      request._cached_user = AnonymousUser()
    return request._cached_user
class AuthenticationMiddleware(object):
  """Middleware attaching a lazily-evaluated .user to each request."""

  def process_request(self, request):
    # Install the descriptor on the request *class* so attribute access on
    # the instance triggers LazyUser.__get__.
    request.__class__.user = LazyUser()
    return None
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import logging
from django.core.management.base import BaseCommand
def run_appcfg():
  """Runs appcfg.py's vacuum_indexes action on the current directory."""
  # import this so that we run through the checks at the beginning
  # and report the appropriate errors
  import appcfg
  # We don't really want to use that one though, it just executes this one
  from google.appengine.tools import appcfg
  # Quieten appcfg: it logs copiously at INFO level.
  logging.getLogger().setLevel(logging.WARN)
  # Note: if we decide to change the name of this command to something other
  # than 'vacuum_indexes' we will have to munge the args to replace whatever
  # we called it with 'vacuum_indexes'
  appcfg.main(sys.argv + ['.'])
class Command(BaseCommand):
  """Calls the appcfg.py's vacuum_indexes command for the current project.

  Any additional arguments are passed directly to appcfg.py.
  """
  help = 'Calls appcfg.py vacuum_indexes for the current project.'
  args = '[any appcfg.py options]'

  def run_from_argv(self, argv):
    # Bypass Django's own option parsing; appcfg reads sys.argv directly.
    run_appcfg()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
from django.core.management.base import BaseCommand
class Command(BaseCommand):
  """Overrides the default Django reset command.
  """
  help = 'Clears the current datastore.'

  def run_from_argv(self, argv):
    # 'connection' is this backend's DatabaseWrapper; flush() destroys the
    # datastore files and reinstalls the API stubs.
    from django.db import connection
    connection.flush()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from appengine_django.db.base import get_datastore_paths
from django.core.management.base import BaseCommand
def start_dev_appserver():
  """Starts the appengine dev_appserver program for the Django project.

  The appserver is run with default parameters. If you need to pass any
  special parameters to the dev_appserver you will have to invoke it
  manually.
  """
  from google.appengine.tools import dev_appserver_main
  progname = sys.argv[0]
  args = []
  # hack __main__ so --help in dev_appserver_main works OK.
  sys.modules['__main__'] = dev_appserver_main
  # Set bind ip/port if specified: argv[2] is "addr:port" or just "port".
  if len(sys.argv) > 2:
    addrport = sys.argv[2]
    try:
      addr, port = addrport.split(":")
    except ValueError:
      # No colon: the whole argument is the port number.
      addr, port = None, addrport
    if not port.isdigit():
      print "Error: '%s' is not a valid port number." % port
      sys.exit(1)
  else:
    addr, port = None, "8000"
  if addr:
    args.extend(["--address", addr])
  if port:
    args.extend(["--port", port])
  # Add email settings so the server's SMTP flags match the Django config.
  from django.conf import settings
  args.extend(['--smtp_host', settings.EMAIL_HOST,
               '--smtp_port', str(settings.EMAIL_PORT),
               '--smtp_user', settings.EMAIL_HOST_USER,
               '--smtp_password', settings.EMAIL_HOST_PASSWORD])
  # Allow skipped files so we don't die
  args.extend(['--allow_skipped_files'])
  # Pass the application specific datastore location to the server.
  p = get_datastore_paths()
  args.extend(["--datastore_path", p[0], "--history_path", p[1]])
  # Append the current working directory to the arguments.
  dev_appserver_main.main([progname] + args + [os.getcwdu()])
class Command(BaseCommand):
  """Overrides the default Django runserver command.

  Instead of starting the default Django development server this command
  fires up a copy of the full fledged appengine dev_appserver that emulates
  the live environment your application will be deployed to.
  """
  help = 'Runs a copy of the appengine development server.'
  args = '[optional port number, or ipaddr:port]'

  def run_from_argv(self, argv):
    # Bypass Django's option parsing; the appserver reads sys.argv itself.
    start_dev_appserver()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import logging
from django.core.management.base import BaseCommand
def run_appcfg():
  """Runs appcfg.py's update action on the current directory."""
  # import this so that we run through the checks at the beginning
  # and report the appropriate errors
  import appcfg
  # We don't really want to use that one though, it just executes this one
  from google.appengine.tools import appcfg
  # Quieten appcfg: it logs copiously at INFO level.
  logging.getLogger().setLevel(logging.WARN)
  # Note: if we decide to change the name of this command to something other
  # than 'update' we will have to munge the args to replace whatever
  # we called it with 'update'
  appcfg.main(sys.argv + ['.'])
class Command(BaseCommand):
  """Calls the appcfg.py's update command for the current project.

  Any additional arguments are passed directly to appcfg.py.
  """
  help = 'Calls appcfg.py update for the current project.'
  args = '[any appcfg.py options]'

  def run_from_argv(self, argv):
    # Bypass Django's own option parsing; appcfg reads sys.argv directly.
    run_appcfg()
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.