code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
#!/usr/bin/env python
# encoding: utf-8
'''
Created on Jun 10, 2011
@author: apple
'''
import time
import cPickle
import pymongo
import types
class MongoModel(object):
    """Base model wrapping a pymongo collection with an optional
    pickle-based cache (any object exposing get()/set(), e.g. memcache
    or redis clients)."""

    # Last error message. NOTE(review): instance methods assign this as an
    # *instance* attribute, while getlasterror() is a classmethod reading the
    # *class* attribute -- calling it on the class always returns None.
    # Confirm intended usage before changing.
    lasterror = None

    def __init__(self, db , pk='_id', cache=None , expire=0):
        self.db = db            # pymongo collection object
        self.pk = pk            # primary-key field name
        # only accept cache objects that actually expose a set() method
        self.cache = cache if getattr(cache, 'set',None) else None
        self.expire = expire    # cache TTL in seconds

    @classmethod
    def getlasterror(self):
        return self.lasterror

    def get(self, **kv):
        """Fetch one document matching kv, trying the cache first when kv
        contains the primary key. Returns None when nothing matches."""
        _id = kv.get(self.pk,'')
        if _id:
            data = self.__get_cache__(_id)
            if data:
                return data
        result = self.db.find(kv)
        if result.count() == 0:
            return None
        self.__set_cache__(result[0])
        return result[0]

    def filter(self,spec,skip=0,limit=10,sortfield=None,sortmethod=pymongo.DESCENDING):
        """Return a cursor over documents matching spec, optionally sorted."""
        result = self.db.find(spec,skip=skip,limit=limit) if not sortfield else self.db.find(spec,skip=skip,limit=limit).sort(sortfield,sortmethod)
        return result

    def save(self,**kv):
        """Insert a new document; kv must contain the primary key.
        Adds a 'timestamp' field when absent. Returns True on success."""
        if not kv.has_key(self.pk):
            self.lasterror = 'No primary key. at save'
            return False
        kv['timestamp'] = kv.get('timestamp',None) or time.time()
        try:
            self.db.insert(kv,safe=True)
            self.__set_cache__(kv)
            return True
        except:
            self.lasterror = 'Insert into mongodb fail.at save'
            return False

    def insert(self,*docs):
        """Save several documents; returns the list of per-document results.
        Bug fix: save() accepts only keyword arguments, so each dict must be
        unpacked -- the previous save(d) call raised TypeError."""
        return [self.save(**d) for d in docs]

    def update(self,spec,doc,**kv):
        """$set the fields of doc on documents matching spec, refreshing the
        cached copy when spec carries the primary key.
        (Removed dead code: kv['safe'] was pre-set to {} and then
        unconditionally overwritten with True.)"""
        _id = spec.get(self.pk,None)
        doc.pop(self.pk,None)       # never rewrite the primary key
        kv['safe'] = True
        if _id:
            data = self.__get_cache__(_id)
            if data:
                data.update(doc.items())
                self.__set_cache__(data)
        result = self.db.update(spec,{'$set':doc},**kv)
        return result['updatedExisting']

    def remove(self,_id):
        """Delete by id; returns the number of removed documents."""
        return self.db.remove(_id,safe=True).get('n')

    def __len__(self):
        return self.db.find().count()

    def __set_cache__(self,item,**kv):
        """Pickle item and store it in the cache keyed by its primary key.
        NOTE(review): mutates item in place when the key is not a plain str."""
        if not self.cache or not item.has_key(self.pk):
            self.lasterror = 'No primary key or no cache server.at __set_cache__'
            return None
        data = cPickle.dumps(item)
        if type(item[self.pk]) != types.StringType:
            item[self.pk] = item[self.pk].encode('utf-8')
        return self.cache.set(item[self.pk],data,**kv)

    def __get_cache__(self,key):
        """Fetch and unpickle a cached document; None on miss or bad data."""
        if not self.cache :
            self.lasterror = 'No cache server.at __get_cache__'
            return None
        if type(key) != types.StringType:
            key = key.encode('utf-8')
        rawdata = self.cache.get(key)
        try:
            return cPickle.loads(rawdata)
        except:
            self.lasterror = 'cPickle.loads fail,invalid data format.at __get_cache__'
            return None

    def __cache_all__(self,items):
        """Warm the cache with every document in items."""
        for i in items:
            self.__set_cache__(i)
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
config.py
Created by AlanYang on 2011-06-04.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
class Config(object):
    """Base (production) application configuration."""
    #SERVER_NAME = 'freeasker.com application service'
    # MongoDB connection
    MONGO_HOST = '127.0.0.1'
    MONGO_PORT = 27017
    # Redis connection
    REDIS_HOST = '222.73.18.67'
    REDIS_PORT = 6379
    # memcached servers and cache settings
    CACHE_MEMCACHED_SERVERS = ['127.0.0.1:11211',]
    CACHE_TYPE = 'memcached'
    CACHE_KEY_PREFIX = 'andsoon'
    # redis question db number
    DB_REDIS_QUESTION = 1
    # redis user db number
    DB_REDIS_USER = 2
    # redis tags db number
    DB_REDIS_TAG = 3
    # redis search job db number
    DB_REDIS_SEARCH = 4
    # default cache TTL for user documents (seconds)
    DEFAULT_EXPIRE = 120
    # cache TTL for question documents (seconds)
    QUESTION_EXPIRE = 3600 * 3
    # cache TTL for tag information (seconds)
    TAG_EXPIRE = 1200
    # feature switch: cache user data
    CACHED_USER_DATA = True
    # feature switch: cache question data
    CACHED_QUESTION_DATA = True
    # NOTE(review): secret key is hard-coded; prefer loading from environment.
    SECRET_KEY = 'Alan and ida @2005'
    DEBUG = False
    TESTING = False
    # redis key holding queued search-index jobs
    JOBS_KEY = 'freeasker:search:jobs'
class DevelopmentConfig(Config):
    # Development profile: enable Flask debug mode.
    DEBUG = True
class TestingConfig(Config):
    # Testing profile: enable Flask testing mode.
    # Bug fix: removed stray "| Python |" residue fused onto this line,
    # which made the assignment a syntax error.
    TESTING = True
#!/usr/bin/env python
# encoding: utf-8
"""
__init__.py
Created by AlanYang on 2011-06-20.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
import sys
import os
def main():
    """Package entry point placeholder -- intentionally does nothing."""
    return None

if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
cache.py
Created by AlanYang on 2011-06-20.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
from functools import wraps
from config import memcacheclients
class Cache(object):
    """Thin memcache wrapper exposing a result-caching decorator."""

    def __init__(self,contail):
        # Lazily attach a shared memcache client to the container object
        # so repeated Cache() constructions reuse one connection.
        if not getattr(contail, 'memcache',None):
            import memcache
            contail.memcache = memcache.Client(memcacheclients)
        self.memcache = contail.memcache
        self.expire = 10    # default TTL in seconds

    def caching(self,expire=None,prefix='%s'):
        """Decorator factory: cache the wrapped function's result under
        the key `prefix % func.__name__` for `expire` (or default) seconds."""
        def __magic__(func):
            @wraps(func)
            def __do__(*args,**kv):
                key = prefix%func.__name__      # bug fix: was 'prefic' (NameError)
                value = self.memcache.get(key)
                if not value:
                    value = func(*args,**kv)
                    # bug fix: rebinding 'expire' here made it an unbound
                    # local inside the closure; use a distinct name instead.
                    ttl = expire or self.expire
                    self.memcache.set(key,value,ttl)
                return value
            return __do__
        return __magic__
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
__init__.py
Created by AlanYang on 2011-06-20.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
import sys
import os
def main():
    """Package entry point placeholder -- intentionally does nothing."""
    return None

if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
cache.py
Created by AlanYang on 2011-06-20.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
from functools import wraps
from config import memcacheclients
class Cache(object):
    """Thin memcache wrapper exposing a result-caching decorator."""

    def __init__(self,contail):
        # Lazily attach a shared memcache client to the container object
        # so repeated Cache() constructions reuse one connection.
        if not getattr(contail, 'memcache',None):
            import memcache
            contail.memcache = memcache.Client(memcacheclients)
        self.memcache = contail.memcache
        self.expire = 10    # default TTL in seconds

    def caching(self,expire=None,prefix='%s'):
        """Decorator factory: cache the wrapped function's result under
        the key `prefix % func.__name__` for `expire` (or default) seconds."""
        def __magic__(func):
            @wraps(func)
            def __do__(*args,**kv):
                key = prefix%func.__name__      # bug fix: was 'prefic' (NameError)
                value = self.memcache.get(key)
                if not value:
                    value = func(*args,**kv)
                    # bug fix: rebinding 'expire' here made it an unbound
                    # local inside the closure; use a distinct name instead.
                    ttl = expire or self.expire
                    self.memcache.set(key,value,ttl)
                return value
            return __do__
        return __magic__
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
test.py
Created by AlanYang on 2011-06-17.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
import sys
import os
import types
import xapian
from multiprocessing import Process
from application import app
#insert db.set_document(doc)
#remove db.delete_document(did)
#find db.get_document(did)
#update db.replace_document(did,newdoc)
def main():
    # Manual xapian smoke test: open a writable index, read document #2,
    # overwrite its payload, and persist the change.
    # xapian operations used by this project:
    #   insert: db.set_document(doc)   remove: db.delete_document(did)
    #   find:   db.get_document(did)   update: db.replace_document(did, newdoc)
    db = xapian.WritableDatabase('/Users/apple/Project/xapiandb',xapian.DB_OPEN)
    doc = db.get_document(2)
    print doc.get_data()
    doc.set_data("""Created by AlanYang on 2011-06-17...""")
    db.replace_document(2,doc)
    db.commit()
    # NOTE(review): flush() is the legacy name for commit(); calling both
    # appears redundant -- confirm intended.
    db.flush()
def job():
    """Placeholder worker that just prints a constant (Process smoke test)."""
    print 10
def __is_string__(s):
"""docstring for __is_string__"""
try:
s + ''
return True
except:
return False
def __flat__(field):
result = []
if type(field) == types.ListType:
for i in field:
if not __is_string__(i):
result.extend(__flat__(i))
else:
result.append(i)
elif type(field) == types.DictType:
for k,v in field.items():
if not __is_string__(v):
result.extend(__flat__(v))
else:
result.append(v)
return result
if __name__ == '__main__':
    # Exercise __flat__ on a deeply nested mix of lists and dicts.
    print __flat__({'a':['1','2','3','4',['5','6','7',['8',['9']]]],'b':'123123123','c':{'as':'56767','213':{'asd':'123123'}}})
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
usermodel.py
Created by AlanYang on 2011-06-04.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
import redis
import cPickle
import hashlib
import pymongo
from flask import g
from mongomodel import MongoModel
from application import app
from apps.search.searchmodel import index
def login(uid,passwd):
    """Check uid/passwd against the user collection."""
    user = UserModel()
    return user.login(uid, passwd)

def changeuser(uid,user):
    """Update the given profile fields for uid."""
    return UserModel().changeuser(uid,**user)

@index(['nick'])
def setuser(user):
    """Create a user from a dict carrying at least 'uid' and 'passwd'.
    The @index decorator feeds the 'nick' field to the search indexer."""
    # bug fix: removed leftover debug statement `print user`
    return UserModel(**user).commit()

def changepasswd(uid,oldpasswd,newpasswd):
    """Change uid's password after verifying the old one."""
    return UserModel().changepasswd(uid, oldpasswd, newpasswd)

def getuser(uid):
    """Fetch a user document by its id (email)."""
    return UserModel().getuser(uid)

@app.signal_addquestion.connect
def adduserquestion(sender,**kv):
    """Signal handler: record a question id on the asking user's document."""
    uid = kv.pop('uid',None)
    qid = kv.pop('qid',None)
    if not uid or not qid:
        return False
    return UserModel().addquestion(uid, qid)

def getuserquestions(uid,skip=0,limit=10):
    """Return the truthy question documents asked by uid.
    NOTE(review): `getquestion` is not defined or imported in this module,
    so this call raises NameError at runtime; it should probably resolve
    questions via app.signal_getquestion like getuseranswers does. Confirm."""
    qs = UserModel().getquestions(uid, skip, limit)
    return filter(lambda _q:not not _q,[getquestion(q) for q in qs])

@app.signal_addanswer.connect
def adduseranswer(sender,**kv):
    """Signal handler: attach an answer (qid + text) to the user's document."""
    uid = kv.pop('uid',None)
    qid = kv.pop('qid',None)
    answer = kv.pop('answer',None)
    if not uid or not qid:
        return False
    return UserModel().addanswer(uid, qid, answer)

def getuseranswers(uid,skip=0,limit=10):
    """Resolve the questions the user answered through the getquestion
    signal ([0][1] picks the first receiver's return value)."""
    ans = UserModel().getanswers(uid, skip, limit)
    return [app.signal_getquestion.send(__name__,qid=answer['qid'])[0][1] for answer in ans]

# alias: creating a user goes through the same code path as setuser
adduser = setuser
class UserModel(MongoModel):
    """Mongo-backed user model with a redis cache.

    Documents are keyed by '_id' (the user's email/uid); question and
    answer ids are mirrored into redis lists for cheap paging.
    """
    def __init__(self,**user):
        # Keep the raw user dict for a later commit(); lazily create the
        # per-request mongo/redis connections on flask.g.
        self.user = user
        if not getattr(g, 'mongo',None):
            g.mongo = pymongo.Connection(app.config['MONGO_HOST'],app.config['MONGO_PORT'])
        if not getattr(g, 'redis',None):
            g.redis = redis.Redis(app.config['REDIS_HOST'],app.config['REDIS_PORT'],db=app.config['DB_REDIS_USER'])
        MongoModel.__init__(self, g.mongo.freeasker.user, '_id', g.redis, app.config['DEFAULT_EXPIRE'])
    def __question_key__(self,uid):
        # redis list key holding the ids of questions asked by uid
        return '%s:%s:%s'%('user','question',uid)
    def __answer_key__(self,uid):
        # redis list key holding the pickled answers given by uid
        return '%s:%s:%s'%('user','answer',uid)
    def __set_cache__(self, user):
        """Cache the user document and put a TTL on the redis key."""
        # user.pop('passwd',None)
        result = MongoModel.__set_cache__(self, user)
        if result:
            return self.cache.expire(user[self.pk],self.expire)
        return False
    def commit(self):
        """Persist the user dict handed to the constructor."""
        if not self.user:
            self.lasterror = 'No user infomation.'
            return False
        return self.setuser(**self.user)
    def getuser(self,uid):
        """Fetch a user document by uid (cache first)."""
        return self.get(_id=uid)
    def setuser(self,**user):
        """Create a user; requires 'uid' and 'passwd' keys.

        The uid becomes the document _id and the password is stored as a
        salted double-md5 digest. Returns the save() result.
        """
        if not user.has_key('passwd') or not user.has_key('uid'):
            self.lasterror = 'No user passwd or no user uid.at UserModel:setuser.'
            return False
        user[self.pk] = _id = user.pop('uid')
        user['passwd'] = self.__cryptpasswd__(_id, user['passwd'])
        result = self.save(**user)
        if not result:
            self.lasterror = 'The uid is sigined.'
        self.db.ensure_index([('_id',pymongo.DESCENDING)] ,unique = True)
        # if not SearchEngine(*[_id,]*3).insert():
        #     self.lasterror = 'index user \'%s\' fail'%_id
        return result
    def __cryptpasswd__(self,_id,passwd):
        # Derive a deterministic digest from uid + password (md5 of md5),
        # using the uid as a poor-man's salt.
        passwd = '%s<:>%s' % (_id, passwd)
        return hashlib.md5(hashlib.md5(passwd).hexdigest()).hexdigest()
    def login(self,uid,passwd):
        """True when exactly one document matches uid + hashed password."""
        passwd = self.__cryptpasswd__(uid, passwd)
        where = { self.pk:uid , 'passwd':passwd}
        return self.db.find(where).count() == 1
    def changepasswd(self,uid,oldpasswd,newpasswd):
        """Replace the password after verifying the old one."""
        if self.login(uid, oldpasswd):
            passwd = self.__cryptpasswd__(uid, newpasswd)
            result = self.db.update({self.pk:uid},{'$set':{'passwd':passwd}},safe=True)
            return result['updatedExisting']
        self.lasterror = 'User oldpasswd error.at UserModel:changepasswd'
        return False
    def changeuser(self,uid,**user):
        """Update arbitrary profile fields for uid."""
        spec = { self.pk:uid }
        return self.update(spec,user)
    def search(self,s,skip=0,limit=10):
        # NOTE(review): SearchEngine is not defined or imported in this
        # module, so calling search() raises NameError -- confirm backend.
        result = SearchEngine().search(s,skip=skip,limit=limit)
        return filter(lambda _i:not not _i,[self.getuser(r) for r in result])
    def addquestion(self,uid,qid):
        """Push qid onto the user's question list (mongo + redis mirror)."""
        where = { self.pk : uid}
        result = self.db.update(where,{'$push':{'questions':qid}},safe=True)
        self.cache.rpush(self.__question_key__(uid),qid)
        return result['updatedExisting']
    def getquestions(self,uid,skip=0,limit=10):
        """Page through the user's question ids, refilling redis on a miss.
        NOTE(review): lrange takes start/end indexes, not a count -- passing
        `limit` as the end index looks off when skip > 0; confirm."""
        data = self.cache.lrange(self.__question_key__(uid),skip,limit)
        if not data:
            user = self.getuser(uid)
            data = user.get('questions',[])
            for i in data:
                self.cache.rpush(self.__question_key__(uid),i)
        return data
    def addanswer(self,uid,qid,answer):
        """Push an answer record (qid + text) onto the user's answer list."""
        where = { self.pk:uid }
        content = cPickle.dumps({'qid':qid,'answer':answer})
        result = self.db.update(where,{'$push':{'answers':{'qid':qid,'answer':answer}}} , safe=True)
        self.cache.rpush(self.__answer_key__(uid),content)
        return result['updatedExisting']
    def getanswers(self,uid,skip=0,limit=10):
        """Page through the user's answers as unpickled dicts.
        NOTE(review): on a cache miss `data` holds raw dicts from mongo,
        yet they are passed to cPickle.loads below -- that path raises;
        confirm and fix."""
        data = self.cache.lrange(self.__answer_key__(uid),skip,limit)
        if not data:
            user = self.getuser(uid)
            data = user.get('answers',[])
            for i in data:
                self.cache.rpush(self.__answer_key__(uid),cPickle.dumps(i))
        return [cPickle.loads(d) for d in data]
if __name__ == '__main__':
    # Ad-hoc manual test driver; most scenarios are left commented out.
    with app.test_request_context():
        # print getuseranswers('alan.yang@live.com', 0, 10)
        # user = UserModel(uid = 'lo1veida@live.com' , passwd = '111' , nick = 'love')
        # print user.commit()
        # print user.getlasterror()
        # NOTE(review): `user` below is undefined -- its construction above is
        # commented out, so this line raises NameError when run. Also setuser
        # takes keyword arguments via commit(), not a positional dict.
        print user.setuser({'uid':'knift1128@gmail.com' , 'passwd':'111','nick':'love'})
        # print user.login('knift1128@gmail.com', '222')
        # print user.changeuser('knift1128@gmail.com' ,nick='ida')
        # print user.changepasswd('knift1128@gmail.com', '111', '222')
        # print setuser(_id='alan.yang@live.com', passwd='111', safe=True)
        # print __cryptpasswd('aaa','aaa')
        # print changeuser('alanyang54@gmail.com',nick='ida')
        # print getuser('alanyang54@gmail.com')
        # print login('alan.yang@live.com' , '111')
        # print changepasswd('alanyang54@gmail.com', '111', '222')
        # print changeuser('alanyang54@gmail.com', site='http://freeasker.com')
| Python |
#!/usr/bin/env python
# encoding: utf-8
'''
Created on Jun 6, 2011
@author: apple
'''
from flask import Module , render_template, request, session, redirect, url_for,flash
import usermodel as model
from usermodel import UserModel
# Flask Module (pre-blueprint API) collecting all /user views.
user = Module(__name__ , name='user')
@user.route('/')
def index(info=''):
    """User home: profile page when logged in, otherwise the login form."""
    if '_id' not in session:
        return render_template('user/login.html')
    profile = model.getuser(session['_id'])
    return render_template('user/profile.html', user=profile)
@user.route('/register',methods = ['GET','POST'])
def register():
    """Show the registration form (GET) or create a new account (POST).

    On success the new uid is stored in the session and the user is
    redirected to their profile page.
    """
    if '_id' in session:
        return redirect(url_for('user.index'))
    if request.method == 'GET':
        return render_template('user/register.html')
    uid = request.form.get('email',None)
    passwd = request.form.get('passwd',None)
    confirm = request.form.get('agpwd',None)
    if not uid or not passwd or not confirm:
        flash('Please input email and password.')
        return render_template('user/register.html')
    if passwd != confirm:
        flash('Password error.')
        return render_template('user/register.html')
    result = model.setuser({'uid':uid,'passwd':passwd})
    if not result:
        flash('Uid error.')
        # bug fix: template name was misspelled 'user/reigster.html'
        return render_template('user/register.html')
    session['_id'] = uid
    return redirect(url_for('user.index'))
@user.route('/questions')
def questions():
    """List the questions asked by the logged-in user."""
    if '_id' not in session:
        return render_template('user/login.html')
    uid = session['_id']
    profile = model.getuser(uid)
    asked = model.getuserquestions(uid)
    return render_template('user/questions.html', user=profile, questions=asked)

@user.route('/answers')
def answers():
    """List the questions the logged-in user has answered."""
    if '_id' not in session:
        return render_template('user/login.html')
    uid = session['_id']
    profile = model.getuser(uid)
    answered = model.getuseranswers(uid)
    return render_template('user/answers.html', user=profile, questions=answered)
@user.route('/login', methods=['POST', 'GET'])
@user.route('/login/<int:errorcode>', methods=['POST', 'GET'])
def login(errorcode=0):
    """Log a user in.

    A non-zero errorcode re-displays the login form; successful logins
    honour an optional ?next= redirect target.
    """
    if errorcode:
        return render_template('user/login.html')
    if '_id' in session:
        return redirect(url_for('user.index'))
    email = request.form.get('email', '')
    passwd = request.form.get('passwd', '')
    if not (email and passwd):
        return redirect(url_for('user.login', errorcode=1))
    if not model.login(email, passwd):
        return redirect(url_for('user.login' , errorcode=2))
    nexturl = request.args.get('next', '') or url_for('user.index')
    session['_id'] = email
    return redirect(nexturl)
@user.route('/logout' , methods=['POST', 'GET'])
def logout():
    """Drop the session user id and bounce to the question index."""
    session.pop('_id', None)
    return redirect(url_for('question.index'))
| Python |
#!/usr/bin/env python
# encoding: utf-8
'''
Created on Jun 6, 2011
@author: apple
'''
from flask import Module , render_template, request, session, redirect, url_for,flash
import usermodel as model
from usermodel import UserModel
# Flask Module (pre-blueprint API) collecting all /user views.
user = Module(__name__ , name='user')
@user.route('/')
def index(info=''):
    """User home: profile page when logged in, otherwise the login form."""
    if '_id' not in session:
        return render_template('user/login.html')
    profile = model.getuser(session['_id'])
    return render_template('user/profile.html', user=profile)
@user.route('/register',methods = ['GET','POST'])
def register():
    """Show the registration form (GET) or create a new account (POST).

    On success the new uid is stored in the session and the user is
    redirected to their profile page.
    """
    if '_id' in session:
        return redirect(url_for('user.index'))
    if request.method == 'GET':
        return render_template('user/register.html')
    uid = request.form.get('email',None)
    passwd = request.form.get('passwd',None)
    confirm = request.form.get('agpwd',None)
    if not uid or not passwd or not confirm:
        flash('Please input email and password.')
        return render_template('user/register.html')
    if passwd != confirm:
        flash('Password error.')
        return render_template('user/register.html')
    result = model.setuser({'uid':uid,'passwd':passwd})
    if not result:
        flash('Uid error.')
        # bug fix: template name was misspelled 'user/reigster.html'
        return render_template('user/register.html')
    session['_id'] = uid
    return redirect(url_for('user.index'))
@user.route('/questions')
def questions():
    """List the questions asked by the logged-in user."""
    if '_id' not in session:
        return render_template('user/login.html')
    uid = session['_id']
    profile = model.getuser(uid)
    asked = model.getuserquestions(uid)
    return render_template('user/questions.html', user=profile, questions=asked)

@user.route('/answers')
def answers():
    """List the questions the logged-in user has answered."""
    if '_id' not in session:
        return render_template('user/login.html')
    uid = session['_id']
    profile = model.getuser(uid)
    answered = model.getuseranswers(uid)
    return render_template('user/answers.html', user=profile, questions=answered)
@user.route('/login', methods=['POST', 'GET'])
@user.route('/login/<int:errorcode>', methods=['POST', 'GET'])
def login(errorcode=0):
    """Log a user in.

    A non-zero errorcode re-displays the login form; successful logins
    honour an optional ?next= redirect target.
    """
    if errorcode:
        return render_template('user/login.html')
    if '_id' in session:
        return redirect(url_for('user.index'))
    email = request.form.get('email', '')
    passwd = request.form.get('passwd', '')
    if not (email and passwd):
        return redirect(url_for('user.login', errorcode=1))
    if not model.login(email, passwd):
        return redirect(url_for('user.login' , errorcode=2))
    nexturl = request.args.get('next', '') or url_for('user.index')
    session['_id'] = email
    return redirect(nexturl)
@user.route('/logout' , methods=['POST', 'GET'])
def logout():
    """Drop the session user id and bounce to the question index."""
    session.pop('_id', None)
    return redirect(url_for('question.index'))
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
usermodel.py
Created by AlanYang on 2011-06-04.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
import redis
import cPickle
import hashlib
import pymongo
from flask import g
from mongomodel import MongoModel
from application import app
from apps.search.searchmodel import index
def login(uid,passwd):
    """Check uid/passwd against the user collection."""
    user = UserModel()
    return user.login(uid, passwd)

def changeuser(uid,user):
    """Update the given profile fields for uid."""
    return UserModel().changeuser(uid,**user)

@index(['nick'])
def setuser(user):
    """Create a user from a dict carrying at least 'uid' and 'passwd'.
    The @index decorator feeds the 'nick' field to the search indexer."""
    # bug fix: removed leftover debug statement `print user`
    return UserModel(**user).commit()

def changepasswd(uid,oldpasswd,newpasswd):
    """Change uid's password after verifying the old one."""
    return UserModel().changepasswd(uid, oldpasswd, newpasswd)

def getuser(uid):
    """Fetch a user document by its id (email)."""
    return UserModel().getuser(uid)

@app.signal_addquestion.connect
def adduserquestion(sender,**kv):
    """Signal handler: record a question id on the asking user's document."""
    uid = kv.pop('uid',None)
    qid = kv.pop('qid',None)
    if not uid or not qid:
        return False
    return UserModel().addquestion(uid, qid)

def getuserquestions(uid,skip=0,limit=10):
    """Return the truthy question documents asked by uid.
    NOTE(review): `getquestion` is not defined or imported in this module,
    so this call raises NameError at runtime; it should probably resolve
    questions via app.signal_getquestion like getuseranswers does. Confirm."""
    qs = UserModel().getquestions(uid, skip, limit)
    return filter(lambda _q:not not _q,[getquestion(q) for q in qs])

@app.signal_addanswer.connect
def adduseranswer(sender,**kv):
    """Signal handler: attach an answer (qid + text) to the user's document."""
    uid = kv.pop('uid',None)
    qid = kv.pop('qid',None)
    answer = kv.pop('answer',None)
    if not uid or not qid:
        return False
    return UserModel().addanswer(uid, qid, answer)

def getuseranswers(uid,skip=0,limit=10):
    """Resolve the questions the user answered through the getquestion
    signal ([0][1] picks the first receiver's return value)."""
    ans = UserModel().getanswers(uid, skip, limit)
    return [app.signal_getquestion.send(__name__,qid=answer['qid'])[0][1] for answer in ans]

# alias: creating a user goes through the same code path as setuser
adduser = setuser
class UserModel(MongoModel):
    """Mongo-backed user model with a redis cache.

    Documents are keyed by '_id' (the user's email/uid); question and
    answer ids are mirrored into redis lists for cheap paging.
    """
    def __init__(self,**user):
        # Keep the raw user dict for a later commit(); lazily create the
        # per-request mongo/redis connections on flask.g.
        self.user = user
        if not getattr(g, 'mongo',None):
            g.mongo = pymongo.Connection(app.config['MONGO_HOST'],app.config['MONGO_PORT'])
        if not getattr(g, 'redis',None):
            g.redis = redis.Redis(app.config['REDIS_HOST'],app.config['REDIS_PORT'],db=app.config['DB_REDIS_USER'])
        MongoModel.__init__(self, g.mongo.freeasker.user, '_id', g.redis, app.config['DEFAULT_EXPIRE'])
    def __question_key__(self,uid):
        # redis list key holding the ids of questions asked by uid
        return '%s:%s:%s'%('user','question',uid)
    def __answer_key__(self,uid):
        # redis list key holding the pickled answers given by uid
        return '%s:%s:%s'%('user','answer',uid)
    def __set_cache__(self, user):
        """Cache the user document and put a TTL on the redis key."""
        # user.pop('passwd',None)
        result = MongoModel.__set_cache__(self, user)
        if result:
            return self.cache.expire(user[self.pk],self.expire)
        return False
    def commit(self):
        """Persist the user dict handed to the constructor."""
        if not self.user:
            self.lasterror = 'No user infomation.'
            return False
        return self.setuser(**self.user)
    def getuser(self,uid):
        """Fetch a user document by uid (cache first)."""
        return self.get(_id=uid)
    def setuser(self,**user):
        """Create a user; requires 'uid' and 'passwd' keys.

        The uid becomes the document _id and the password is stored as a
        salted double-md5 digest. Returns the save() result.
        """
        if not user.has_key('passwd') or not user.has_key('uid'):
            self.lasterror = 'No user passwd or no user uid.at UserModel:setuser.'
            return False
        user[self.pk] = _id = user.pop('uid')
        user['passwd'] = self.__cryptpasswd__(_id, user['passwd'])
        result = self.save(**user)
        if not result:
            self.lasterror = 'The uid is sigined.'
        self.db.ensure_index([('_id',pymongo.DESCENDING)] ,unique = True)
        # if not SearchEngine(*[_id,]*3).insert():
        #     self.lasterror = 'index user \'%s\' fail'%_id
        return result
    def __cryptpasswd__(self,_id,passwd):
        # Derive a deterministic digest from uid + password (md5 of md5),
        # using the uid as a poor-man's salt.
        passwd = '%s<:>%s' % (_id, passwd)
        return hashlib.md5(hashlib.md5(passwd).hexdigest()).hexdigest()
    def login(self,uid,passwd):
        """True when exactly one document matches uid + hashed password."""
        passwd = self.__cryptpasswd__(uid, passwd)
        where = { self.pk:uid , 'passwd':passwd}
        return self.db.find(where).count() == 1
    def changepasswd(self,uid,oldpasswd,newpasswd):
        """Replace the password after verifying the old one."""
        if self.login(uid, oldpasswd):
            passwd = self.__cryptpasswd__(uid, newpasswd)
            result = self.db.update({self.pk:uid},{'$set':{'passwd':passwd}},safe=True)
            return result['updatedExisting']
        self.lasterror = 'User oldpasswd error.at UserModel:changepasswd'
        return False
    def changeuser(self,uid,**user):
        """Update arbitrary profile fields for uid."""
        spec = { self.pk:uid }
        return self.update(spec,user)
    def search(self,s,skip=0,limit=10):
        # NOTE(review): SearchEngine is not defined or imported in this
        # module, so calling search() raises NameError -- confirm backend.
        result = SearchEngine().search(s,skip=skip,limit=limit)
        return filter(lambda _i:not not _i,[self.getuser(r) for r in result])
    def addquestion(self,uid,qid):
        """Push qid onto the user's question list (mongo + redis mirror)."""
        where = { self.pk : uid}
        result = self.db.update(where,{'$push':{'questions':qid}},safe=True)
        self.cache.rpush(self.__question_key__(uid),qid)
        return result['updatedExisting']
    def getquestions(self,uid,skip=0,limit=10):
        """Page through the user's question ids, refilling redis on a miss.
        NOTE(review): lrange takes start/end indexes, not a count -- passing
        `limit` as the end index looks off when skip > 0; confirm."""
        data = self.cache.lrange(self.__question_key__(uid),skip,limit)
        if not data:
            user = self.getuser(uid)
            data = user.get('questions',[])
            for i in data:
                self.cache.rpush(self.__question_key__(uid),i)
        return data
    def addanswer(self,uid,qid,answer):
        """Push an answer record (qid + text) onto the user's answer list."""
        where = { self.pk:uid }
        content = cPickle.dumps({'qid':qid,'answer':answer})
        result = self.db.update(where,{'$push':{'answers':{'qid':qid,'answer':answer}}} , safe=True)
        self.cache.rpush(self.__answer_key__(uid),content)
        return result['updatedExisting']
    def getanswers(self,uid,skip=0,limit=10):
        """Page through the user's answers as unpickled dicts.
        NOTE(review): on a cache miss `data` holds raw dicts from mongo,
        yet they are passed to cPickle.loads below -- that path raises;
        confirm and fix."""
        data = self.cache.lrange(self.__answer_key__(uid),skip,limit)
        if not data:
            user = self.getuser(uid)
            data = user.get('answers',[])
            for i in data:
                self.cache.rpush(self.__answer_key__(uid),cPickle.dumps(i))
        return [cPickle.loads(d) for d in data]
if __name__ == '__main__':
    # Ad-hoc manual test driver; most scenarios are left commented out.
    with app.test_request_context():
        # print getuseranswers('alan.yang@live.com', 0, 10)
        # user = UserModel(uid = 'lo1veida@live.com' , passwd = '111' , nick = 'love')
        # print user.commit()
        # print user.getlasterror()
        # NOTE(review): `user` below is undefined -- its construction above is
        # commented out, so this line raises NameError when run. Also setuser
        # takes keyword arguments via commit(), not a positional dict.
        print user.setuser({'uid':'knift1128@gmail.com' , 'passwd':'111','nick':'love'})
        # print user.login('knift1128@gmail.com', '222')
        # print user.changeuser('knift1128@gmail.com' ,nick='ida')
        # print user.changepasswd('knift1128@gmail.com', '111', '222')
        # print setuser(_id='alan.yang@live.com', passwd='111', safe=True)
        # print __cryptpasswd('aaa','aaa')
        # print changeuser('alanyang54@gmail.com',nick='ida')
        # print getuser('alanyang54@gmail.com')
        # print login('alan.yang@live.com' , '111')
        # print changepasswd('alanyang54@gmail.com', '111', '222')
        # print changeuser('alanyang54@gmail.com', site='http://freeasker.com')
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
questionmodel.py
Created by AlanYang on 2011-06-04.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
import os
import time
import types
import hashlib
import cPickle
import pymongo
import memcache
from flask import g
from mongomodel import MongoModel
from application import app
from apps.search.searchmodel import index,update
from apps.search import searchengine
@app.signal_getquestion.connect
def getquestionex(sender,**kv):
    """Signal handler: resolve a question document from kv['qid']."""
    qid = kv.pop('qid',None)
    if not qid:
        return False
    return getquestion(qid)

@app.signal_tagquestions.connect
def gettagquestions(sender,**kv):
    """Signal handler: page through the questions carrying kv['tag']."""
    if 'tag' not in kv:
        return []
    return QuestionModel().gettagquestions(tag=kv.get('tag'),
                                           skip=kv.get('skip',0),
                                           limit=kv.get('limit',10))

def getquestion(qid):
    """Fetch one question document by id."""
    return QuestionModel().getquestion(qid)
@index(['title','summary','tags'],name='question')
def setquestion(question):
    """Store a new question dict; on success notify signal_addquestion and
    return the new question id."""
    qid = QuestionModel(**question).commit()
    if qid:
        _r = app.signal_addquestion.send(__name__, uid=question['uid'], qid=qid)
    return qid

def getnewestquestions(typeid=0,skip=0,limit=10,sort=pymongo.DESCENDING):
    """Most recent questions, optionally restricted to a typeid."""
    return QuestionModel().getnewestquestions(typeid, skip, limit, 'timestamp', sort)

def getunanswers(typeid=0,skip=0,limit=10,sort=pymongo.DESCENDING):
    """Questions without any answers yet."""
    return QuestionModel().getunanswers(typeid, skip, limit, 'timestamp', sort)

def getunrightquestions(typeid=0,skip=0,limit=10,sort=pymongo.DESCENDING):
    """Questions without an accepted ('right') answer yet."""
    return QuestionModel().getunrightquestions(typeid, skip, limit, 'timestamp', sort)

@update(['answer','uid'],'$push','answers','question')
def setanswer(qid,answer):
    """Append an answer dict to a question; notify signal_addanswer on success."""
    result = QuestionModel().addanswer(qid,**answer)
    if result:
        _r = app.signal_addanswer.send(__name__, uid=answer['uid'], qid=qid,
                                       answer=answer['answer'])
    return result
def searchquestion(s,skip=0,limit=10):
    """Full-text search over questions; returns engine hit objects."""
    return QuestionModel().search(s, skip, limit)
class QuestionModel(MongoModel):
    """Mongo-backed question model cached in memcache.

    Question ids are md5 digests of the title; answers are embedded in
    the question document under 'answers'.
    """
    def __init__(self,**question):
        # Keep the raw dict for commit(); lazily build the per-request
        # mongo/memcache clients on flask.g.
        self.question = question
        if not getattr(g, 'mongo',None):
            g.mongo = pymongo.Connection(app.config['MONGO_HOST'],app.config['MONGO_PORT'])
        if not getattr(g, 'memcache',None):
            g.memcache = memcache.Client(app.config['CACHE_MEMCACHED_SERVERS'])
        MongoModel.__init__(self, g.mongo.freeasker.question, '_id', g.memcache, app.config['QUESTION_EXPIRE'])
    def __set_cache__(self, question):
        """Cache with memcache's TTL keyword (time=...)."""
        return MongoModel.__set_cache__(self, question,time=self.expire)
    def __hash_question__(self,title):
        """md5 hex digest of the utf-8 title; used as the question _id.
        NOTE(review): if encoding fails, the un-encoded title is still
        hashed below, which may raise -- confirm."""
        if type(title) != types.StringType:
            try:
                title = title.encode('utf-8')
            except Exception,e:
                self.lasterror = 'encode question fail.%s'%e
        return hashlib.md5(title).hexdigest()
    def commit(self):
        """Persist the question dict handed to the constructor."""
        if not self.question:
            self.lasterror = 'No question infomation.'
            return False
        return self.setquestion(**self.question)
    def getquestion(self,qid):
        """Fetch one question by id (cache first)."""
        return self.get(_id=qid)
    def getnewestquestions(self,typeid=0,skip=0,limit=10,sortfield='timestamp',sortmethod=pymongo.DESCENDING):
        """Newest questions, optionally restricted to a typeid."""
        where = {'typeid':typeid} if typeid else {}
        return self.filter(where, skip, limit, sortfield,sortmethod)
    def getunanswers(self,typeid=0,skip=0,limit=10,sortfield='timestamp',sortmethod=pymongo.DESCENDING):
        """Questions with no 'answers' field yet."""
        where = { 'typeid':typeid , 'answers':{'$exists':False} } if typeid else { 'answers':{ '$exists':False } }
        return self.filter(where, skip, limit, sortfield,sortmethod)
    def getunrightquestions(self,typeid=0,skip=0,limit=10,sortfield='timestamp',sortmethod=pymongo.DESCENDING):
        """Questions with no accepted ('rightanswer') answer yet."""
        where = { 'typeid':typeid, 'rightanswer':{'$exists':False} } if typeid else { 'rightanswer':{'$exists':False} }
        return self.filter(where, skip, limit, sortfield,sortmethod)
    def gettagquestions(self,tag,skip=0,limit=10,sortfield='timestamp',sortmethod=pymongo.DESCENDING):
        """Questions carrying the given tag."""
        where = {'tags':tag}
        return self.filter(where,skip,limit,sortfield,sortmethod)
    def addanswer(self,qid,**answer):
        """Append an answer (requires 'answer' and 'uid' keys) to question
        qid, keeping the memcached copy in sync."""
        if not answer.has_key('answer') or not answer.has_key('uid'):
            return False
        answer['timestamp'] = time.time()
        if type(qid) != types.StringType:
            qid = qid.encode('utf-8')
        data = self.cache.get(qid)
        if data:
            data = cPickle.loads(data)
            if data.has_key('answers'):
                data['answers'].append(answer)
            else:
                data['answers'] = [answer,]
            self.cache.set(qid,cPickle.dumps(data),time=self.expire)
        res = self.db.update({'_id':qid}, {'$push':{'answers':answer}} , safe=True)
        # if not SearchEngine(' '.join(( answer['uid'] , answer['answer'] )),qid,qid).insert():
        #     self.lasterror = 'Add answer index into searchengine fail. at QuestionModel:addanswer'
        return res['updatedExisting']
    def search(self,s,skip=0,limit=10):
        """Delegate full-text search to the searchengine module."""
        return searchengine.search(s,skip=skip,limit=limit)
    def setquestion(self,**question):
        """Insert a question (requires 'title' and 'uid'); returns the new
        _id, or False when validation or the insert fails."""
        if not question.has_key('title') or not question.has_key('uid'):
            return False
        question['_id'] = self.__hash_question__(question['title'])
        if 'tags' in question:
            tags = question['tags']
            for tag in tags:
                # bump each tag's popularity score
                app.signal_addtagscore.send(__name__,tag=tag)
        self.db.ensure_index([('uid',pymongo.DESCENDING),
                              ('title',pymongo.DESCENDING),
                              ('tags',pymongo.DESCENDING)])
        result = self.save(**question)
        # raise TypeError,self.lasterror
        # if not SearchEngine(searchindex,question['_id'],question).insert():
        #     self.lasterror = 'index question document \'%s\' fail'%question['title']
        return result and question['_id'] or False
if __name__ == '__main__':
    # Ad-hoc manual test driver for the question model and search.
    # print __isenglish('Key names in inserted documents are limited as follows')
    # print __isenglish('广东广东')
    # print __seg('Key names in inserted documents are limited as follows')
    # print __seg('收集最最精品的各类淘宝皇冠店')
    # print setquestion(uid='alanyang54@gmail.com',title='I love ida very much make love with him .. fuck him')
    with app.test_request_context():
        print setquestion(uid='alanyang54@gmail.com',title='I love ida very much make love with him .. fuck him')
        print
        for i in getnewestquestions():
            print i
        # NOTE(review): 'time' is already imported at module level; this
        # local re-import is redundant.
        import time
        begin = time.time()
        result = searchquestion('ida very make love')
        for r in result:print r['data'] , r['percent']
        print time.time() - begin
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
questionmodel.py
Created by AlanYang on 2011-06-04.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
import os
import time
import types
import hashlib
import cPickle
import pymongo
import memcache
from flask import g
from mongomodel import MongoModel
from application import app
from apps.search.searchmodel import index,update
from apps.search import searchengine
@app.signal_getquestion.connect
def getquestionex(sender,**kv):
    """Signal handler: resolve a question document from kv['qid']."""
    qid = kv.pop('qid',None)
    if not qid:
        return False
    return getquestion(qid)

@app.signal_tagquestions.connect
def gettagquestions(sender,**kv):
    """Signal handler: page through the questions carrying kv['tag']."""
    if 'tag' not in kv:
        return []
    return QuestionModel().gettagquestions(tag=kv.get('tag'),
                                           skip=kv.get('skip',0),
                                           limit=kv.get('limit',10))

def getquestion(qid):
    """Fetch one question document by id."""
    return QuestionModel().getquestion(qid)
@index(['title','summary','tags'],name='question')
def setquestion(question):
    """Store a new question dict; on success notify signal_addquestion and
    return the new question id."""
    qid = QuestionModel(**question).commit()
    if qid:
        _r = app.signal_addquestion.send(__name__, uid=question['uid'], qid=qid)
    return qid

def getnewestquestions(typeid=0,skip=0,limit=10,sort=pymongo.DESCENDING):
    """Most recent questions, optionally restricted to a typeid."""
    return QuestionModel().getnewestquestions(typeid, skip, limit, 'timestamp', sort)

def getunanswers(typeid=0,skip=0,limit=10,sort=pymongo.DESCENDING):
    """Questions without any answers yet."""
    return QuestionModel().getunanswers(typeid, skip, limit, 'timestamp', sort)

def getunrightquestions(typeid=0,skip=0,limit=10,sort=pymongo.DESCENDING):
    """Questions without an accepted ('right') answer yet."""
    return QuestionModel().getunrightquestions(typeid, skip, limit, 'timestamp', sort)

@update(['answer','uid'],'$push','answers','question')
def setanswer(qid,answer):
    """Append an answer dict to a question; notify signal_addanswer on success."""
    result = QuestionModel().addanswer(qid,**answer)
    if result:
        _r = app.signal_addanswer.send(__name__, uid=answer['uid'], qid=qid,
                                       answer=answer['answer'])
    return result
def searchquestion(s,skip=0,limit=10):
    """Full-text search over questions; returns engine hit objects."""
    return QuestionModel().search(s, skip, limit)
class QuestionModel(MongoModel):
    """Mongo-backed question store with a memcache layer.

    A question's _id is the md5 hex digest of its title, so identical
    titles collapse to the same document.
    """
    def __init__(self,**question):
        # Connections are created lazily and shared on flask.g per request.
        self.question = question
        if not getattr(g, 'mongo',None):
            g.mongo = pymongo.Connection(app.config['MONGO_HOST'],app.config['MONGO_PORT'])
        if not getattr(g, 'memcache',None):
            g.memcache = memcache.Client(app.config['CACHE_MEMCACHED_SERVERS'])
        MongoModel.__init__(self, g.mongo.freeasker.question, '_id', g.memcache, app.config['QUESTION_EXPIRE'])
    def __set_cache__(self, question):
        # Base implementation, but always with this model's TTL.
        return MongoModel.__set_cache__(self, question,time=self.expire)
    def __hash_question__(self,title):
        # _id = md5 hex digest of the utf-8 encoded title.
        # NOTE(review): if encode() fails, md5 is still attempted on the
        # original object — confirm titles are always encodable.
        if type(title) != types.StringType:
            try:
                title = title.encode('utf-8')
            except Exception,e:
                self.lasterror = 'encode question fail.%s'%e
        return hashlib.md5(title).hexdigest()
    def commit(self):
        # Persist the question handed to the constructor; id or False.
        if not self.question:
            self.lasterror = 'No question infomation.'
            return False
        return self.setquestion(**self.question)
    def getquestion(self,qid):
        # Single question by id (cache-first, see MongoModel.get).
        return self.get(_id=qid)
    def getnewestquestions(self,typeid=0,skip=0,limit=10,sortfield='timestamp',sortmethod=pymongo.DESCENDING):
        # typeid == 0 means "any type".
        where = {'typeid':typeid} if typeid else {}
        return self.filter(where, skip, limit, sortfield,sortmethod)
    def getunanswers(self,typeid=0,skip=0,limit=10,sortfield='timestamp',sortmethod=pymongo.DESCENDING):
        # Questions that have no 'answers' field at all.
        where = { 'typeid':typeid , 'answers':{'$exists':False} } if typeid else { 'answers':{ '$exists':False } }
        return self.filter(where, skip, limit, sortfield,sortmethod)
    def getunrightquestions(self,typeid=0,skip=0,limit=10,sortfield='timestamp',sortmethod=pymongo.DESCENDING):
        # Questions with no accepted ('rightanswer') answer.
        where = { 'typeid':typeid, 'rightanswer':{'$exists':False} } if typeid else { 'rightanswer':{'$exists':False} }
        return self.filter(where, skip, limit, sortfield,sortmethod)
    def gettagquestions(self,tag,skip=0,limit=10,sortfield='timestamp',sortmethod=pymongo.DESCENDING):
        # Questions whose 'tags' array contains *tag*.
        where = {'tags':tag}
        return self.filter(where,skip,limit,sortfield,sortmethod)
    def addanswer(self,qid,**answer):
        # Append an answer (needs 'answer' and 'uid'); keeps the cached
        # pickle in sync before pushing into mongo.
        if not answer.has_key('answer') or not answer.has_key('uid'):
            return False
        answer['timestamp'] = time.time()
        if type(qid) != types.StringType:
            qid = qid.encode('utf-8')
        data = self.cache.get(qid)
        if data:
            data = cPickle.loads(data)
            if data.has_key('answers'):
                data['answers'].append(answer)
            else:
                data['answers'] = [answer,]
            self.cache.set(qid,cPickle.dumps(data),time=self.expire)
        res = self.db.update({'_id':qid}, {'$push':{'answers':answer}} , safe=True)
        # if not SearchEngine(' '.join(( answer['uid'] , answer['answer'] )),qid,qid).insert():
        #     self.lasterror = 'Add answer index into searchengine fail. at QuestionModel:addanswer'
        return res['updatedExisting']
    def search(self,s,skip=0,limit=10):
        # Full-text search delegated to the searchengine module.
        return searchengine.search(s,skip=skip,limit=limit)
    def setquestion(self,**question):
        # Insert a question (needs 'title' and 'uid'); bumps tag scores
        # and maintains the compound index. Returns _id or False.
        if not question.has_key('title') or not question.has_key('uid'):
            return False
        question['_id'] = self.__hash_question__(question['title'])
        if 'tags' in question:
            tags = question['tags']
            for tag in tags:
                app.signal_addtagscore.send(__name__,tag=tag)
        self.db.ensure_index([('uid',pymongo.DESCENDING),
                ('title',pymongo.DESCENDING),
                ('tags',pymongo.DESCENDING)])
        result = self.save(**question)
        # raise TypeError,self.lasterror
        # if not SearchEngine(searchindex,question['_id'],question).insert():
        #     self.lasterror = 'index question document \'%s\' fail'%question['title']
        return result and question['_id'] or False
if __name__ == '__main__':
    # Ad-hoc manual smoke tests; need an active flask request context.
    # NOTE(review): setquestion() takes a single dict argument; the
    # keyword-style calls below look stale — confirm before relying on them.
    # print __isenglish('Key names in inserted documents are limited as follows')
    # print __isenglish('广东广东')
    # print __seg('Key names in inserted documents are limited as follows')
    # print __seg('收集最最精品的各类淘宝皇冠店')
    # print setquestion(uid='alanyang54@gmail.com',title='I love ida very much make love with him .. fuck him')
    with app.test_request_context():
        print setquestion(uid='alanyang54@gmail.com',title='I love ida very much make love with him .. fuck him')
        print
        for i in getnewestquestions():
            print i
        import time
        begin = time.time()
        result = searchquestion('ida very make love')
        for r in result:print r['data'] , r['percent']
        print time.time() - begin
| Python |
#!/usr/bin/env python
# encoding: utf-8
'''
Created on Jun 6, 2011
@author: apple
'''
from flask import Module, render_template, request, jsonify , flash , session, redirect, url_for
from questionmodel import setquestion, getquestion, searchquestion , getnewestquestions
import questionmodel as model
from apps.user.usermodel import getuser
from application import cache,app
question = Module(__name__ , name='question')
@question.route('/q')
def ask():
    """Show the ask-a-question form; anonymous users go to login first."""
    if '_id' not in session:
        return redirect(url_for('user.login'))
    _, catalogs = app.signal_gettopcatalog.send(__name__)[0]
    return render_template('question/ask.html', cs=catalogs)
@question.route('/summit', methods=['POST', 'GET'])
def summit():
    """Create a question from the submitted form and show the answer page."""
    title = request.form.get('title', '')
    summary = request.form.get('summary', '')
    tags = [t.lower() for t in request.form.get('tags', '').split()]
    # NOTE(review): uid is hard-coded — presumably it should come from
    # session['_id'] like answer() does; confirm before changing.
    _id = setquestion({'uid':'alan.yang@live.com' , 'title':title , 'summary':summary,'tags':tags})
    return render_template('question/answer.html', _id=_id)
@question.route('/d/<qid>')
def detail(qid, errorcode=0):
    """Render a question's detail page (bare template when errorcode set)."""
    if errorcode:
        return render_template('question/detail.html')
    uid = session.get('uid', None)
    viewer = getuser(uid) if uid else None
    return render_template('question/detail.html', question=getquestion(qid), user=viewer)
@question.route('/s')
def search():
    """Search questions; with no query just render the search form."""
    kw = request.args.get('q', None)
    if not kw:
        return render_template('question/search.html')
    hits = searchquestion(kw)
    parts = ['<h3>%s</h3><br>' % (q['data']['title']) for q in hits]
    return '<br />'.join(parts)
@question.route('/')
#@cache.cached(timeout=120)
def index():
    """Front page: the 100 newest questions plus a total-count flash."""
    flash('has questions %s'%len(model.QuestionModel()))
    qs = getnewestquestions(limit=100)
    return render_template('question/index.html', qs=qs)
@question.route('/a' , methods=['POST'])
def answer():
    """Accept an answer POST; requires a logged-in user.

    Redirects back to the question detail page, with errorcode=2 when
    saving the answer failed. (Fix: a leftover debug ``return '2'`` made
    the failure redirect unreachable, and a final duplicate redirect was
    dead code.)
    """
    _id = request.form.get('_id', None)
    body = request.form.get('answer', None)
    if not _id or not body:
        return redirect(url_for('question.index'))
    uid = session.get('_id', None)
    if not uid:
        return redirect(url_for('user.login', next=url_for('question.detail', qid=_id)))
    result = model.setanswer(_id, {'answer':body, 'uid':uid})
    if not result:
        return redirect(url_for('question.detail', qid=_id, errorcode=2))
    return redirect(url_for('question.detail', qid=_id))
| Python |
#!/usr/bin/env python
# encoding: utf-8
'''
Created on Jun 6, 2011
@author: apple
'''
from flask import Module, render_template, request, jsonify , flash , session, redirect, url_for
from questionmodel import setquestion, getquestion, searchquestion , getnewestquestions
import questionmodel as model
from apps.user.usermodel import getuser
from application import cache,app
question = Module(__name__ , name='question')
@question.route('/q')
def ask():
    """Show the ask-a-question form; anonymous users go to login first."""
    if '_id' not in session:
        return redirect(url_for('user.login'))
    _, catalogs = app.signal_gettopcatalog.send(__name__)[0]
    return render_template('question/ask.html', cs=catalogs)
@question.route('/summit', methods=['POST', 'GET'])
def summit():
    """Create a question from the submitted form and show the answer page."""
    title = request.form.get('title', '')
    summary = request.form.get('summary', '')
    tags = [t.lower() for t in request.form.get('tags', '').split()]
    # NOTE(review): uid is hard-coded — presumably it should come from
    # session['_id'] like answer() does; confirm before changing.
    _id = setquestion({'uid':'alan.yang@live.com' , 'title':title , 'summary':summary,'tags':tags})
    return render_template('question/answer.html', _id=_id)
@question.route('/d/<qid>')
def detail(qid, errorcode=0):
    """Render a question's detail page (bare template when errorcode set)."""
    if errorcode:
        return render_template('question/detail.html')
    uid = session.get('uid', None)
    viewer = getuser(uid) if uid else None
    return render_template('question/detail.html', question=getquestion(qid), user=viewer)
@question.route('/s')
def search():
    """Search questions; with no query just render the search form."""
    kw = request.args.get('q', None)
    if not kw:
        return render_template('question/search.html')
    hits = searchquestion(kw)
    parts = ['<h3>%s</h3><br>' % (q['data']['title']) for q in hits]
    return '<br />'.join(parts)
@question.route('/')
#@cache.cached(timeout=120)
def index():
    """Front page: the 100 newest questions plus a total-count flash."""
    flash('has questions %s'%len(model.QuestionModel()))
    qs = getnewestquestions(limit=100)
    return render_template('question/index.html', qs=qs)
@question.route('/a' , methods=['POST'])
def answer():
    """Accept an answer POST; requires a logged-in user.

    Redirects back to the question detail page, with errorcode=2 when
    saving the answer failed. (Fix: a leftover debug ``return '2'`` made
    the failure redirect unreachable, and a final duplicate redirect was
    dead code.)
    """
    _id = request.form.get('_id', None)
    body = request.form.get('answer', None)
    if not _id or not body:
        return redirect(url_for('question.index'))
    uid = session.get('_id', None)
    if not uid:
        return redirect(url_for('user.login', next=url_for('question.detail', qid=_id)))
    result = model.setanswer(_id, {'answer':body, 'uid':uid})
    if not result:
        return redirect(url_for('question.detail', qid=_id, errorcode=2))
    return redirect(url_for('question.detail', qid=_id))
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
tagmodel.py
Created by AlanYang on 2011-06-20.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
import sys
import redis
import pymongo
from flask import g
from mongomodel import MongoModel
from application import app
@app.signal_addtagscore.connect
def bumptag(sender,**kv):
    """Signal handler: bump a tag's score; False when no tag was given."""
    tag = kv.pop('tag', None)
    if not tag:
        return False
    return TagModel().addtag(tag)
def gettags(skip=0,limit=10):
    """Page through known tag names."""
    return TagModel().gettags(skip, limit)
def gethottags(skip=0,limit=10):
    """Tags ordered by score, highest first, with their scores."""
    tmodel = TagModel()
    return tmodel.getsortedtags(skip, limit)
def getdetail(tag):
    """Fetch a tag's detail document."""
    tmodel = TagModel()
    return tmodel.getdetail(tag)
def getquestions(tag,skip=0,limit=10):
    """Questions labelled with *tag*, fetched via the tagquestions signal."""
    tmodel = TagModel()
    return tmodel.gettagquestions(tag, skip, limit)
class TagModel(MongoModel):
    """
    tag name in redis zset
    tag info in mongo cache to redis

    Fix: redis zrange/zrevrange take an inclusive stop INDEX, not a
    count — the old calls passed *limit* as the stop index, returning
    the wrong page (e.g. 11 items for skip=0, limit=10).
    """
    def __init__(self):
        # Lazily create shared connections on flask.g.
        if not getattr(g, 'mongo', None):
            g.mongo = pymongo.Connection(app.config['MONGO_HOST'], app.config['MONGO_PORT'])
        if not getattr(g, 'redis', None):
            g.redis = redis.Redis(app.config['REDIS_HOST'], app.config['REDIS_PORT'], db=app.config['DB_REDIS_TAG'])
        self.__key = 'freeasker:tag:collection'
        self.qset = g.redis
        MongoModel.__init__(self, g.mongo.freeasker.tag, '_id', g.redis, app.config['TAG_EXPIRE'])
    def addtag(self, tag, cnt=1, **kv):
        """Increase *tag*'s score by *cnt*; store the detail doc when given."""
        if kv.has_key('descript'):
            kv[self.pk] = tag
            result = self.save(**kv)
        return self.qset.zincrby(self.__key, tag, cnt)
    def gettags(self, skip=0, limit=10):
        """Return *limit* tag names starting at rank *skip* (low to high)."""
        return self.qset.zrange(self.__key, skip, skip + limit - 1)
    def getsortedtags(self, skip=0, limit=10):
        """Return (tag, score) pairs, highest score first."""
        return self.qset.zrevrange(self.__key, skip, skip + limit - 1, True)
    def getdetail(self, tag):
        """Tag detail document from mongo (cache-first)."""
        return self.get(**{self.pk: tag})
    def gettagquestions(self, tag, skip=0, limit=10):
        """Questions for *tag*, delegated to the question module via signal."""
        _, questions = app.signal_tagquestions.send(__name__, tag=tag, skip=skip, limit=limit)[0]
        return questions
    def __len__(self):
        # Number of distinct tags in the zset.
        return self.qset.zcard(self.__key)
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
tagmodel.py
Created by AlanYang on 2011-06-20.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
import sys
import redis
import pymongo
from flask import g
from mongomodel import MongoModel
from application import app
@app.signal_addtagscore.connect
def bumptag(sender,**kv):
    """Signal handler: bump a tag's score; False when no tag was given."""
    tag = kv.pop('tag', None)
    if not tag:
        return False
    return TagModel().addtag(tag)
def gettags(skip=0,limit=10):
    """Page through known tag names."""
    return TagModel().gettags(skip, limit)
def gethottags(skip=0,limit=10):
    """Tags ordered by score, highest first, with their scores."""
    tmodel = TagModel()
    return tmodel.getsortedtags(skip, limit)
def getdetail(tag):
    """Fetch a tag's detail document."""
    tmodel = TagModel()
    return tmodel.getdetail(tag)
def getquestions(tag,skip=0,limit=10):
    """Questions labelled with *tag*, fetched via the tagquestions signal."""
    tmodel = TagModel()
    return tmodel.gettagquestions(tag, skip, limit)
class TagModel(MongoModel):
    """
    tag name in redis zset
    tag info in mongo cache to redis

    Fix: redis zrange/zrevrange take an inclusive stop INDEX, not a
    count — the old calls passed *limit* as the stop index, returning
    the wrong page (e.g. 11 items for skip=0, limit=10).
    """
    def __init__(self):
        # Lazily create shared connections on flask.g.
        if not getattr(g, 'mongo', None):
            g.mongo = pymongo.Connection(app.config['MONGO_HOST'], app.config['MONGO_PORT'])
        if not getattr(g, 'redis', None):
            g.redis = redis.Redis(app.config['REDIS_HOST'], app.config['REDIS_PORT'], db=app.config['DB_REDIS_TAG'])
        self.__key = 'freeasker:tag:collection'
        self.qset = g.redis
        MongoModel.__init__(self, g.mongo.freeasker.tag, '_id', g.redis, app.config['TAG_EXPIRE'])
    def addtag(self, tag, cnt=1, **kv):
        """Increase *tag*'s score by *cnt*; store the detail doc when given."""
        if kv.has_key('descript'):
            kv[self.pk] = tag
            result = self.save(**kv)
        return self.qset.zincrby(self.__key, tag, cnt)
    def gettags(self, skip=0, limit=10):
        """Return *limit* tag names starting at rank *skip* (low to high)."""
        return self.qset.zrange(self.__key, skip, skip + limit - 1)
    def getsortedtags(self, skip=0, limit=10):
        """Return (tag, score) pairs, highest score first."""
        return self.qset.zrevrange(self.__key, skip, skip + limit - 1, True)
    def getdetail(self, tag):
        """Tag detail document from mongo (cache-first)."""
        return self.get(**{self.pk: tag})
    def gettagquestions(self, tag, skip=0, limit=10):
        """Questions for *tag*, delegated to the question module via signal."""
        _, questions = app.signal_tagquestions.send(__name__, tag=tag, skip=skip, limit=limit)[0]
        return questions
    def __len__(self):
        # Number of distinct tags in the zset.
        return self.qset.zcard(self.__key)
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
__init__.py
Created by AlanYang on 2011-06-17.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
views.py
Created by AlanYang on 2011-06-23.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
from flask import Module,flash,render_template
import tagmodel
tag = Module(__name__,name='tag')
@tag.route('/')
@tag.route('/<int:page>')
def index(page=1):
    """Hot-tag listing, 10 per page."""
    offset = (page - 1) * 10
    tags = tagmodel.gethottags(offset, 10)
    flash('browse tag list')
    flash('has tags %s'%len(tagmodel.TagModel()))
    return render_template('tag/index.html', tags=tags)
@tag.route('/list/<tag>/<int:page>')
def questions(tag,page):
    """Questions under one tag, 10 per page."""
    offset = (page - 1) * 10
    flash('tag %s questions'%tag)
    qs = tagmodel.getquestions(tag, offset, 10)
    return render_template('question/index.html', qs=qs)
#!/usr/bin/env python
# encoding: utf-8
"""
views.py
Created by AlanYang on 2011-06-23.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
from flask import Module,flash,render_template
import tagmodel
tag = Module(__name__,name='tag')
@tag.route('/')
@tag.route('/<int:page>')
def index(page=1):
    """Hot-tag listing, 10 per page."""
    offset = (page - 1) * 10
    tags = tagmodel.gethottags(offset, 10)
    flash('browse tag list')
    flash('has tags %s'%len(tagmodel.TagModel()))
    return render_template('tag/index.html', tags=tags)
@tag.route('/list/<tag>/<int:page>')
def questions(tag,page):
    """Questions under one tag, 10 per page."""
    offset = (page - 1) * 10
    flash('tag %s questions'%tag)
    qs = tagmodel.getquestions(tag, offset, 10)
    return render_template('question/index.html', qs=qs)
#!/usr/bin/env python
# encoding: utf-8
"""
__init__.py
Created by AlanYang on 2011-06-17.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
__init__.py
Created by AlanYang on 2011-06-21.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
| Python |
#!/usr/bin/env python
# encoding: utf-8
'''
Created on Jun 6, 2011
@author: apple
'''
from flask import Module,request,render_template,flash,redirect,url_for,session
import catalogmodel as model
catalog = Module(__name__,name='catalog')
@catalog.route('/')
def index():
    """List all top-level catalogs."""
    top = model.get_top_catalog()
    return render_template('catalog/index.html', cs=top)
@catalog.route('/add',methods = ['POST','GET'])
def add():
    """Show the add-catalog form (GET) or create a catalog (POST).

    POST requires a logged-in user; empty form fields are dropped.
    (Fix: removed ``assert``-based validation — asserts vanish under -O
    and crashed the request instead of handling failure.)
    """
    if request.method == 'GET':
        return render_template('catalog/add.html')
    if '_id' not in session:
        return redirect(url_for('user.login'))
    # Keep only non-empty form fields.
    catalog = dict((k, v) for k, v in zip(request.form.keys(), request.form.values()) if v)
    catalog['creator'] = session['_id']
    result = model.add_catalog(catalog)
    if not result:
        flash('add catalog fail.')
        return render_template('catalog/add.html')
    return redirect(url_for('catalog.index'))
#!/usr/bin/env python
# encoding: utf-8
"""
catalogmodel.py
Created by AlanYang on 2011-06-21.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
import time,pymongo
from flask import g
from mongomodel import MongoModel
from application import app
def add_catalog(catalog):
    """Dispatch to sub- or top-level creation based on a 'parent' key.

    Fix: the old ``and/or`` idiom fell through to add_top_catalog
    whenever add_sub_catalog returned a falsy result, so a FAILED
    sub-catalog insert wrongly created a top-level catalog instead.
    """
    if 'parent' in catalog:
        return add_sub_catalog(catalog)
    return add_top_catalog(catalog)
def add_top_catalog(catalog):
    """Create a top-level catalog; False when input is not a dict.

    Fix: the old ``assert type(catalog) == dict`` crashed on bad input
    (and disappears entirely under -O) instead of returning False.
    """
    if not isinstance(catalog, dict):
        return False
    return CatalogModel().add_top_catalog(**catalog)
def add_sub_catalog(sub):
    """Create a sub catalog; False when input is not a dict."""
    if type(sub) is not dict:
        return False
    return CatalogModel().add_sub_catalog(**sub)
@app.signal_gettopcatalog.connect
def get_top_catalog(sender):
    """Signal receiver: cursor over all top-level catalogs."""
    cmodel = CatalogModel()
    return cmodel.get_top_catalog()
def get_sub_catalog(parent):
    """Cursor over sub catalogs under *parent*."""
    cmodel = CatalogModel()
    return cmodel.get_sub_catalog(parent)
class CatalogModel(MongoModel):
    """
    {'_id':,'name':,'descript':,'creator':,'time':,'parent':,'level':}

    Fix: removed debug ``assert 0`` statements that made every
    validation `return False` unreachable and raised AssertionError in
    production; level validation now records lasterror and returns False.
    """
    top_level = 0
    def __init__(self):
        if not getattr(g, 'mongo', None):
            g.mongo = pymongo.Connection(app.config['MONGO_HOST'], app.config['MONGO_PORT'])
        # Catalogs use no cache layer (cache=None, expire=0).
        MongoModel.__init__(self, g.mongo.freeasker.catalog, '_id', None, 0)
    def add_top_catalog(self, **catalog):
        """Save a top-level catalog; requires 'name' and 'creator'."""
        if not 'name' in catalog or not 'creator' in catalog:
            return False
        catalog['_id'] = catalog['name']
        catalog['level'] = self.top_level
        catalog['time'] = time.time()
        return self.save(**catalog)
    def add_sub_catalog(self, **sub):
        """Save a sub catalog; requires 'name', 'parent', 'level', 'creator'."""
        if not 'name' in sub or not 'parent' in sub or not 'level' in sub or not 'creator' in sub:
            return False
        if not sub['level']:
            # Sub catalogs must sit below the top level (level != 0).
            self.lasterror = 'sub catalog level error,value is %s' % sub['level']
            return False
        sub['_id'] = sub['name']
        sub['time'] = time.time()
        return self.save(**sub)
    def get_top_catalog(self):
        """Cursor over catalogs at the top level."""
        return self.db.find({'level': self.top_level})
    def get_sub_catalog(self, parent):
        """Cursor over direct children of *parent*."""
        return self.db.find({'parent': parent})
#!/usr/bin/env python
# encoding: utf-8
"""
catalogmodel.py
Created by AlanYang on 2011-06-21.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
import time,pymongo
from flask import g
from mongomodel import MongoModel
from application import app
def add_catalog(catalog):
    """Dispatch to sub- or top-level creation based on a 'parent' key.

    Fix: the old ``and/or`` idiom fell through to add_top_catalog
    whenever add_sub_catalog returned a falsy result, so a FAILED
    sub-catalog insert wrongly created a top-level catalog instead.
    """
    if 'parent' in catalog:
        return add_sub_catalog(catalog)
    return add_top_catalog(catalog)
def add_top_catalog(catalog):
    """Create a top-level catalog; False when input is not a dict.

    Fix: the old ``assert type(catalog) == dict`` crashed on bad input
    (and disappears entirely under -O) instead of returning False.
    """
    if not isinstance(catalog, dict):
        return False
    return CatalogModel().add_top_catalog(**catalog)
def add_sub_catalog(sub):
    """Create a sub catalog; False when input is not a dict."""
    if type(sub) is not dict:
        return False
    return CatalogModel().add_sub_catalog(**sub)
@app.signal_gettopcatalog.connect
def get_top_catalog(sender):
    """Signal receiver: cursor over all top-level catalogs."""
    cmodel = CatalogModel()
    return cmodel.get_top_catalog()
def get_sub_catalog(parent):
    """Cursor over sub catalogs under *parent*."""
    cmodel = CatalogModel()
    return cmodel.get_sub_catalog(parent)
class CatalogModel(MongoModel):
    """
    {'_id':,'name':,'descript':,'creator':,'time':,'parent':,'level':}

    Fix: removed debug ``assert 0`` statements that made every
    validation `return False` unreachable and raised AssertionError in
    production; level validation now records lasterror and returns False.
    """
    top_level = 0
    def __init__(self):
        if not getattr(g, 'mongo', None):
            g.mongo = pymongo.Connection(app.config['MONGO_HOST'], app.config['MONGO_PORT'])
        # Catalogs use no cache layer (cache=None, expire=0).
        MongoModel.__init__(self, g.mongo.freeasker.catalog, '_id', None, 0)
    def add_top_catalog(self, **catalog):
        """Save a top-level catalog; requires 'name' and 'creator'."""
        if not 'name' in catalog or not 'creator' in catalog:
            return False
        catalog['_id'] = catalog['name']
        catalog['level'] = self.top_level
        catalog['time'] = time.time()
        return self.save(**catalog)
    def add_sub_catalog(self, **sub):
        """Save a sub catalog; requires 'name', 'parent', 'level', 'creator'."""
        if not 'name' in sub or not 'parent' in sub or not 'level' in sub or not 'creator' in sub:
            return False
        if not sub['level']:
            # Sub catalogs must sit below the top level (level != 0).
            self.lasterror = 'sub catalog level error,value is %s' % sub['level']
            return False
        sub['_id'] = sub['name']
        sub['time'] = time.time()
        return self.save(**sub)
    def get_top_catalog(self):
        """Cursor over catalogs at the top level."""
        return self.db.find({'level': self.top_level})
    def get_sub_catalog(self, parent):
        """Cursor over direct children of *parent*."""
        return self.db.find({'parent': parent})
#!/usr/bin/env python
# encoding: utf-8
'''
Created on Jun 6, 2011
@author: apple
'''
from flask import Module,request,render_template,flash,redirect,url_for,session
import catalogmodel as model
catalog = Module(__name__,name='catalog')
@catalog.route('/')
def index():
    """List all top-level catalogs."""
    top = model.get_top_catalog()
    return render_template('catalog/index.html', cs=top)
@catalog.route('/add',methods = ['POST','GET'])
def add():
    """Show the add-catalog form (GET) or create a catalog (POST).

    POST requires a logged-in user; empty form fields are dropped.
    (Fix: removed ``assert``-based validation — asserts vanish under -O
    and crashed the request instead of handling failure.)
    """
    if request.method == 'GET':
        return render_template('catalog/add.html')
    if '_id' not in session:
        return redirect(url_for('user.login'))
    # Keep only non-empty form fields.
    catalog = dict((k, v) for k, v in zip(request.form.keys(), request.form.values()) if v)
    catalog['creator'] = session['_id']
    result = model.add_catalog(catalog)
    if not result:
        flash('add catalog fail.')
        return render_template('catalog/add.html')
    return redirect(url_for('catalog.index'))
#!/usr/bin/env python
# encoding: utf-8
"""
__init__.py
Created by AlanYang on 2011-06-21.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
| Python |
#!/usr/bin/env python
# encoding: utf-8
'''
Created on Jun 16, 2011
@author: apple
'''
import types
import cPickle
import xapian
from mmseg.search import seg_txt_2_dict
# Fall back to a per-platform default xapian index path when the
# project config does not provide SEARCH_DB_PATH.
try:
    from config import SEARCH_DB_PATH
except Exception, e:
    import sys
    if sys.platform.startswith('win'):
        SEARCH_DB_PATH = 'E:\\xapiandb'
    elif sys.platform.startswith('darwin'):
        SEARCH_DB_PATH = '/Users/apple/Project/xapiandb'
    else:
        SEARCH_DB_PATH = '/var/xapiandata'
all = ['SEARCH_DB_PATH' , '__isenglish__' , 'SearchConnection' , 'IndexConnection']
AND = xapian.Query.OP_AND
OR = xapian.Query.OP_OR
AND_NOT = xapian.Query.OP_AND_NOT
XOR = xapian.Query.OP_XOR
OP_AND_MAYBE = xapian.Query.OP_AND_MAYBE
OP_FILTER = xapian.Query.OP_FILTER
OP_NEAR = xapian.Query.OP_NEAR
OP_PHRASE = xapian.Query.OP_PHRASE
OP_VALUE_RANGE = xapian.Query.OP_VALUE_RANGE
OP_SCALE_WEIGHT= xapian.Query.OP_SCALE_WEIGHT
OP_ELITE_SET = xapian.Query.OP_ELITE_SET
OP_VALUE_GE = xapian.Query.OP_VALUE_GE
OP_VALUE_LE = xapian.Query.OP_VALUE_LE
Query = xapian.Query
"""
IndexConnection + WriteConnection
"""
def index(indexfield,document,docid=None):
    """
    Write and index *document* on the fields listed in *indexfield*.

    When *docid* is given the existing document is replaced and
    reindexed. Returns the document id, or False on bad input types.
    """
    valid = type(indexfield) == types.ListType and type(document) == types.DictType
    if not valid:
        return False
    conn = WriteConnection(docid=docid, **document)
    return conn.commit(*indexfield)
def search(s,unserialize=True,skip=0,limit=10):
    """
    Simple search interface: query *s*, return up to *limit* hits
    starting at *skip*. (Cleanup: dropped a commented-out debug raise.)
    """
    try:
        s = s.encode('utf-8')
    except:
        # best effort: already a byte string, pass through unchanged
        pass
    return SearchConnection().search(s, unserialize=unserialize, skip=skip, limit=limit)
def update(docid,opera,indexfield,doc):
    """Update one document's fields ('$set'/'$push') and reindex them."""
    conn = WriteConnection()
    return conn.update(docid, opera, indexfield, **doc)
def delete(docid):
    """Remove the document *docid* from the index."""
    conn = WriteConnection()
    return conn.delete(docid)
def raw_read(docid):
    """
    Read a document by id, unpickling its stored data.
    """
    conn = ReadConnection()
    return conn.read(docid, unserialize=True)
def raw_write(doc):
    """
    Store *doc* without indexing it; non-string docs are pickled first.
    """
    serialize = type(doc) != types.StringType
    return WriteConnection().write(doc, serialize=serialize)
def raw_replace(docid,doc):
    """
    Replace the stored data of *docid* with *doc*, without reindexing;
    non-string docs are pickled first.
    """
    serialize = type(doc) != types.StringType
    return WriteConnection().replace(docid, doc, serialize=serialize)
def __isenglish__(s):
    """True when *s* looks like latin text (first non-space char is ASCII).

    Empty input counts as english. Fix: the old version stripped the
    string and then indexed s[0] unconditionally, raising IndexError on
    whitespace-only input.
    """
    if not s:
        return True
    s = s.strip()
    if not s:
        return True
    return ord(s[0]) < 127
def __is_string__(s):
    """True when *s* is a byte or unicode string (exact type check)."""
    return type(s) in (types.StringType, types.UnicodeType)
def __format_index__(fields,obj):
    """Flatten every non-string field named in *fields* into plain text."""
    for name in fields:
        value = obj.get(name, None)
        if not __is_string__(value):
            obj[name] = __flat__(value)
    return obj
def __flat__(field):
    """Collapse a nested list/dict structure into one space-joined string.

    Fix: recursive calls return a single joined string, but the old code
    ``extend``ed the accumulator with it, splitting the string into
    individual characters; nested values are now ``append``ed.
    """
    result = []
    if type(field) == types.ListType:
        for item in field:
            if __is_string__(item):
                result.append(item)
            else:
                result.append(__flat__(item))
    elif type(field) == types.DictType:
        for value in field.values():
            if __is_string__(value):
                result.append(value)
            else:
                result.append(__flat__(value))
    elif __is_string__(field):
        result.append(field)
    return ' '.join(result)
class Connection(object):
    """Shared base for xapian connections: db path, error slot, term prefix."""
    def __init__(self,path=None):
        self.lasterror = ''
        self.path = path if path else SEARCH_DB_PATH
        self.prefix = 'Z%s'
    @property
    def error(self):
        """Last recorded error message ('' when none)."""
        return self.lasterror
class SearchConnection(Connection):
def __init__(self,path=None,**kv):
Connection.__init__(self,path)
try:
self.db = xapian.Database(self.path)
except:
self.db = None
self.lasterror = 'Database error.'
raise xapian.DatabaseError,self.lasterror
def __del__(self):
try:
self.db.close()
except:
pass
def search(self,keywords,**kv):
"""
keywords type is list or string
if the's string auto seg
kv skip int limit,unserialize boolean
"""
if not self.db:
return []
skip = kv.pop('skip',0)
limit = kv.pop('limit',10)
enquire = xapian.Enquire(self.db)
query = self.__buildquery__(keywords,**kv)
# raise TypeError,repr(str(query))
# raise TypeError,repr(query.get_length())
try:
enquire.set_query(query)
result = enquire.get_mset(skip,limit)
unserialize = kv.pop('unserialize',None)
decode = cPickle.loads if unserialize else lambda x:x
return [{'detail':r,'data':decode(r.document.get_data())} for r in result]
except Exception,e:
raise TypeError,'query error.%s'%e
return []
def query(self,*querys,**kv):
if not self.db:
self.lasterror = 'Database error.'
return []
for q in querys:
if not isinstance(Query,q):
self.lasterror = 'Query type error.'
return []
skip = kv.pop('skip',0)
limit = kv.pop('limit',10)
opera = kv.pop('opera',OR)
unserialize = kv.pop('unserialize',None)
decode = cPickle.loads if unserialize else lambda x:x
enquire = xapian.Enquire(self.db)
query = Query(opera,querys)
return [{'detail':r,'data':decode(r.document.get_data())} for r in enquire.get_mset(skip,limit)]
def __buildquery__(self,keywords,**kv):
if type(keywords) == types.UnicodeType:
keywords = keywords.encode('utf-8')
if type(keywords) == types.StringType:
return self.__buildenglish__(keywords,**kv) if __isenglish__(keywords) else self.__buildchinese__(keywords,**kv)
elif type(keywords) == types.ListType:
opera = kv.pop('opera',OR)
return xapian.Query(self.prefix%opera,[xapian.Query(i) for i in keywords])
elif type(keywords) == types.DictType:
opera = kv.pop('opera',OR)
return xapian.Query(opera,[xapian.Query(self.prefix%word,value) for word,value, in keywords.items()])
else:
self.lasterror = 'keywords type error.'
raise TypeError,'keywords must is string or list or dict of keyword:value'
def __buildenglish__(self,keywords,**kv):
strategy = kv.pop('strategy',xapian.QueryParser.STEM_SOME)
qp = xapian.QueryParser()
qp.set_database(self.db)
stemmer = xapian.Stem('english')
qp.set_stemmer(stemmer)
qp.set_stemming_strategy(strategy)
qp.set_stemming_strategy(xapian.QueryParser.STEM_SOME)
query = qp.parse_query(keywords)
return query
def __buildchinese__(self,keywords,**kv):
opera = kv.pop('opera',OR)
#add a prefix
querys = [xapian.Query(self.prefix%word,value) for word,value in seg_txt_2_dict(keywords).iteritems()]
return querys[0] if len(querys) == 1 else xapian.Query(opera,querys)
class ReadConnection(Connection):
"""
Read document by id
"""
def __init__(self, path=None):
Connection.__init__(self,path)
try:
self.db = xapian.Database(self.path)
except:
self.db = None
self.lasterror = 'Database error.'
raise xapian.DatabaseError,self.lasterror
def read(self,docid,**kv):
if not self.db:
self.lasterror = 'Database error.'
return {}
decode = cPickle.loads if kv.get('unserialize',False) else lambda x:x
try:
doc = self.db.get_document(docid).get_data()
return decode(doc)
except Exception, e:
self.lasterror = 'Read error.%s'%e
return {}
def get_raw(self,docid):
if not self.db:
self.lasterror = 'Database error.'
return None
try:
return self.db.get_document(docid)
except Exception, e:
self.lasterror = 'Read error.%s'%e
return None
class WriteConnection(Connection):
"""
Write document
"""
def __init__(self, path=None,docid=None,**document):
Connection.__init__(self,path)
if docid:
self.doc = ReadConnection().read(docid)
else:
self.doc = {}
if type(self.doc) != types.DictType:
try:
doc = cPickle.loads(doc)
except Exception, e:
self.lasterror = 'Unserialize document error.'
self.docid,self.doc = None,{}
self.docid = docid
self.doc.update(document)
try:
self.db = xapian.WritableDatabase(self.path,xapian.DB_CREATE_OR_OPEN)
except Exception,e:
self.db = None
self.lasterror = 'Database error.%s'%e
raise xapian.DatabaseError,self.lasterror
def __make_index__(self,*fields):
if not self.doc:
self.lasterror = 'Database error.'
return False
self.indexfield = []
for i in fields:
field = self.doc.get(i,None)
if __is_string__(field):
self.indexfield.append(field.encode('utf-8'))
else:
self.indexfield.append(__flat__(field).encode('utf-8'))
def update(self,docid,opera,indexfield,**doc):
"""
"""
if not self.db:
self.lasterror = 'Database error.'
return False
if not docid:
self.lasterror = 'Empty document id.'
return False
if not doc:
self.lasterror = 'Empty document.'
return False
for k,v in doc.items():
self.__update(docid,opera,indexfield,{k:v})
try:
self.db.flush()
return True
except Exception,e:
self.lasterror = 'Flush Database error.%s'%e
return False
def __update(self,docid,opera,indexfield,doc):
document = ReadConnection().get_raw(docid)
old = cPickle.loads(document.get_data())
if opera == '$set':
# self.indexfield = [doc.get(i,'').encode('utf-8') for i in indexfield]
self.doc = doc
self.__make_index__(*indexfield)
old.update(doc)
elif opera == '$push':
olditem = old.get(doc.keys()[0],[])
# self.indexfield = [doc.values()[0].get(i,'').encode('utf-8') for i in indexfield]
self.doc = doc.values()[0]
self.__make_index__(*indexfield)
olditem.append(doc.values()[0])
old[doc.keys()[0]] = olditem
document.set_data(cPickle.dumps(old))
indexer = xapian.TermGenerator()
indexer.set_stemmer(xapian.Stem('english'))
indexer.index_text(' '.join(self.indexfield))
for i in indexer.get_document().termlist():
document.add_term(i.term)
self.db.replace_document(docid,document)
return self.docid
def commit(self,*indexfield):
if not self.doc:
self.lasterror = 'Empty document.'
return False
self.__make_index__(*indexfield)
return self.__insert__(self.indexfield)
def __insert__(self,indexfield):
if type(indexfield) != types.ListType:
self.lasterror = 'Indexfield type error.'
return False
if not len(indexfield):
self.lasterror = 'Empty indexfield.'
return False
return self.__insertenglish__(indexfield) if __isenglish__(indexfield[0]) else self.__insertchinese__(indexfield)
def __insertenglish__(self,indexfield):
indexer = xapian.TermGenerator()
stemmer = xapian.Stem("english")
indexer.set_stemmer(stemmer)
document = xapian.Document()
document.set_data(cPickle.dumps(self.doc))
indexer.set_document(document)
indexer.index_text(' '.join(indexfield))
try:
if self.docid:
self.db.replace_document(self.docid,document)
self.db.flush()
return self.docid
else:
docid = self.db.add_document(document)
self.db.flush()
return docid
except Exception,e:
print e
self.lasterror = 'Flush db error.'
return False
def __insertchinese__(self, indexfield):
document = xapian.Document()
document.set_data(cPickle.dumps(self.doc))
words = ' '.join(indexfield)
for word,value in seg_txt_2_dict(words).iteritems():
document.add_term(self.prefix%word,value)
try:
if self.docid:
self.db.replace_document(self.docid,document)
self.db.flush()
return self.docid
else:
docid = self.db.add_document(document)
self.db.flush()
return docid
except Exception,e:
self.lasterror = 'Flush db error.%s'%e
return False
def index(self,document,indexfield=[]):
if indexfield and type(indexfield) != types.ListType:
self.lasterror = 'Indexfield type error.'
return False
self.doc = document if type(document) == types.StringType else cPickle.dumps(document)
return self.commit(*indexfield)
def write(self,doc,**kv):
if not self.db:
self.lasterror = 'Database error.'
return False
encode = cPickle.dumps if kv.get('serialize',False) else lambda x:x
try:
docid = self.db.add_document(encode(doc))
self.db.flush()
# Process(target=self.db.flush,args=()).start()
return docid
except Exception, e:
self.lasterror = 'Write error.%s'%e
return False
def replace(self,docid,doc,**kv):
if not self.db:
self.lasterror = 'Database error.'
return False
encode = cPickle.dumps if kv.get('serialize',False) else lambda x:x
try:
document = xapian.Document()
document.set_data(encode(doc))
self.db.replace_document(docid,document)
self.db.flush()
# Process(target=self.db.flush,args=()).start()
return docid
except Exception, e:
self.lasterror = 'Write error.%s'%e
return False
def delete(self,docid):
"""
delete document
"""
if not self.db:
self.lasterror = 'Database error.'
return False
try:
return self.db.delete_document(docid)
except Exception, e:
self.lasterror = 'Delect error.%s'%e
return False
def __len__(self):
if not self.db:
self.lasterror = 'Database error.'
return False
try:
return self.db.get_doccount()
except Exception, e:
self.lasterror = 'Count error.%s'%e
return False
if __name__ == '__main__':
    # Ad-hoc manual smoke tests for the module-level helpers.
    print delete(72)
    # wc = WriteConnection()
    # print wc.update(9,'$push',['answer','uid'],answers={'answer':'pointing to start of positionlist','uid':'ida'})
    # print wc.error
    # print update(9,'$push',['answer','uid'],{'answers':{'answer':'pointing to start of positionlist','uid':'ida'}})
    # print raw_read(9)
    # print raw_replace(73,{'author':'joson','title':'footbar golang','summary':'italy usa tokyo11','extra':'alan'})
    # print search('联系')
    # print raw_read(73)
    # print index(['author','title','summary'],{'author':'青少年','title':'上赛季利物浦获得了联赛的第六名,新赛季他们的目标应该是进入前四,锁定一个欧冠联赛的席位,不过杰拉德表示,红军在传统上是冠军级别的队伍,他们的目标会更高。','summary':'italy usa tokyo11','extra':'alan'})
    # print SearchConnection().search('精英赛',unserialize=True,opera=OR)
#!/usr/bin/env python
# encoding: utf-8
"""
searchmodel.py
Created by AlanYang on 2011-06-20.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
import types
import redis
import cPickle
from functools import wraps
import searchengine
from flask import g
from application import app
from mongomodel import MongoModel
def add_job(*args):
    """Push a pickled job tuple onto the redis search-job queue.

    Lazily creates the per-request redis connection on flask.g.

    BUG FIX: the lpush used to live inside the else branch, so the very
    first call (the one that created the connection) silently dropped
    its job.
    """
    if not getattr(g, 'redis', None):
        g.redis = redis.Redis(app.config['REDIS_HOST'], app.config['REDIS_PORT'],
                              app.config['DB_REDIS_SEARCH'])
    # g.redis.select(app.config['DB_REDIS_SEARCH'])
    assert g.redis.lpush(app.config['JOBS_KEY'], cPickle.dumps(args))
def update(indexfield, opera, fieldname, name=None, remote=True):
    """
    Decorator: propagate a document item update to the search engine.
    opera support '$push' and '$set'
    exp:
        @update(['answer','uid'],'$push','answers','question')
        def setanswer(qid,answer):
            pass
    """
    def __magic__(func):
        @wraps(func)
        def __do__(_id, obj):
            result = func(_id, obj)
            if type(obj) == types.DictType:
                docid = SearchModel().getsearchid(_id)
                if docid:
                    payload = {fieldname: obj}
                    if remote:
                        add_job('update', docid, opera, indexfield, payload, name)
                    else:
                        searchengine.update(docid, opera, indexfield, payload)
            return result
        return __do__
    return __magic__
def replace_index(indexfield, name=None):
    """
    Decorator: after a successful change, replace and re-index the
    document in the search engine (mongo model save wrap).
    exp:
        @replace_index(['name','nick'],name='user')
        def changeuser(uid,user):
            return UserModel().changeuser(uid,user)
    """
    def __magic__(func):
        @wraps(func)
        def __do__(_id, obj):
            result = func(_id, obj)
            if result and type(obj) == types.DictType:
                docid = SearchModel().getsearchid(_id)
                if docid:
                    obj['_id'] = _id
                    obj['name'] = name
                    searchengine.index(indexfield, obj, docid=docid)
            return result
        return __do__
    return __magic__
def index(indexfield, name=None, remote=True):
    """
    Decorator: index a newly saved document in the search engine
    (mongo model save wrap).
    exp:
        @index(['name','nick'],'user',False)
        def setuser(user):
            return UserModel(**user).commit()
    """
    def __magic__(func):
        @wraps(func)
        def __do__(obj):
            _id = func(obj)
            if _id and type(obj) == types.DictType:
                obj['_id'] = _id
                obj['name'] = name
                if remote:
                    add_job('index', indexfield, obj, name)
                else:
                    sid = searchengine.index(indexfield, obj, name)
                    SearchModel(_id=_id, sid=sid, name=name).commit()
            return _id
        return __do__
    return __magic__
class SearchModel(MongoModel):
    """
    mongo id <_id> mapping to search id <sid>
    """
    def __init__(self, **ids):
        # BUG FIX: pymongo was referenced below but never imported at
        # module level; import it here so the lazy connection can be built.
        import pymongo
        self.ids = ids
        if not getattr(g, 'mongo', None):
            g.mongo = pymongo.Connection(app.config['MONGO_HOST'], app.config['MONGO_PORT'])
        MongoModel.__init__(self, g.mongo.freeasker.search, '_id', None, 0)
    def commit(self):
        """Persist the _id<->sid mapping; both keys are required."""
        if not self.ids:
            self.lasterror = 'Empty ids'
            return False
        if not self.ids.has_key('_id') or not self.ids.has_key('sid'):
            self.lasterror = 'ids has not _id or sid'
            return False
        return self.save(**self.ids)
    def getsearchid(self, _id):
        """Return the search docid mapped to *_id*, or None when unmapped."""
        # BUG FIX: get() returns None for unknown ids; guard before .get().
        doc = self.get(_id=_id)
        return doc.get('sid', None) if doc else None
| Python |
#!/usr/bin/env python
# encoding: utf-8
'''
Created on Jun 16, 2011
@author: apple
'''
import types
import cPickle
import xapian
from mmseg.search import seg_txt_2_dict
try:
from config import SEARCH_DB_PATH
except Exception, e:
import sys
if sys.platform.startswith('win'):
SEARCH_DB_PATH = 'E:\\xapiandb'
elif sys.platform.startswith('darwin'):
SEARCH_DB_PATH = '/Users/apple/Project/xapiandb'
else:
SEARCH_DB_PATH = '/var/xapiandata'
all = ['SEARCH_DB_PATH' , '__isenglish__' , 'SearchConnection' , 'IndexConnection']
AND = xapian.Query.OP_AND
OR = xapian.Query.OP_OR
AND_NOT = xapian.Query.OP_AND_NOT
XOR = xapian.Query.OP_XOR
OP_AND_MAYBE = xapian.Query.OP_AND_MAYBE
OP_FILTER = xapian.Query.OP_FILTER
OP_NEAR = xapian.Query.OP_NEAR
OP_PHRASE = xapian.Query.OP_PHRASE
OP_VALUE_RANGE = xapian.Query.OP_VALUE_RANGE
OP_SCALE_WEIGHT= xapian.Query.OP_SCALE_WEIGHT
OP_ELITE_SET = xapian.Query.OP_ELITE_SET
OP_VALUE_GE = xapian.Query.OP_VALUE_GE
OP_VALUE_LE = xapian.Query.OP_VALUE_LE
Query = xapian.Query
"""
IndexConnection + WriteConnection
"""
def index(indexfield, document, docid=None):
    """
    Write and index document
    if has a docid then update the document and reindex
    """
    if type(indexfield) != types.ListType or type(document) != types.DictType:
        return False
    return WriteConnection(docid=docid, **document).commit(*indexfield)
def search(s, unserialize=True, skip=0, limit=10):
    """
    Simple search interface; unicode input is utf-8 encoded first.
    """
    try:
        s = s.encode('utf-8')
    except:
        # non-string input (e.g. a list of terms) passes through unchanged
        pass
    conn = SearchConnection()
    return conn.search(s, unserialize=unserialize, skip=skip, limit=limit)
def update(docid, opera, indexfield, doc):
    """Apply the items of *doc* to indexed document *docid*."""
    conn = WriteConnection()
    return conn.update(docid, opera, indexfield, **doc)
def delete(docid):
    """Remove document *docid* from the search database."""
    conn = WriteConnection()
    return conn.delete(docid)
def raw_read(docid):
    """
    Read a document by id, unpickling its stored payload.
    """
    conn = ReadConnection()
    return conn.read(docid, unserialize=True)
def raw_write(doc):
    """
    Write a document
    but the function don't index the document.
    Non-string payloads are pickled before writing.
    """
    needs_pickle = type(doc) != types.StringType
    return WriteConnection().write(doc, serialize=needs_pickle)
def raw_replace(docid, doc):
    """
    Simple update interface: replace the stored payload of *docid*
    without re-indexing. Non-string payloads are pickled first.
    """
    needs_pickle = type(doc) != types.StringType
    return WriteConnection().replace(docid, doc, serialize=needs_pickle)
def __isenglish__(s):
"""docstring for __"""
if not s : return True
s = s.strip()
return ord(s[0]) < 127
def __is_string__(s):
    """True when *s* is exactly a byte string or a unicode string."""
    return type(s) in (types.StringType, types.UnicodeType)
def __format_index__(fields, obj):
    """Flatten every non-string field named in *fields* in place; returns obj."""
    for key in fields:
        value = obj.get(key, None)
        if not __is_string__(value):
            obj[key] = __flat__(value)
    return obj
def __flat__(field):
    """Flatten nested lists/dicts/strings into one space-joined string."""
    result = []
    if type(field) == types.ListType:
        for item in field:
            if __is_string__(item):
                result.append(item)
            else:
                # BUG FIX: extend() iterated the returned *string*
                # character by character; append the whole string.
                result.append(__flat__(item))
    elif type(field) == types.DictType:
        for k, v in field.items():
            if __is_string__(v):
                result.append(v)
            else:
                # BUG FIX: same extend()-over-string defect as above.
                result.append(__flat__(v))
    elif __is_string__(field):
        result.append(field)
    return ' '.join(result)
class Connection(object):
    """Base for xapian connections: holds the db path, the term prefix
    used for segmented (chinese) terms, and the last error message."""
    def __init__(self, path=None):
        self.lasterror = ''
        self.path = path if path else SEARCH_DB_PATH
        self.prefix = 'Z%s'
    @property
    def error(self):
        """Last recorded error message ('' when none)."""
        return self.lasterror
class SearchConnection(Connection):
def __init__(self,path=None,**kv):
Connection.__init__(self,path)
try:
self.db = xapian.Database(self.path)
except:
self.db = None
self.lasterror = 'Database error.'
raise xapian.DatabaseError,self.lasterror
def __del__(self):
try:
self.db.close()
except:
pass
def search(self,keywords,**kv):
"""
keywords type is list or string
if the's string auto seg
kv skip int limit,unserialize boolean
"""
if not self.db:
return []
skip = kv.pop('skip',0)
limit = kv.pop('limit',10)
enquire = xapian.Enquire(self.db)
query = self.__buildquery__(keywords,**kv)
# raise TypeError,repr(str(query))
# raise TypeError,repr(query.get_length())
try:
enquire.set_query(query)
result = enquire.get_mset(skip,limit)
unserialize = kv.pop('unserialize',None)
decode = cPickle.loads if unserialize else lambda x:x
return [{'detail':r,'data':decode(r.document.get_data())} for r in result]
except Exception,e:
raise TypeError,'query error.%s'%e
return []
def query(self,*querys,**kv):
if not self.db:
self.lasterror = 'Database error.'
return []
for q in querys:
if not isinstance(Query,q):
self.lasterror = 'Query type error.'
return []
skip = kv.pop('skip',0)
limit = kv.pop('limit',10)
opera = kv.pop('opera',OR)
unserialize = kv.pop('unserialize',None)
decode = cPickle.loads if unserialize else lambda x:x
enquire = xapian.Enquire(self.db)
query = Query(opera,querys)
return [{'detail':r,'data':decode(r.document.get_data())} for r in enquire.get_mset(skip,limit)]
def __buildquery__(self,keywords,**kv):
if type(keywords) == types.UnicodeType:
keywords = keywords.encode('utf-8')
if type(keywords) == types.StringType:
return self.__buildenglish__(keywords,**kv) if __isenglish__(keywords) else self.__buildchinese__(keywords,**kv)
elif type(keywords) == types.ListType:
opera = kv.pop('opera',OR)
return xapian.Query(self.prefix%opera,[xapian.Query(i) for i in keywords])
elif type(keywords) == types.DictType:
opera = kv.pop('opera',OR)
return xapian.Query(opera,[xapian.Query(self.prefix%word,value) for word,value, in keywords.items()])
else:
self.lasterror = 'keywords type error.'
raise TypeError,'keywords must is string or list or dict of keyword:value'
def __buildenglish__(self,keywords,**kv):
strategy = kv.pop('strategy',xapian.QueryParser.STEM_SOME)
qp = xapian.QueryParser()
qp.set_database(self.db)
stemmer = xapian.Stem('english')
qp.set_stemmer(stemmer)
qp.set_stemming_strategy(strategy)
qp.set_stemming_strategy(xapian.QueryParser.STEM_SOME)
query = qp.parse_query(keywords)
return query
def __buildchinese__(self,keywords,**kv):
opera = kv.pop('opera',OR)
#add a prefix
querys = [xapian.Query(self.prefix%word,value) for word,value in seg_txt_2_dict(keywords).iteritems()]
return querys[0] if len(querys) == 1 else xapian.Query(opera,querys)
class ReadConnection(Connection):
"""
Read document by id
"""
def __init__(self, path=None):
Connection.__init__(self,path)
try:
self.db = xapian.Database(self.path)
except:
self.db = None
self.lasterror = 'Database error.'
raise xapian.DatabaseError,self.lasterror
def read(self,docid,**kv):
if not self.db:
self.lasterror = 'Database error.'
return {}
decode = cPickle.loads if kv.get('unserialize',False) else lambda x:x
try:
doc = self.db.get_document(docid).get_data()
return decode(doc)
except Exception, e:
self.lasterror = 'Read error.%s'%e
return {}
def get_raw(self,docid):
if not self.db:
self.lasterror = 'Database error.'
return None
try:
return self.db.get_document(docid)
except Exception, e:
self.lasterror = 'Read error.%s'%e
return None
class WriteConnection(Connection):
"""
Write document
"""
def __init__(self, path=None,docid=None,**document):
Connection.__init__(self,path)
if docid:
self.doc = ReadConnection().read(docid)
else:
self.doc = {}
if type(self.doc) != types.DictType:
try:
doc = cPickle.loads(doc)
except Exception, e:
self.lasterror = 'Unserialize document error.'
self.docid,self.doc = None,{}
self.docid = docid
self.doc.update(document)
try:
self.db = xapian.WritableDatabase(self.path,xapian.DB_CREATE_OR_OPEN)
except Exception,e:
self.db = None
self.lasterror = 'Database error.%s'%e
raise xapian.DatabaseError,self.lasterror
def __make_index__(self,*fields):
if not self.doc:
self.lasterror = 'Database error.'
return False
self.indexfield = []
for i in fields:
field = self.doc.get(i,None)
if __is_string__(field):
self.indexfield.append(field.encode('utf-8'))
else:
self.indexfield.append(__flat__(field).encode('utf-8'))
def update(self,docid,opera,indexfield,**doc):
"""
"""
if not self.db:
self.lasterror = 'Database error.'
return False
if not docid:
self.lasterror = 'Empty document id.'
return False
if not doc:
self.lasterror = 'Empty document.'
return False
for k,v in doc.items():
self.__update(docid,opera,indexfield,{k:v})
try:
self.db.flush()
return True
except Exception,e:
self.lasterror = 'Flush Database error.%s'%e
return False
def __update(self,docid,opera,indexfield,doc):
document = ReadConnection().get_raw(docid)
old = cPickle.loads(document.get_data())
if opera == '$set':
# self.indexfield = [doc.get(i,'').encode('utf-8') for i in indexfield]
self.doc = doc
self.__make_index__(*indexfield)
old.update(doc)
elif opera == '$push':
olditem = old.get(doc.keys()[0],[])
# self.indexfield = [doc.values()[0].get(i,'').encode('utf-8') for i in indexfield]
self.doc = doc.values()[0]
self.__make_index__(*indexfield)
olditem.append(doc.values()[0])
old[doc.keys()[0]] = olditem
document.set_data(cPickle.dumps(old))
indexer = xapian.TermGenerator()
indexer.set_stemmer(xapian.Stem('english'))
indexer.index_text(' '.join(self.indexfield))
for i in indexer.get_document().termlist():
document.add_term(i.term)
self.db.replace_document(docid,document)
return self.docid
def commit(self,*indexfield):
if not self.doc:
self.lasterror = 'Empty document.'
return False
self.__make_index__(*indexfield)
return self.__insert__(self.indexfield)
def __insert__(self,indexfield):
if type(indexfield) != types.ListType:
self.lasterror = 'Indexfield type error.'
return False
if not len(indexfield):
self.lasterror = 'Empty indexfield.'
return False
return self.__insertenglish__(indexfield) if __isenglish__(indexfield[0]) else self.__insertchinese__(indexfield)
def __insertenglish__(self,indexfield):
indexer = xapian.TermGenerator()
stemmer = xapian.Stem("english")
indexer.set_stemmer(stemmer)
document = xapian.Document()
document.set_data(cPickle.dumps(self.doc))
indexer.set_document(document)
indexer.index_text(' '.join(indexfield))
try:
if self.docid:
self.db.replace_document(self.docid,document)
self.db.flush()
return self.docid
else:
docid = self.db.add_document(document)
self.db.flush()
return docid
except Exception,e:
print e
self.lasterror = 'Flush db error.'
return False
def __insertchinese__(self, indexfield):
document = xapian.Document()
document.set_data(cPickle.dumps(self.doc))
words = ' '.join(indexfield)
for word,value in seg_txt_2_dict(words).iteritems():
document.add_term(self.prefix%word,value)
try:
if self.docid:
self.db.replace_document(self.docid,document)
self.db.flush()
return self.docid
else:
docid = self.db.add_document(document)
self.db.flush()
return docid
except Exception,e:
self.lasterror = 'Flush db error.%s'%e
return False
def index(self,document,indexfield=[]):
if indexfield and type(indexfield) != types.ListType:
self.lasterror = 'Indexfield type error.'
return False
self.doc = document if type(document) == types.StringType else cPickle.dumps(document)
return self.commit(*indexfield)
def write(self,doc,**kv):
if not self.db:
self.lasterror = 'Database error.'
return False
encode = cPickle.dumps if kv.get('serialize',False) else lambda x:x
try:
docid = self.db.add_document(encode(doc))
self.db.flush()
# Process(target=self.db.flush,args=()).start()
return docid
except Exception, e:
self.lasterror = 'Write error.%s'%e
return False
def replace(self,docid,doc,**kv):
if not self.db:
self.lasterror = 'Database error.'
return False
encode = cPickle.dumps if kv.get('serialize',False) else lambda x:x
try:
document = xapian.Document()
document.set_data(encode(doc))
self.db.replace_document(docid,document)
self.db.flush()
# Process(target=self.db.flush,args=()).start()
return docid
except Exception, e:
self.lasterror = 'Write error.%s'%e
return False
def delete(self,docid):
"""
delete document
"""
if not self.db:
self.lasterror = 'Database error.'
return False
try:
return self.db.delete_document(docid)
except Exception, e:
self.lasterror = 'Delect error.%s'%e
return False
def __len__(self):
if not self.db:
self.lasterror = 'Database error.'
return False
try:
return self.db.get_doccount()
except Exception, e:
self.lasterror = 'Count error.%s'%e
return False
if __name__ == '__main__':
    # Ad-hoc manual smoke test: remove document 72 from the search index.
    # The commented lines below are kept as sample manual invocations.
    print delete(72)
    # wc = WriteConnection()
    # print wc.update(9,'$push',['answer','uid'],answers={'answer':'pointing to start of positionlist','uid':'ida'})
    # print wc.error
    # print update(9,'$push',['answer','uid'],{'answers':{'answer':'pointing to start of positionlist','uid':'ida'}})
    # print raw_read(9)
    # print raw_replace(73,{'author':'joson','title':'footbar golang','summary':'italy usa tokyo11','extra':'alan'})
    # print search('联系')
    # print raw_read(73)
    # print index(['author','title','summary'],{'author':'青少年','title':'上赛季利物浦获得了联赛的第六名,新赛季他们的目标应该是进入前四,锁定一个欧冠联赛的席位,不过杰拉德表示,红军在传统上是冠军级别的队伍,他们的目标会更高。','summary':'italy usa tokyo11','extra':'alan'})
    # print SearchConnection().search('精英赛',unserialize=True,opera=OR)
#!/usr/bin/env python
# encoding: utf-8
"""
searchmodel.py
Created by AlanYang on 2011-06-20.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
import types
import redis
import cPickle
from functools import wraps
import searchengine
from flask import g
from application import app
from mongomodel import MongoModel
def add_job(*args):
    """Push a pickled job tuple onto the redis search-job queue.

    Lazily creates the per-request redis connection on flask.g.

    BUG FIX: the lpush used to live inside the else branch, so the very
    first call (the one that created the connection) silently dropped
    its job.
    """
    if not getattr(g, 'redis', None):
        g.redis = redis.Redis(app.config['REDIS_HOST'], app.config['REDIS_PORT'],
                              app.config['DB_REDIS_SEARCH'])
    # g.redis.select(app.config['DB_REDIS_SEARCH'])
    assert g.redis.lpush(app.config['JOBS_KEY'], cPickle.dumps(args))
def update(indexfield, opera, fieldname, name=None, remote=True):
    """
    Decorator: propagate a document item update to the search engine.
    opera support '$push' and '$set'
    exp:
        @update(['answer','uid'],'$push','answers','question')
        def setanswer(qid,answer):
            pass
    """
    def __magic__(func):
        @wraps(func)
        def __do__(_id, obj):
            result = func(_id, obj)
            if type(obj) == types.DictType:
                docid = SearchModel().getsearchid(_id)
                if docid:
                    payload = {fieldname: obj}
                    if remote:
                        add_job('update', docid, opera, indexfield, payload, name)
                    else:
                        searchengine.update(docid, opera, indexfield, payload)
            return result
        return __do__
    return __magic__
def replace_index(indexfield, name=None):
    """
    Decorator: after a successful change, replace and re-index the
    document in the search engine (mongo model save wrap).
    exp:
        @replace_index(['name','nick'],name='user')
        def changeuser(uid,user):
            return UserModel().changeuser(uid,user)
    """
    def __magic__(func):
        @wraps(func)
        def __do__(_id, obj):
            result = func(_id, obj)
            if result and type(obj) == types.DictType:
                docid = SearchModel().getsearchid(_id)
                if docid:
                    obj['_id'] = _id
                    obj['name'] = name
                    searchengine.index(indexfield, obj, docid=docid)
            return result
        return __do__
    return __magic__
def index(indexfield, name=None, remote=True):
    """
    Decorator: index a newly saved document in the search engine
    (mongo model save wrap).
    exp:
        @index(['name','nick'],'user',False)
        def setuser(user):
            return UserModel(**user).commit()
    """
    def __magic__(func):
        @wraps(func)
        def __do__(obj):
            _id = func(obj)
            if _id and type(obj) == types.DictType:
                obj['_id'] = _id
                obj['name'] = name
                if remote:
                    add_job('index', indexfield, obj, name)
                else:
                    sid = searchengine.index(indexfield, obj, name)
                    SearchModel(_id=_id, sid=sid, name=name).commit()
            return _id
        return __do__
    return __magic__
class SearchModel(MongoModel):
    """
    mongo id <_id> mapping to search id <sid>
    """
    def __init__(self, **ids):
        # BUG FIX: pymongo was referenced below but never imported at
        # module level; import it here so the lazy connection can be built.
        import pymongo
        self.ids = ids
        if not getattr(g, 'mongo', None):
            g.mongo = pymongo.Connection(app.config['MONGO_HOST'], app.config['MONGO_PORT'])
        MongoModel.__init__(self, g.mongo.freeasker.search, '_id', None, 0)
    def commit(self):
        """Persist the _id<->sid mapping; both keys are required."""
        if not self.ids:
            self.lasterror = 'Empty ids'
            return False
        if not self.ids.has_key('_id') or not self.ids.has_key('sid'):
            self.lasterror = 'ids has not _id or sid'
            return False
        return self.save(**self.ids)
    def getsearchid(self, _id):
        """Return the search docid mapped to *_id*, or None when unmapped."""
        # BUG FIX: get() returns None for unknown ids; guard before .get().
        doc = self.get(_id=_id)
        return doc.get('sid', None) if doc else None
| Python |
#!/usr/bin/env python
# encoding: utf-8
'''
Created on Jun 6, 2011
@author: apple
'''
import redis
import pymongo
from flask import Flask,g
from flask.signals import Namespace
from flaskext.cache import Cache
# Flask application object; all settings come from config.DevelopmentConfig.
app = Flask(__name__)
app.config.from_object('config.DevelopmentConfig')
cache = Cache(app)
# Application-wide blinker signals exposed on the app object; handlers are
# presumably registered elsewhere in the project -- confirm.
signals = Namespace()
app.signal_addquestion = signals.signal('addquestion')
app.signal_addanswer = signals.signal('addanswer')
app.signal_getquestion = signals.signal('getquestion')
app.signal_indexobject = signals.signal('indexobject')
app.signal_addtagscore = signals.signal('addtagscore')
app.signal_tagquestions= signals.signal('tagquestions')
app.signal_gettopcatalog = signals.signal('gettopcatalog')
@app.before_request
def before_request():
    # No per-request setup needed: datastore handles (g.mongo, g.redis)
    # are created lazily by the code that uses them.
    pass
@app.after_request
def after_request(response):
    """Release per-request datastore handles, then pass the response through."""
    mongo = getattr(g, 'mongo', None)
    if mongo:
        mongo.disconnect()
    # redis connections are left alone: the server-side timeout configured
    # in redis.conf reclaims them automatically.
    # g.redis.connection.disconnect()
    memcache = getattr(g, 'memcache', None)
    if memcache:
        memcache.disconnect_all()
    return response
# Register the application's modules (Flask 0.x "Module" blueprints):
# question handles the site root, the others get short url prefixes.
from apps.user.views import user
from apps.question.views import question
from apps.tag.views import tag
from apps.catalog.views import catalog
app.register_module(question)
app.register_module(user, url_prefix='/u')
app.register_module(tag, url_prefix='/t')
app.register_module(catalog,url_prefix='/c')
| Python |
#!/usr/bin/env python
# encoding: utf-8
'''
Created on Jun 6, 2011
@author: apple
'''
| Python |
#!/usr/bin/env python
# encoding: utf-8
'''
Created on Jun 9, 2011
@author: apple
'''
import sys
from optparse import OptionParser
from application import app
if __name__ == '__main__':
    # BUG FIX: an unconditional `app.run(host='0.0.0.0', port=7777,
    # debug=True)` followed by `sys.exit(0)` used to sit here, leaving the
    # entire command-line option handling below unreachable dead code.
    parser = OptionParser()
    parser.add_option('-r', '--runtype', dest='runtype', default='raw',
                      help='Application run type,valid args \'raw\' \'geventwsgi\'.')
    parser.add_option('-d', '--daemon', dest='daemon',
                      help='Daemon run applition.', action='store_true')
    parser.add_option('-a', '--host', dest='host', default='127.0.0.1',
                      help='Application host.')
    parser.add_option('-p', '--port', dest='port', default=7777, type='int',
                      help='Application port.')
    parser.add_option('-l', '--log', dest='islogging', action='store_true',
                      help='Running in debug mode.')
    options, _a = parser.parse_args()
    options.islogging = options.islogging or False
    if options.runtype == 'geventwsgi':
        # Serve via gevent's WSGI server (imported lazily so a plain run
        # doesn't require gevent to be installed).
        from gevent.wsgi import WSGIServer
        svr = WSGIServer((options.host, options.port), app)
        if options.daemon:
            import daemon
            with daemon.DaemonContext():
                log = '' if not options.islogging else 'default'
                svr.serve_forever(log=log)
        else:
            svr.serve_forever()
    elif options.runtype == 'raw':
        # Flask's builtin development server.
        if options.daemon:
            import daemon
            with daemon.DaemonContext():
                app.run(options.host, options.port, debug=options.islogging)
        else:
            app.run(options.host, options.port, debug=options.islogging)
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Django settings for this project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = ()
MANAGERS = ADMINS
# NOTE(review): the SQL database settings are intentionally blank --
# presumably the App Engine datastore is used instead; confirm.
DATABASE_ENGINE = '' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
# although not all variations may be possible on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles' # i.e., Mountain View
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Whether to append trailing slashes to URLs.
APPEND_SLASH = False
# Make this unique, and don't share it with anybody.
# NOTE(review): hard-coded placeholder secret -- replace before deploying.
SECRET_KEY = 'hubba-hubba'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
)
# No middleware is needed for this app.
MIDDLEWARE_CLASSES = (
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
    os.path.join(os.path.dirname(__file__), 'templates'),
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.contenttypes',
)
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
url.py
@author Andrey Pavlenko
"""
from django.conf.urls.defaults import *
urlpatterns = patterns(
    '',
    # / -> post list
    (r'^$', 'views.index'),
    # /new -> create a post
    (r'^new$', 'views.new'),
    # /edit/<id> -> edit an existing post
    (r'^edit/(\d+)$', 'views.edit'),
)
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
views.py
@author Andrey Pavlenko
"""
import os
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext.db import djangoforms
import django
from django import http
from django import shortcuts
from model import Post
class PostForm(djangoforms.ModelForm):
    """Form for creating/editing a Post; server-managed fields excluded."""
    class Meta:
        model = Post
        # These fields are set by the server, never by the form.
        exclude = ['comments_count', 'author', 'modify_date']
def respond(request, user, template, params=None):
    """Helper to render a response, passing standard stuff to the response.
    Args:
      request: The request object.
      user: The User object representing the current user; or None if nobody
      is logged in.
      template: The template name; '.html' is appended automatically.
      params: A dict giving the template parameters; modified in-place.
    Returns:
      Whatever render_to_response(template, params) returns.
    Raises:
      Whatever render_to_response(template, params) raises.
    """
    if params is None:
        params = {}
    if user:
        params['user'] = user
        params['sign_out'] = users.create_logout_url('/')
        # BUG FIX: os.getenv returns None when SERVER_SOFTWARE is unset,
        # which made the `in` test raise TypeError; default to ''.
        params['is_admin'] = (users.is_current_user_admin() and
                              'Dev' in os.getenv('SERVER_SOFTWARE', ''))
    else:
        params['sign_in'] = users.create_login_url(request.path)
    if not template.endswith('.html'):
        template += '.html'
    return shortcuts.render_to_response(template, params)
def index(request):
    """Request / -- show first page with 5 posts."""
    # NOTE(review): the docstring says 5 posts but the query has no
    # limit -- confirm whether the template slices the result.
    user = users.get_current_user()
    posts = db.GqlQuery('SELECT * FROM Post ORDER BY public_date DESC')
    return respond(request, user, 'list', {'posts': posts})
def edit(request, post_id):
    """Create or edit a post. GET shows a blank form, POST processes it.

    post_id may be None (create, see new()) or the numeric id of an
    existing Post. Only signed-in admins may use this view.
    """
    user = users.get_current_user()
    if user is None:
        return http.HttpResponseForbidden('You must be signed in to add or edit a post')
    if not users.is_current_user_admin():
        return http.HttpResponseForbidden('Only admin may edit post')
    post = None
    if post_id:
        post = Post.get(db.Key.from_path(Post.kind(), int(post_id)))
        if post is None:
            return http.HttpResponseNotFound('No post exists with that key (%r)' % post_id)
    # Bound form on POST, unbound (or instance-backed) form on GET.
    form = PostForm(data=request.POST or None, instance=post)
    if not request.POST:
        return respond(request, user, 'post', {'form': form, 'post': post})
    errors = form.errors
    if not errors:
        try:
            post = form.save(commit=False)
        except ValueError, err:
            errors['__all__'] = unicode(err)
    if errors:
        # Re-display the form with validation errors.
        return respond(request, user, 'post', {'form': form, 'post':post})
    # First save of a new post: stamp the author before persisting.
    if not post.author:
        post.author = user
    post.put()
    return http.HttpResponseRedirect('/')
def new(request):
    """Create a post. GET shows a blank form, POST processes it."""
    # Reuse edit() with no post id so both paths share validation/saving.
    return edit(request, None)
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bootstrap for running a Django app under Google App Engine.
The site-specific code is all in other files: settings.py, urls.py,
models.py, views.py. And in fact, only 'settings' is referenced here
directly -- everything else is controlled from there.
"""
# Standard Python imports.
import os
import sys
import logging
import __builtin__
# Google App Hosting imports.
from google.appengine.ext.webapp import util
import pickle
# Alias cPickle to the pure-Python pickle so later `import cPickle`
# statements resolve in this environment.
sys.modules['cPickle'] = pickle
# Enable info logging by the app (this is separate from appserver's
# logging).
logging.getLogger().setLevel(logging.INFO)
# Force sys.path to have our own directory first, so we can import from it.
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
# Must set this env var *before* importing any part of Django.
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
# Make sure we can import Django. We may end up needing to do this
# little dance, courtesy of Google third-party versioning hacks. Note
# that this patches up sys.modules, so all other code can just use
# "from django import forms" etc.
try:
    from django import v0_96 as django
except ImportError:
    pass
# Import the part of Django that we use here.
import django.core.handlers.wsgi
def main():
    """Entry point: serve the Django application via App Engine's WSGI util."""
    wsgi_app = django.core.handlers.wsgi.WSGIHandler()
    util.run_wsgi_app(wsgi_app)
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
model.py
@author Andrey Pavlenko
"""
import datetime
from google.appengine.ext import db
from google.appengine.api import users
class Post(db.Model):
    '''
    Object for blog post and page.
    '''
    # Stamped with the signed-in user when the entity is first put().
    author=db.UserProperty(required = True, auto_current_user_add = True)
    title=db.StringProperty()
    # URL-friendly identifier for the post.
    slug=db.StringProperty()
    content=db.TextProperty()
    # Set once when the entity is first stored.
    public_date=db.DateTimeProperty(auto_now_add=True)
    # Refreshed automatically on every put().
    modify_date=db.DateTimeProperty(auto_now = True)
    is_draft = db.BooleanProperty()
    enable_comments = db.BooleanProperty()
    # Denormalized counter, maintained by the server (excluded from forms).
    comments_count = db.IntegerProperty(default = 0)
    tags = db.CategoryProperty()
| Python |
#!/usr/bin/python2.6
#
# Simple http server to emulate api.playfoursquare.com
import logging
import shutil
import sys
import urlparse
import SimpleHTTPServer
import BaseHTTPServer
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Handle playfoursquare.com requests, for testing."""
    # Request path -> canned capture file served in response.
    CAPTURES = {
        '/v1/venue': '../captures/api/v1/venue.xml',
        '/v1/addvenue': '../captures/api/v1/venue.xml',
        '/v1/venues': '../captures/api/v1/venues.xml',
        '/v1/user': '../captures/api/v1/user.xml',
        '/v1/checkcity': '../captures/api/v1/checkcity.xml',
        '/v1/checkins': '../captures/api/v1/checkins.xml',
        '/v1/cities': '../captures/api/v1/cities.xml',
        '/v1/switchcity': '../captures/api/v1/switchcity.xml',
        '/v1/tips': '../captures/api/v1/tips.xml',
        '/v1/checkin': '../captures/api/v1/checkin.xml',
        '/history/12345.rss': '../captures/api/v1/feed.xml',
    }
    def do_GET(self):
        """Serve the capture matching the request path (POST handled the same)."""
        logging.warn('do_GET: %s, %s', self.command, self.path)
        url = urlparse.urlparse(self.path)
        logging.warn('do_GET: %s', url)
        # BUG FIX: dead query-string code removed -- it iterated the
        # parse_qs dict taking pair[0] (the first character of each key)
        # and the result was never used.
        response = self.handle_url(url)
        if response != None:
            self.send_200()
            try:
                shutil.copyfileobj(response, self.wfile)
            finally:
                # BUG FIX: the capture file handle was never closed.
                response.close()
            self.wfile.close()
    do_POST = do_GET
    def handle_url(self, url):
        """Return an open capture file for *url*, or send a 404 and return None."""
        path = self.CAPTURES.get(url.path)
        if path is None:
            self.send_error(404)
        else:
            logging.warn('Using: %s' % path)
            return open(path)
    def send_200(self):
        """Write a 200 response header with a text/xml content type."""
        self.send_response(200)
        self.send_header('Content-type', 'text/xml')
        self.end_headers()
def main():
if len(sys.argv) > 1:
port = int(sys.argv[1])
else:
port = 8080
server_address = ('0.0.0.0', port)
httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
# Java class template for a generated XML pull-parser. Placeholders:
# %(type_name)s, %(top_node_name)s, %(timestamp)s and %(stanzas)s are
# substituted by GenerateClass().
PARSER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna (joe@joelapenna.com)
* @param <T>
*/
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
@Override
public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
FoursquareError, FoursquareParseException {
parser.require(XmlPullParser.START_TAG, null, null);
%(type_name)s %(top_node_name)s = new %(type_name)s();
while (parser.nextTag() == XmlPullParser.START_TAG) {
String name = parser.getName();
%(stanzas)s
} else {
// Consume something we don't understand.
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
skipSubTree(parser);
}
}
return %(top_node_name)s;
}
}"""
# Stanza for boolean-typed elements: parse the text into a Boolean.
BOOLEAN_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""
# Stanza for repeated (group) elements parsed with a nested GroupParser.
GROUP_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""
# Stanza for nested complex-typed elements parsed with their own parser.
COMPLEX_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""
# Default stanza: a plain string element.
STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(parser.nextText());
"""
def main():
    """Parse the XML type description named on the command line and print
    the corresponding generated Java parser class to stdout."""
    parsed = common.WalkNodesForAttributes(sys.argv[1])
    GenerateClass(*parsed)
def GenerateClass(type_name, top_node_name, attributes):
    """Print the generated Java parser class for one type to stdout.

    type_name: the type of object the parser returns
    top_node_name: the name of the object the parser returns.
    attributes: {name: (type, [child_type])} per common.WalkNodesForAttributes
    """
    stanzas = []
    for name in sorted(attributes):
        typ, children = attributes[name]
        replacements = Replacements(top_node_name, name, typ, children)
        if typ == common.BOOLEAN:
            stanzas.append(BOOLEAN_STANZA % replacements)
        elif typ == common.GROUP:
            stanzas.append(GROUP_STANZA % replacements)
        elif typ in common.COMPLEX:
            stanzas.append(COMPLEX_STANZA % replacements)
        else:
            stanzas.append(STANZA % replacements)
    if stanzas:
        # pop off the extranious } else for the first conditional stanza.
        stanzas[0] = stanzas[0].replace('} else ', '', 1)
    # The outer PARSER template only consumes type_name/top_node_name/
    # timestamp/stanzas, so dummy name/type values suffice here.  Unlike
    # the loop-leaked `name`/`typ` the old code reused, these exist even
    # when `attributes` is empty (which used to raise NameError).
    replacements = Replacements(top_node_name, '', '', [None])
    replacements['stanzas'] = '\n'.join(stanzas).strip()
    print(PARSER % replacements)
def Replacements(top_node_name, name, typ, children):
    """Build the template-substitution dict for one XML attribute.

    top_node_name: raw name of the enclosing element, e.g. "venue_group".
    name: raw attribute element name, e.g. "first_name".
    typ: the declared type string for the attribute.
    children: single-element list holding the child type name (or a falsy
        placeholder when there is none).
    """
    def pascal(snake):
        # "venue_group" -> "VenueGroup"
        return ''.join(part.capitalize() for part in snake.split('_'))

    type_name = pascal(top_node_name)
    camel_name = pascal(name)
    # NOTE(review): despite the historical "camelCaseLocalName" comment,
    # this produces e.g. "Firstname" -- preserved as-is.
    attribute_name = camel_name.lower().capitalize()
    field_name = 'm' + camel_name
    if children[0]:
        sub_parser_camel_case = children[0] + 'Parser'
    else:
        # Heuristic: drop the trailing character (usually a plural 's')
        # to guess a group's element parser, e.g. "Tips" -> "TipParser".
        sub_parser_camel_case = camel_name[:-1] + 'Parser'
    return {
        'type_name': type_name,
        'name': name,
        'top_node_name': top_node_name,
        'camel_name': camel_name,
        'parser_name': typ + 'Parser',
        'attribute_name': attribute_name,
        'field_name': field_name,
        'typ': typ,
        'timestamp': datetime.datetime.now(),
        'sub_parser_camel_case': sub_parser_camel_case,
        'sub_type': children[0],
    }
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python
"""
Pull a oAuth protected page from foursquare.
Expects ~/.oget to contain (one on each line):
CONSUMER_KEY
CONSUMER_KEY_SECRET
USERNAME
PASSWORD
Don't forget to chmod 600 the file!
"""
import httplib
import os
import re
import sys
import urllib
import urllib2
import urlparse
import user
from xml.dom import pulldom
from xml.dom import minidom
import oauth
"""From: http://groups.google.com/group/foursquare-api/web/oauth
@consumer = OAuth::Consumer.new("consumer_token","consumer_secret", {
:site => "http://foursquare.com",
:scheme => :header,
:http_method => :post,
:request_token_path => "/oauth/request_token",
:access_token_path => "/oauth/access_token",
:authorize_path => "/oauth/authorize"
})
"""
SERVER = 'api.foursquare.com:80'
CONTENT_TYPE_HEADER = {'Content-Type' :'application/x-www-form-urlencoded'}
SIGNATURE_METHOD = oauth.OAuthSignatureMethod_HMAC_SHA1()
AUTHEXCHANGE_URL = 'http://api.foursquare.com/v1/authexchange'
def parse_auth_response(auth_response):
    """Extract (token, secret) from the XML body returned by authexchange.

    Raises AttributeError if either tag is missing (re.search returns None).
    """
    token = re.search(
        '<oauth_token>(.*)</oauth_token>', auth_response).group(1)
    secret = re.search(
        '<oauth_token_secret>(.*)</oauth_token_secret>',
        auth_response).group(1)
    return (token, secret)
def create_signed_oauth_request(username, password, consumer):
    """Build and HMAC-SHA1-sign an authexchange POST request.

    There is no token yet at this stage, hence the None token argument.
    """
    params = dict(fs_username=username, fs_password=password)
    request = oauth.OAuthRequest.from_consumer_and_token(
        consumer,
        http_method='POST',
        http_url=AUTHEXCHANGE_URL,
        parameters=params)
    request.sign_request(SIGNATURE_METHOD, consumer, None)
    return request
def main():
    """Fetch the OAuth-protected URL given as argv[1] and print the body.

    Credentials come from ~/.oget (4 lines: key/secret/user/pass); after a
    successful authexchange the token pair is appended, making 6 lines on
    subsequent runs.
    """
    url = urlparse.urlparse(sys.argv[1])
    # Nevermind that the query can have repeated keys.
    parameters = dict(urlparse.parse_qsl(url.query))
    # NOTE(review): file handles opened here are never closed; fine for a
    # short-lived script but worth tidying.
    password_file = open(os.path.join(user.home, '.oget'))
    lines = [line.strip() for line in password_file.readlines()]
    if len(lines) == 4:
        # First run: no cached access token yet.
        cons_key, cons_key_secret, username, password = lines
        access_token = None
    else:
        # Subsequent runs: token/secret were appended by a previous run.
        cons_key, cons_key_secret, username, password, token, secret = lines
        access_token = oauth.OAuthToken(token, secret)
    consumer = oauth.OAuthConsumer(cons_key, cons_key_secret)
    if not access_token:
        # Exchange username/password for an access token, then cache it
        # back into ~/.oget for next time.
        oauth_request = create_signed_oauth_request(username, password, consumer)
        connection = httplib.HTTPConnection(SERVER)
        headers = {'Content-Type' :'application/x-www-form-urlencoded'}
        connection.request(oauth_request.http_method, AUTHEXCHANGE_URL,
            body=oauth_request.to_postdata(), headers=headers)
        auth_response = connection.getresponse().read()
        token = parse_auth_response(auth_response)
        access_token = oauth.OAuthToken(*token)
        open(os.path.join(user.home, '.oget'), 'w').write('\n'.join((
            cons_key, cons_key_secret, username, password, token[0], token[1])))
    # Sign and send the actual request with the (possibly fresh) token.
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer,
        access_token, http_method='POST', http_url=url.geturl(),
        parameters=parameters)
    oauth_request.sign_request(SIGNATURE_METHOD, consumer, access_token)
    connection = httplib.HTTPConnection(SERVER)
    connection.request(oauth_request.http_method, oauth_request.to_url(),
        body=oauth_request.to_postdata(), headers=CONTENT_TYPE_HEADER)
    print connection.getresponse().read()
    #print minidom.parse(connection.getresponse()).toprettyxml(indent=' ')
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
import os
import subprocess
import sys
# Regenerate the Java type and parser classes for the capture files named on
# the command line (default: every file in TYPESDIR).
BASEDIR = '../main/src/com/joelapenna/foursquare'
TYPESDIR = '../captures/types/v1'
captures = sys.argv[1:]
if not captures:
    captures = os.listdir(TYPESDIR)
for f in captures:
    # "venue_group.xml" -> "VenueGroup"
    basename = f.split('.')[0]
    javaname = ''.join([c.capitalize() for c in basename.split('_')])
    fullpath = os.path.join(TYPESDIR, f)
    typepath = os.path.join(BASEDIR, 'types', javaname + '.java')
    parserpath = os.path.join(BASEDIR, 'parsers', javaname + 'Parser.java')
    # NOTE(review): with shell=True and '>' redirection in cmd, the
    # stdout=sys.stdout argument has no effect on where output lands.
    cmd = 'python gen_class.py %s > %s' % (fullpath, typepath)
    print cmd
    subprocess.call(cmd, stdout=sys.stdout, shell=True)
    cmd = 'python gen_parser.py %s > %s' % (fullpath, parserpath)
    print cmd
    subprocess.call(cmd, stdout=sys.stdout, shell=True)
| Python |
#!/usr/bin/python
import logging
from xml.dom import minidom
from xml.dom import pulldom
# Type-name vocabulary shared by the code generators.
BOOLEAN = "boolean"
STRING = "String"
GROUP = "Group"
# Interfaces that all FoursquareTypes implement.
DEFAULT_INTERFACES = ['FoursquareType']
# Interfaces that specific FoursqureTypes implement.
INTERFACES = {
}
DEFAULT_CLASS_IMPORTS = [
]
CLASS_IMPORTS = {
    # 'Checkin': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Venue': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Tip': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
}
COMPLEX = [
    'Group',
    'Badge',
    'Beenhere',
    'Checkin',
    'CheckinResponse',
    'City',
    'Credentials',
    'Data',
    'Mayor',
    'Rank',
    'Score',
    'Scoring',
    'Settings',
    'Stats',
    'Tags',
    'Tip',
    'User',
    'Venue',
]
TYPES = COMPLEX + ['boolean']


def WalkNodesForAttributes(path):
    """Parse an XML type-description file and collect its attributes.

    Expected document shape:
        <venue>
            <attribute type="...">value</attribute>
        </venue>

    Returns:
        type_name - Java-style name of the top node, e.g. "Venue".
        top_node_name - raw name of the top XML element, e.g. "venue".
        attributes - {tag_name: (type, [child_type])}
    """
    doc = pulldom.parse(path)
    type_name = None
    top_node_name = None
    attributes = {}
    skip_depth = 0
    for event, node in doc:
        if skip_depth > 0:
            # We are inside a complex element: swallow its whole subtree,
            # tracking nesting so we know when it ends.
            if event == pulldom.END_ELEMENT:
                skip_depth -= 1
                logging.warn('(%s) Skip end: %s' % (str(skip_depth), node))
            elif event == pulldom.START_ELEMENT:
                logging.warn('(%s) Skipping: %s' % (str(skip_depth), node))
                skip_depth += 1
            continue
        if event != pulldom.START_ELEMENT:
            continue
        logging.warn('Parsing: ' + node.tagName)
        if type_name is None:
            # The first element seen is the top-level node; derive the
            # Java class name from it.
            type_name = ''.join(
                [word.capitalize() for word in node.tagName.split('_')])
            top_node_name = node.tagName
            logging.warn('Found Top Node Name: ' + top_node_name)
            continue
        typ = node.getAttribute('type')
        child = node.getAttribute('child')
        if typ in COMPLEX:
            # Record the complex attribute but do not walk into it.
            logging.warn('Found Complex: ' + node.tagName)
            skip_depth = 1
        elif typ not in TYPES:
            # Unknown declared types default to String.
            logging.warn('Found String: ' + typ)
            typ = STRING
        else:
            logging.warn('Found Type: ' + typ)
        logging.warn('Adding: ' + str((node, typ)))
        # setdefault: the first occurrence of a repeated tag wins.
        attributes.setdefault(node.tagName, (typ, [child]))
    logging.warn('Attr: ' + str((type_name, top_node_name, attributes)))
    return type_name, top_node_name, attributes
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 Paulo Jerônimo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__='Paulo Jerônimo (paulojeronimo@gmail.com)'
# This code is a reduction / adaptation of mirrorrr project
# (http://code.google.com/p/mirrorrr/ by Brett Slatkin)
# held specifically to achieve the goals to build a proxy for files in an
# account published in the DropBox.
# If you want a full proxy to run on GAE, use the mirrorr.
# Set up your Dropbox user number here:
DROPBOX_USER = '345266'
DROPBOX_PREFIX ='/dl.dropbox.com/u/'
DEBUG = False
HTTP_PREFIX = "http://"
IGNORE_HEADERS = frozenset([
'set-cookie',
'expires',
'cache-control',
# Ignore hop-by-hop headers
'connection',
'keep-alive',
'proxy-authenticate',
'proxy-authorization',
'te',
'trailers',
'transfer-encoding',
'upgrade',
])
import logging
import wsgiref.handlers
from google.appengine.api import urlfetch
from google.appengine.ext import webapp
from google.appengine.runtime import apiproxy_errors
class MirroredContent(object):
    # Snapshot of one fetched upstream resource: status, filtered headers,
    # body bytes, plus the original and mirror-local addresses.
    def __init__(self, original_address, translated_address,
        status, headers, data, base_url):
        self.original_address = original_address
        self.translated_address = translated_address
        self.status = status
        self.headers = headers
        self.data = data
        self.base_url = base_url
    @staticmethod
    def fetch_and_store(base_url, translated_address, mirrored_url):
        """Fetch a page.

        Args:
            base_url: The hostname of the page that's being mirrored.
            translated_address: The URL of the mirrored page on this site.
            mirrored_url: The URL of the original page. Hostname should match
                the base_url.

        Returns:
            A new MirroredContent object, if the page was successfully retrieved.
            None if any errors occurred or the content could not be retrieved.

        NOTE(review): despite the name, nothing is stored/cached here -- the
        memcache step of the original mirrorrr project appears to have been
        removed in this reduction; confirm and consider renaming.
        """
        logging.debug("Fetching '%s'", mirrored_url)
        try:
            response = urlfetch.fetch(mirrored_url)
        except (urlfetch.Error, apiproxy_errors.Error):
            logging.exception("Could not fetch URL")
            return None
        # Drop cookie/caching and hop-by-hop headers (IGNORE_HEADERS)
        # before replaying the response; header names are lower-cased.
        adjusted_headers = {}
        for key, value in response.headers.iteritems():
            adjusted_key = key.lower()
            if adjusted_key not in IGNORE_HEADERS:
                adjusted_headers[adjusted_key] = value
        return MirroredContent(
            base_url=base_url,
            original_address=mirrored_url,
            translated_address=translated_address,
            status=response.status_code,
            headers=adjusted_headers,
            data=response.content)
class MirrorHandler(webapp.RequestHandler):
    """Serves each request path by proxying the matching Dropbox file."""

    def get_relative_url(self):
        """Translate the requested path into a Dropbox public-file path."""
        scheme_prefix = self.request.scheme + "://"
        slash = self.request.url.find("/", len(scheme_prefix))
        if slash == -1:
            return "/"
        return DROPBOX_PREFIX + DROPBOX_USER + self.request.url[slash:]

    def get(self, base_url):
        assert base_url
        logging.debug('User-Agent = "%s", Referrer = "%s"',
            self.request.user_agent,
            self.request.referer)
        logging.debug('Base_url = "%s", url = "%s"', base_url, self.request.url)
        # Strip the leading '/' so HTTP_PREFIX can be prepended directly.
        translated_address = self.get_relative_url()[1:]
        content = MirroredContent.fetch_and_store(
            base_url, translated_address, HTTP_PREFIX + translated_address)
        if content is None:
            return self.error(404)
        # Replay the already-filtered upstream headers and body.
        for header_name, header_value in content.headers.iteritems():
            self.response.headers[header_name] = header_value
        self.response.out.write(content.data)
# Route everything to the mirror handler; the first path segment is captured
# as base_url.  NOTE(review): the bare "/" route supplies no capture group,
# yet MirrorHandler.get requires base_url -- verify how webapp dispatches it.
app = webapp.WSGIApplication([
    (r"/", MirrorHandler),
    (r"/([^/]+).*", MirrorHandler)
], debug=DEBUG)
def main():
    # CGI entry point for App Engine's Python runtime.
    wsgiref.handlers.CGIHandler().run(app)
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
#==============================================================================
# PyTyle - An on-demand tiling manager
# Copyright (C) 2009-2010 Andrew Gallant <andrew@pytyle.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#==============================================================================
import traceback, sys, time
import xcb.xproto
import pt.config as config
import pt.ptxcb as ptxcb
import pt.state as state
from pt.dispatcher import Dispatcher
from pt.window import Window
from pt.tile import Tile
# Load the user's configuration and flush any work it queued.
state.apply_config()
# Apply config may add tiling actions to the queue
# if tile_on_startup is enabled
ptxcb.Window.exec_queue()
Tile.exec_queue()
ptxcb.connection.push()
# Main event loop: block on the X connection and hand every event to the
# dispatcher until it signals shutdown.
while True:
    try:
        event_data = ptxcb.event.dispatch(
            ptxcb.connection.conn.wait_for_event()
        )
    except xcb.xproto.BadWindow, error:
        # The window vanished out from under us; drop the event.
        continue
    except xcb.xproto.BadAccess, error:
        # Treated as fatal (e.g. a resource another WM already owns).
        print error
        break
    except xcb.xproto.AccessError, error:
        # Non-fatal access problem: report it and keep running.
        print error
        continue
    if not event_data:
        continue
    d = Dispatcher(event_data)
    if d.stop():
        break
ptxcb.connection.disconnect()
| Python |
#==============================================================================
# PyTyle - An on-demand tiling manager
# Copyright (C) 2009-2010 Andrew Gallant <andrew@pytyle.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#==============================================================================
import sys
from distutils import sysconfig
from distutils.core import setup
try:
import xcb.xproto, xcb.xcb, xcb.xinerama, xcb.randr
except:
print ''
print 'PyTyle 2 requires the X Python Binding'
print 'See: http://cgit.freedesktop.org/xcb/xpyb/'
print 'Or xpyb-ng can be used. See', 'https://github.com/dequis/xpyb-ng'
sys.exit(0)
setup(
name = "pytyle2",
author = "Andrew Gallant",
author_email = "andrew@pytyle.com",
version = "2.0.0",
license = "GPL",
description = "An on-demand tiling manager for EWMH compliant window managers",
long_description = "See README",
url = "http://pytyle.com",
platforms = 'POSIX',
packages = ['pt', 'pt.ptxcb', 'pt.tilers'],
data_files = [
(
sysconfig.get_python_lib() + '/pt',
[
'./config.ini', './INSTALL', './LICENSE',
'./README', './TODO', './CHANGELOG'
]
)
],
scripts = ['pytyle2']
)
| Python |
import ptxcb
from tile import Tile
from container import Container
class ManualTile(Tile):
def __init__(self, monitor):
Tile.__init__(self, monitor)
self.root = None
self.catchall = None
#
# Helper methods
#
def add(self, win):
if (
win.tilable() and self.tiling and self.root and
win.get_winclass().lower() not in self.get_option('ignore')
):
possible = self.get_open_leaf()
if possible:
possible.cont.set_window(win)
else:
self.get_last_active().activate()
self.enqueue()
def remove(self, win, reset_window=False):
if win.container and not win.pytyle_place_holder() and self.tiling:
win.container.set_window()
self.enqueue()
def find_container(self, cont):
assert self.root
for child in self.root.childs():
if child.cont == cont:
return child
return None
def get_active(self):
assert self.root
active = self.monitor.get_active()
if active:
if active.container:
find_leaf = self.find_container(active.container)
if find_leaf:
return find_leaf
return self.root.childs().next()
def get_active_cont(self):
return self.get_active().cont
def get_last_active(self):
    """Return the most recently active leaf frame (by stacking order),
    falling back to the first leaf in the tree.

    Fix: the old `possibles[-1] or ...` raised IndexError on an empty
    list instead of falling through to the default.
    """
    assert self.root
    possibles = self.get_last_active_list()
    if possibles:
        return possibles[-1]
    return self.root.childs().next()
def get_last_active_list(self):
assert self.root
wids = ptxcb.XROOT.get_window_stacked_ids()
possibles = {}
for win in self.monitor.iter_windows():
if win.id in wids and win.container and win.container.tiler == self:
leaf = self.find_container(win.container)
if leaf:
possibles[wids.index(win.id)] = leaf
retval = []
for i in sorted(possibles):
retval.append(possibles[i])
return retval
def get_open_leaf(self):
assert self.root
if self.catchall and self.catchall.cont.empty:
return self.catchall
for leaf in self.get_last_active_list()[::-1]:
if leaf.cont.empty:
return leaf
for child in self.root.childs():
if child.cont.empty:
return child
return None
def iter_hidden(self):
for win in self.monitor.iter_windows():
if (
not win.container and win.tilable() and
win.get_winclass().lower() not in self.get_option('ignore')
):
yield win
def promote(self):
possible = self.get_open_leaf()
if possible:
for win in self.iter_hidden():
possible.cont.set_window(win)
possible = self.get_open_leaf()
if not possible:
break
def borders_add(self, do_window=True):
assert self.root
for child in self.root.childs():
child.cont.decorations(False, do_window)
def borders_remove(self, do_window=True):
assert self.root
for child in self.root.childs():
child.cont.decorations(True, do_window)
def mouse_find(self, x, y):
assert self.root
for child in self.root.childs():
x1, x2 = child.x, child.x + child.w
y1, y2 = child.y, child.y + child.h
if (
x >= x1 and x <= x2 and
y >= y1 and y <= y2
):
return child.cont
return None
def mouse_switch(self, cont, x, y):
assert self.root
switch = self.mouse_find(x, y)
if switch:
cont.switch(switch)
def destroy(self):
if self.root:
self.cmd_untile()
def detach(self):
self.tiling = False
if self.root:
for win in self.iter_hidden():
win.set_below(False)
for child in self.root.childs():
child.cont.remove()
#
# Commands
#
def cmd_close_frame(self):
assert self.root
frm = self.get_active()
if frm == self.root:
return
frm.parent.remove_child(frm)
if not frm.cont.win.pytyle_place_holder():
frm.cont.win.restack(True)
frm.cont.remove(True)
self.get_last_active().activate()
self.promote()
self.enqueue()
def cmd_only(self):
assert self.root
only = self.get_active()
if only == self.root:
return
for child in self.root.childs():
child.cont.remove()
self.root = None
self.enqueue()
def cmd_cycle(self):
assert self.root
frm = self.get_active()
for child in self.root.childs():
if frm != child and child.hidden:
child.reset_cycle()
frm.reset_cycle()
hidden = frm.get_hidden_list()
if len(hidden) > 1:
ind = frm.cyc_ind % len(hidden)
if hidden[ind] == frm.cont.win:
ind += 1
ind %= len(hidden)
frm.cont.window_below(True)
frm.cont.remove()
frm.cont.set_window(hidden[ind])
frm.cont.still()
frm.cont.window_below(False)
frm.cyc_ind += 1
frm.activate()
def cmd_toggle_catchall(self):
assert self.root
frm = self.get_active()
if self.catchall:
self.catchall.cont.default_color = self.get_option(
'borders_inactive_color'
)
if self.catchall != frm:
self.catchall.cont.still()
if self.catchall != frm:
self.catchall = frm
self.catchall.cont.default_color = self.get_option(
'borders_catchall_color'
)
else:
self.catchall = None
def cmd_float(self):
assert self.root
active = self.monitor.get_active()
if (
active and active.monitor.workspace.id == self.workspace.id and
active.monitor.id == self.monitor.id
):
if not active.floating:
active.floating = True
self.remove(active, reset_window=True)
else:
active.floating = False
self.add(active)
def cmd_hsplit(self):
assert self.root
try:
cont = Container(self, self.iter_hidden().next())
except StopIteration:
cont = Container(self)
self.get_active().hsplit(cont)
self.enqueue()
def cmd_vsplit(self):
assert self.root
try:
cont = Container(self, self.iter_hidden().next())
except StopIteration:
cont = Container(self)
self.get_active().vsplit(cont)
self.enqueue()
def cmd_up(self):
assert self.root
frm = self.get_active().up()
if frm:
frm.activate()
def cmd_up_move(self):
assert self.root
active = self.get_active()
frm = active.up()
if active and frm:
active.cont.switch(frm.cont)
def cmd_up_resize(self):
assert self.root
frm = self.get_active()
if not frm.down(self.get_option('shallow_resize')):
frm.set_up_proportion(self.get_option('step_size'))
else:
frm.set_down_proportion(-self.get_option('step_size'))
self.enqueue()
def cmd_up_increase(self):
assert self.root
frm = self.get_active()
frm.set_up_proportion(self.get_option('step_size'))
self.enqueue()
def cmd_up_decrease(self):
assert self.root
frm = self.get_active()
frm.set_down_proportion(-self.get_option('step_size'))
self.enqueue()
def cmd_down(self):
assert self.root
frm = self.get_active().down()
if frm:
frm.activate()
def cmd_down_move(self):
assert self.root
active = self.get_active()
frm = active.down()
if active and frm:
active.cont.switch(frm.cont)
def cmd_down_resize(self):
assert self.root
frm = self.get_active()
if not frm.down(self.get_option('shallow_resize')):
frm.set_up_proportion(-self.get_option('step_size'))
else:
frm.set_down_proportion(self.get_option('step_size'))
self.enqueue()
def cmd_down_increase(self):
assert self.root
frm = self.get_active()
frm.set_down_proportion(self.get_option('step_size'))
self.enqueue()
def cmd_down_decrease(self):
assert self.root
frm = self.get_active()
frm.set_up_proportion(-self.get_option('step_size'))
self.enqueue()
def cmd_left(self):
assert self.root
frm = self.get_active().left()
if frm:
frm.activate()
def cmd_left_move(self):
assert self.root
active = self.get_active()
frm = active.left()
if active and frm:
active.cont.switch(frm.cont)
def cmd_left_resize(self):
assert self.root
frm = self.get_active()
if not frm.right(self.get_option('shallow_resize')):
frm.set_left_proportion(self.get_option('step_size'))
else:
frm.set_right_proportion(-self.get_option('step_size'))
self.enqueue()
def cmd_left_increase(self):
assert self.root
frm = self.get_active()
frm.set_left_proportion(self.get_option('step_size'))
self.enqueue()
def cmd_left_decrease(self):
assert self.root
frm = self.get_active()
frm.set_right_proportion(-self.get_option('step_size'))
self.enqueue()
def cmd_right(self):
assert self.root
frm = self.get_active().right()
if frm:
frm.activate()
def cmd_right_move(self):
assert self.root
active = self.get_active()
frm = active.right()
if active and frm:
active.cont.switch(frm.cont)
def cmd_right_resize(self):
assert self.root
frm = self.get_active()
if not frm.right(self.get_option('shallow_resize')):
frm.set_left_proportion(-self.get_option('step_size'))
else:
frm.set_right_proportion(self.get_option('step_size'))
self.enqueue()
def cmd_right_increase(self):
assert self.root
frm = self.get_active()
frm.set_right_proportion(self.get_option('step_size'))
self.enqueue()
def cmd_right_decrease(self):
assert self.root
frm = self.get_active()
frm.set_left_proportion(-self.get_option('step_size'))
self.enqueue()
def cmd_tile(self):
Tile.cmd_tile(self)
if not self.root:
self.root = LeafFrame(self, None, Container(self))
active = self.monitor.get_active()
if active:
self.add(active)
for win in self.monitor.iter_windows():
if win != active:
self.add(win)
else:
self.promote()
for child in self.root.childs():
if child.cont.empty and not child.cont.win:
child.cont.set_window(force=True)
self.root.moveresize(
self.monitor.wa_x, self.monitor.wa_y,
self.monitor.wa_width, self.monitor.wa_height
)
for win in self.iter_hidden():
win.set_below(True)
for child in self.root.childs():
child.cont.window_below(False)
child.reset_cycle()
def cmd_untile(self):
assert self.root
Tile.cmd_untile(self)
for win in self.iter_hidden():
win.set_below(False)
for child in self.root.childs():
child.cont.remove(reset_window=True)
self.root = None
def cmd_print_tree(self):
print '-' * 30
print 'Hidden:', [leaf for leaf in self.iter_hidden()]
print 'Catchall:', self.catchall
print '-' * 15
self.root.print_tree()
print '-' * 30
class Frame(object):
    """A node in the tiling layout tree.

    Interior frames hold child frames; each node carries its geometry
    (x, y, w, h) and a `proportion` describing its share of the parent.
    """

    def __init__(self, tiler, parent):
        self.tiler = tiler
        self.parent = parent
        self.children = []
        self.proportion = 1.0
        self.x = 0
        self.y = 0
        self.w = 1
        self.h = 1

    def print_tree(self, depth=0):
        """Dump this subtree to stdout, one tab of indent per level."""
        line = '%s%d,%d,%d,%d,%s' % ('\t' * depth, self.x, self.y,
                                     self.w, self.h,
                                     self.__class__.__name__)
        print(line)
        for kid in self.children:
            kid.print_tree(depth + 1)

    def childs(self):
        """Yield every leaf frame in this subtree, depth-first."""
        for kid in self.children:
            if isinstance(kid, LeafFrame):
                yield kid
            else:
                for leaf in kid.childs():
                    yield leaf

    def add_child(self, frame, before_index=None):
        """Attach `frame`, optionally inserting before `before_index`."""
        assert (
            isinstance(frame, Frame) and frame not in self.children and
            (before_index is None or before_index < len(self.children))
        )
        if before_index is None:
            self.children.append(frame)
        else:
            self.children.insert(before_index, frame)

    def remove_child(self, frame):
        """Detach `frame`, merging or redistributing proportions."""
        assert isinstance(frame, Frame) and frame in self.children
        self.children.remove(frame)
        if len(self.children) == 1:
            # A single survivor replaces this frame entirely.
            survivor = self.children[0]
            survivor.proportion = self.proportion
            survivor.parent = self.parent
            if survivor.parent:
                survivor.parent.replace_child(self, survivor)
            else:
                survivor.tiler.root = survivor
        else:
            # Share the removed frame's proportion among the remainder.
            share = frame.proportion / len(self.children)
            for kid in self.children:
                kid.proportion += share

    def replace_child(self, find, replace):
        """Swap `find` for `replace` in place, preserving position."""
        assert (
            isinstance(find, Frame) and
            isinstance(replace, Frame) and
            find in self.children
        )
        self.children[self.children.index(find)] = replace

    def moveresize(self, x, y, w, h):
        self._moveresize(x, y, w, h)

    def _moveresize(self, x, y, w, h):
        self.x, self.y, self.w, self.h = x, y, w, h
class LeafFrame(Frame):
def __init__(self, tiler, parent, cont):
Frame.__init__(self, tiler, parent)
self.cont = cont
self.cyc_ind = 0
self.hidden = []
def print_tree(self, depth=0):
tp = ('\t' * depth) + '%d,%d,%d,%d,%f,LEAF' % (self.x, self.y, self.w, self.h, self.proportion)
if self.cont.win:
tp += ' --- ' + self.cont.win.__str__()
print tp
def childs(self):
yield self
def add_child(self, frame):
pass
def remove_child(self, frame):
pass
def activate(self):
self.cont.activate()
def get_hidden_list(self):
if self.hidden:
return self.hidden
self.hidden = []
for child in self.tiler.iter_hidden():
self.hidden.append(child)
self.hidden.append(self.cont.win)
return self.hidden
def reset_cycle(self):
self.hidden = []
self.cyc_ind = 0
def moveresize(self, x, y, w, h):
    """Place this leaf at (x, y, w, h), shifting/shrinking the siblings to
    its right to absorb the horizontal change.

    Fix: when this leaf is the last child there are no right-hand
    siblings, and the old unconditional `/ len(right)` raised
    ZeroDivisionError; now it simply returns.
    """
    self._moveresize(x, y, w, h)
    if not self.parent:
        return
    siblings = self.parent.children
    right = siblings[siblings.index(self) + 1:]
    if not right:
        return
    new_x = x + w
    # Split the remaining horizontal overflow evenly across the right side.
    new_w_subtract = (self.parent.w - new_x) / len(right)
    for sibling in right:
        width = sibling.w - new_w_subtract
        sibling._moveresize(new_x, sibling.y, width, sibling.h)
        new_x += width
def _moveresize(self, x, y, w, h):
Frame._moveresize(self, x, y, w, h)
self.cont.moveresize(x, y, w, h)
self.cont.window_raise()
def _find_like_parent(self, cls, no_child_index):
child = self
parent = self.parent
while parent:
if (isinstance(parent, cls) and
parent.children[no_child_index] != child):
break
child = parent
parent = parent.parent
return parent, child
def select(self, **args):
return self
def set_up_proportion(self, prop_change):
parent, child = self.parent, self
if not self.tiler.get_option('shallow_resize'):
parent, child = self._find_like_parent(VerticalFrame, 0)
if parent:
parent.set_up_proportion(child, prop_change)
def set_down_proportion(self, prop_change):
parent, child = self.parent, self
if not self.tiler.get_option('shallow_resize'):
parent, child = self._find_like_parent(VerticalFrame, -1)
if parent:
parent.set_down_proportion(child, prop_change)
def set_left_proportion(self, prop_change):
parent, child = self.parent, self
if not self.tiler.get_option('shallow_resize'):
parent, child = self._find_like_parent(HorizontalFrame, 0)
if parent:
parent.set_left_proportion(child, prop_change)
def set_right_proportion(self, prop_change):
parent, child = self.parent, self
if not self.tiler.get_option('shallow_resize'):
parent, child = self._find_like_parent(HorizontalFrame, -1)
if parent:
parent.set_right_proportion(child, prop_change)
def up(self, shallow=False):
parent, child = self.parent, self
if not shallow:
parent, child = self._find_like_parent(VerticalFrame, 0)
elif parent and not isinstance(parent, VerticalFrame):
parent, child = parent.parent, parent
if parent and parent.children[0] != child:
cs = parent.children
return cs[cs.index(child) - 1].select(
where='up',
x=self.x,
w=self.w
)
return None
def down(self, shallow=False):
parent, child = self.parent, self
if not shallow:
parent, child = self._find_like_parent(VerticalFrame, -1)
elif parent and not isinstance(parent, VerticalFrame):
parent, child = parent.parent, parent
if parent and parent.children[-1] != child:
cs = parent.children
return cs[cs.index(child) + 1].select(
where='down',
x=self.x,
w=self.w
)
return None
def left(self, shallow=False):
parent, child = self.parent, self
if not shallow:
parent, child = self._find_like_parent(HorizontalFrame, 0)
elif parent and not isinstance(parent, HorizontalFrame):
parent, child = parent.parent, parent
if parent and parent.children[0] != child:
cs = parent.children
return cs[cs.index(child) - 1].select(
where='left',
y=self.y,
h=self.h
)
return None
def right(self, shallow=False):
parent, child = self.parent, self
if not shallow:
parent, child = self._find_like_parent(HorizontalFrame, -1)
elif parent and not isinstance(parent, HorizontalFrame):
parent, child = parent.parent, parent
if parent and parent.children[-1] != child:
cs = parent.children
return cs[cs.index(child) + 1].select(
where='right',
y=self.y,
h=self.h
)
return None
    def hsplit(self, cont):
        """Split this frame horizontally, putting `cont` in a new leaf
        to the right of self.

        If our parent is already a HorizontalFrame we simply insert a
        new sibling; otherwise we wrap self in a fresh HorizontalFrame.
        """
        assert isinstance(cont, Container)
        # No parent for now
        leaf = LeafFrame(self.tiler, None, cont)
        if isinstance(self.parent, HorizontalFrame):
            # Insert the new leaf right after self, then rescale every
            # child by (n-1)/n so the proportions keep summing to 1.
            leaf.proportion = 1.0 / len(self.parent.children)
            if self == self.parent.children[-1]:
                self.parent.add_child(leaf)
            else:
                self.parent.add_child(leaf, self.parent.children.index(self) + 1)
            clen = float(len(self.parent.children))
            factor = (clen - 1) / clen
            for child in self.parent.children:
                child.proportion *= factor
        else:
            # Wrap self in a new HorizontalFrame; self and the new leaf
            # each take half of the space self used to occupy.
            self.parent = HorizontalFrame(self.tiler, self.parent)
            self.parent.proportion = self.proportion
            self.proportion = 0.5
            leaf.proportion = 0.5
            self.parent.add_child(self)
            self.parent.add_child(leaf)
            if not self.parent.parent:
                # self was the root; the new frame becomes the root.
                self.tiler.root = self.parent
            else:
                # Hook the new frame into the grandparent where self was.
                self.parent.parent.replace_child(self, self.parent)
        leaf.parent = self.parent
    def vsplit(self, cont):
        """Split this frame vertically, putting `cont` in a new leaf
        below self.

        Mirror image of hsplit: extends an enclosing VerticalFrame or
        wraps self in a new one.
        """
        assert isinstance(cont, Container)
        # No parent for now
        leaf = LeafFrame(self.tiler, None, cont)
        if isinstance(self.parent, VerticalFrame):
            # Insert the new leaf right after self, then rescale every
            # child by (n-1)/n so the proportions keep summing to 1.
            leaf.proportion = 1.0 / len(self.parent.children)
            if self == self.parent.children[-1]:
                self.parent.add_child(leaf)
            else:
                self.parent.add_child(leaf, self.parent.children.index(self) + 1)
            clen = float(len(self.parent.children))
            factor = (clen - 1) / clen
            for child in self.parent.children:
                child.proportion *= factor
        else:
            # Wrap self in a new VerticalFrame; self and the new leaf
            # each take half of the space self used to occupy.
            self.parent = VerticalFrame(self.tiler, self.parent)
            self.parent.proportion = self.proportion
            self.proportion = 0.5
            leaf.proportion = 0.5
            self.parent.add_child(self)
            self.parent.add_child(leaf)
            if not self.parent.parent:
                # self was the root; the new frame becomes the root.
                self.tiler.root = self.parent
            else:
                # Hook the new frame into the grandparent where self was.
                self.parent.parent.replace_child(self, self.parent)
        leaf.parent = self.parent
def __str__(self):
return self.cont.__str__()
class HorizontalFrame(Frame):
    """Frame whose children are laid out left-to-right; each child gets
    proportion * width pixels of this frame's width."""
    def select(self, where, x=None, y=None, w=None, h=None):
        # Descend toward the child closest to the requested direction.
        if where == 'left':
            # Entering from the right edge: pick the right-most child.
            return self.children[-1].select(
                where=where, x=x, y=y, w=w, h=h
            )
        elif where == 'right':
            return self.children[0].select(
                where=where, x=x, y=y, w=w, h=h
            )
        elif where in ('up', 'down'):
            if x is not None and w is not None:
                # Find the frame with the most overlap...
                overlap = []
                for c in self.children:
                    overlap.append(intoverlap(
                        x, x + w,
                        c.x, c.x + c.w
                    ))
                mi = overlap.index(max(overlap))
                return self.children[mi].select(
                    where=where, x=x, y=y, w=w, h=h
                )
            else:
                return self.children[0].select(
                    where=where, x=x, y=y, w=w, h=h
                )
        return None
    def set_up_proportion(self, child, prop_change):
        # Vertical resizing is delegated to an enclosing VerticalFrame.
        assert child in self.children
        if self.parent:
            self.parent.set_up_proportion(self, prop_change)
    def set_down_proportion(self, child, prop_change):
        assert child in self.children
        if self.parent:
            self.parent.set_down_proportion(self, prop_change)
    def set_left_proportion(self, child, prop_change):
        # Grow `child` by prop_change, taking the space evenly from all
        # siblings to its left so proportions keep summing to 1.
        assert child in self.children
        left = self.children[:self.children.index(child)]
        if left:
            add_to = -prop_change / len(left)
            for c in left:
                c.proportion += add_to
            child.proportion += prop_change
    def set_right_proportion(self, child, prop_change):
        # Mirror of set_left_proportion, taking space from the right.
        assert child in self.children
        right = self.children[self.children.index(child) + 1:]
        if right:
            add_to = -prop_change / len(right)
            for c in right:
                c.proportion += add_to
            child.proportion += prop_change
    def _moveresize(self, x, y, w, h):
        # Place children side by side, each proportion * w pixels wide.
        Frame._moveresize(self, x, y, w, h)
        s_x = x
        for child in self.children:
            width = int(w * child.proportion)
            child._moveresize(s_x, y, width, h)
            s_x += width
class VerticalFrame(Frame):
    """Frame whose children are stacked top-to-bottom; each child gets
    proportion * height pixels of this frame's height."""
    def select(self, where, x=None, y=None, w=None, h=None):
        # Descend toward the child closest to the requested direction.
        if where == 'up':
            # Entering from below: pick the bottom-most child.
            return self.children[-1].select(
                where=where, x=x, y=y, w=w, h=h
            )
        elif where == 'down':
            return self.children[0].select(
                where=where, x=x, y=y, w=w, h=h
            )
        elif where in ('left', 'right'):
            if y is not None and h is not None:
                # Find the frame with the most overlap...
                overlap = []
                for c in self.children:
                    overlap.append(intoverlap(
                        y, y + h,
                        c.y, c.y + c.h
                    ))
                mi = overlap.index(max(overlap))
                return self.children[mi].select(
                    where=where, x=x, y=y, w=w, h=h
                )
            else:
                return self.children[0].select(
                    where=where, x=x, y=y, w=w, h=h
                )
        return None
    def set_up_proportion(self, child, prop_change):
        # Grow `child` by prop_change, taking the space evenly from all
        # siblings above it so proportions keep summing to 1.
        assert child in self.children
        up = self.children[:self.children.index(child)]
        if up:
            add_to = -prop_change / len(up)
            for c in up:
                c.proportion += add_to
            child.proportion += prop_change
    def set_down_proportion(self, child, prop_change):
        # Mirror of set_up_proportion, taking space from below.
        assert child in self.children
        down = self.children[self.children.index(child) + 1:]
        if down:
            add_to = -prop_change / len(down)
            for c in down:
                c.proportion += add_to
            child.proportion += prop_change
    def set_left_proportion(self, child, prop_change):
        # Horizontal resizing is delegated to an enclosing HorizontalFrame.
        assert child in self.children
        if self.parent:
            self.parent.set_left_proportion(self, prop_change)
    def set_right_proportion(self, child, prop_change):
        assert child in self.children
        if self.parent:
            self.parent.set_right_proportion(self, prop_change)
    def _moveresize(self, x, y, w, h):
        # Stack children vertically, each proportion * h pixels tall.
        Frame._moveresize(self, x, y, w, h)
        s_y = y
        for child in self.children:
            height = int(h * child.proportion)
            child._moveresize(x, s_y, w, height)
            s_y += height
def intoverlap(s1, e1, s2, e2):
    """Return the length of the overlap between intervals [s1, e1] and
    [s2, e2], or 0 when they are disjoint.

    Both intervals must be non-degenerate (end strictly greater than
    start).

    Bug fix: the previous branch chain missed the touching case
    s2 == e1 (with e2 > e1) and fell through to `return None`; the
    closed-form min/max expression handles every arrangement and
    returns 0 for merely touching intervals.
    """
    assert e1 > s1 and e2 > s2
    return max(0, min(e1, e2) - max(s1, s2))
| Python |
import ptxcb
class Workspace(object):
    """A single EWMH desktop and the monitors attached to it."""
    # Registry of every known workspace, keyed by workspace id.
    WORKSPACES = {}
    @staticmethod
    def add(wsid):
        """Register workspace `wsid` (delegates to the Desktop subclass)."""
        Desktop.add(wsid)
    @staticmethod
    def iter_all_monitors():
        """Yield every monitor of every known workspace."""
        for workspace in Workspace.WORKSPACES.values():
            for monitor in workspace.iter_monitors():
                yield monitor
    @staticmethod
    def remove(wsid):
        """Forget workspace `wsid` (delegates to the Desktop subclass)."""
        Desktop.remove(wsid)
    def __init__(self, wsid, x, y, width, height, total_width, total_height):
        self.id = wsid
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        # Geometry of the whole (possibly multi-head) desktop.
        self.total_width = total_width
        self.total_height = total_height
        # Monitors on this workspace, keyed by monitor id.
        self.monitors = {}
    def contains(self, wsid):
        """True when `wsid` names this workspace or is the wildcard 'all'."""
        return wsid == 'all' or self.id == wsid
    def get_monitor(self, mid):
        """Return the monitor with id `mid` (KeyError if absent)."""
        return self.monitors[mid]
    def get_monitor_xy(self, x, y):
        """Return the monitor containing point (x, y), or None."""
        for monitor in self.iter_monitors():
            if monitor.contains(x, y):
                return monitor
        return None
    def has_monitor(self, mid):
        """True when a monitor with id `mid` is registered here."""
        return mid in self.monitors
    def iter_monitors(self):
        """Yield this workspace's monitors."""
        for monitor in self.monitors.values():
            yield monitor
    def __str__(self):
        return 'Workspace %d - [X: %d, Y: %d, Width: %d, Height: %d]' % (
            self.id, self.x, self.y, self.width, self.height
        )
class Desktop(Workspace):
    """Workspace flavour for classic (non-viewport) EWMH desktops."""
    @staticmethod
    def add(wsid):
        """Create a Desktop for `wsid` from the root window's geometry
        and per-desktop workarea, and register it."""
        geom = ptxcb.XROOT.get_desktop_geometry()
        area = ptxcb.XROOT.get_workarea()[wsid]
        Desktop.WORKSPACES[wsid] = Desktop(
            wsid,
            area['x'],
            area['y'],
            area['width'],
            area['height'],
            geom['width'],
            geom['height']
        )
    @staticmethod
    def remove(wsid):
        """Drop the workspace registered under `wsid`."""
        del Workspace.WORKSPACES[wsid]
class Viewport(Workspace):
    # Placeholder for viewport-style window managers (one large desktop
    # scrolled in screen-sized viewports); not implemented yet.
    def __init__(self):
        pass
| Python |
import time
import config
import ptxcb
import state
import tilers
from command import Command
from window import Window
from tile import Tile
class Dispatcher(object):
    """Routes one X event (given as a dict) to the handler method of the
    same name, then flushes the queued window/tile/X work in one batch."""
    def __init__(self, event_data):
        self._event_data = event_data
        # Set to True by KeyPressEvent when a 'quit' command is seen.
        self._stop = False
        assert 'event' in self._event_data
        # Handler methods are named exactly after the X event name.
        if hasattr(self, self._event_data['event']):
            getattr(self, self._event_data['event'])()
        else:
            print 'Unrecognized event: %s' % self._event_data['event']
            return
        # Flush everything the handler queued in a single X round-trip.
        ptxcb.Window.exec_queue()
        Tile.exec_queue()
        ptxcb.connection.push()
    def stop(self):
        # True once a 'quit' command has been processed.
        return self._stop
    def KeyPressEvent(self):
        # Translate the key chord into a Command, handle the global
        # commands here and hand everything else to the active tiler.
        cmd = Command.lookup(self._event_data['keycode'], self._event_data['modifiers'])
        if not cmd:
            return
        x = cmd.get_global_command()
        if x == 'quit':
            for tiler in state.iter_tilers():
                tiler.cmd_untile()
            self._stop = True
        elif x == 'debug':
            state.print_hierarchy(*state.get_active_wsid_and_mid())
        elif x == 'refresh_workarea':
            state.update_property('_NET_WORKAREA')
        elif x == 'reload_configuration':
            config.load_config_file()
            state.update_NET_DESKTOP_GEOMETRY(True)
            state.apply_config()
        else:
            Tile.dispatch(state.get_active_monitor(), cmd)
    def ConfigureNotifyEvent(self):
        win = Window.deep_lookup(self._event_data['window'].wid)
        mt_off = config.get_option(
            'movetime_offset',
            *state.get_active_wsid_and_mid()
        )
        # Ignore configure events caused by PyTyle's own recent moves
        # (within `movetime_offset` seconds of pytyle_moved_time).
        if (
            win and win.lives() and not win.floating and
            (time.time() - win.pytyle_moved_time) > mt_off
        ):
            # A same-size configure while the pointer is grabbed and a
            # button is down means the user is dragging a tiled window.
            if (
                state.pointer_grab and
                win.width == self._event_data['width'] and
                win.height == self._event_data['height']
            ):
                pointer = ptxcb.XROOT.query_pointer()
                if ptxcb.XROOT.button_pressed():
                    state.moving = win
                    state.moving.moving = True
            win.set_geometry(
                self._event_data['x'],
                self._event_data['y'],
                self._event_data['width'],
                self._event_data['height']
            )
    def PropertyNotifyEvent(self):
        # Refresh the root-level cache for this atom, then let the
        # affected window update its own view of the property.
        a = self._event_data['atom']
        state.update_property(a)
        if self._event_data['window']:
            win = Window.lookup(self._event_data['window'].wid)
            if win and win.lives():
                win.update_property(a)
    def FocusInEvent(self):
        # An 'Ungrab' focus event marks the end of a pointer drag;
        # finish any drag-initiated container swap and retile.
        if self._event_data['mode'] == 'Ungrab':
            state.pointer_grab = False
            if state.moving:
                win = state.moving
                pointer = ptxcb.XROOT.query_pointer()
                tiler = win.get_tiler()
                if tiler:
                    if tiler.tiling:
                        tiler.mouse_switch(
                            win.container,
                            pointer.root_x,
                            pointer.root_y
                        )
                    tiler.enqueue()
                state.moving.moving = False
                state.moving = False
    def FocusOutEvent(self):
        # A 'Grab' focus-out means the pointer was grabbed (drag start).
        if self._event_data['mode'] == 'Grab':
            state.pointer_grab = True
| Python |
from tile import Tile
from container import Container
class AutoTile(Tile):
    """Base class for automatic tiling layouts.

    Windows live in an AutoStore (self.store) that partitions them into
    master and slave containers; concrete subclasses lay those out in
    cmd_tile.
    """
    def __init__(self, monitor):
        Tile.__init__(self, monitor)
        # Created lazily in cmd_tile.
        self.store = None
        # Rotating index into the slave list used by cmd_cycle.
        self.cycle_index = 0
    #
    # Helper methods
    #
    def add(self, win):
        # Wrap the window in a Container and queue a retile, unless the
        # window is untilable or its class is configured as ignored.
        if (
            win.tilable() and self.tiling and
            win.get_winclass().lower() not in self.get_option('ignore')
        ):
            cont = Container(self, win)
            self.store.add(cont)
            self.enqueue()
    def remove(self, win, reset_window=False):
        # Drop the window's container from the store and queue a retile.
        if win.container and self.tiling:
            self.store.remove(win.container)
            win.container.remove(reset_window=reset_window)
            self.enqueue()
    def mouse_find(self, x, y):
        # Return the container whose geometry contains point (x, y).
        if self.store:
            for cont in self.store.all():
                x1, x2 = cont.x, cont.x + cont.w
                y1, y2 = cont.y, cont.y + cont.h
                if (
                    x >= x1 and x <= x2 and
                    y >= y1 and y <= y2
                ):
                    return cont
        return None
    def mouse_switch(self, cont, x, y):
        # Swap `cont` with whichever container is under the pointer.
        if self.store:
            switch = self.mouse_find(x, y)
            if switch:
                cont.switch(switch)
    def borders_add(self, do_window=True):
        # "Borders on" means decorations off for every container.
        if self.store:
            for cont in self.store.all():
                cont.decorations(False, do_window)
    def borders_remove(self, do_window=True):
        if self.store:
            for cont in self.store.all():
                cont.decorations(True, do_window)
    def destroy(self):
        self.cmd_untile()
    def detach(self):
        # Stop tiling and release every container without retiling.
        self.tiling = False
        if self.store:
            # Iterate over a copy: cont.remove() mutates the store lists.
            for cont in self.store.all()[:]:
                cont.remove()
            self.store.reset()
    def get_active(self):
        # Prefer the monitor's active window when it is one of ours;
        # otherwise fall back to the first stored container.
        active = self.monitor.get_active()
        if active:
            if active.container and active.container in self.store.all():
                return active.container
        elif self.store:
            return self.store.all()[0]
        return None
    def get_active_cont(self):
        return self.get_active()
    def get_next(self):
        # Next container in cycling order: masters are traversed in
        # reverse, slaves forward, wrapping around the combined list.
        active = self.get_active()
        if active:
            a = self.store.all()
            m = self.store.masters
            s = self.store.slaves
            if active in m:
                if m.index(active) == 0:
                    # First master wraps to just past the last master.
                    return a[(a.index(m[-1]) + 1) % len(a)]
                else:
                    return a[(a.index(active) - 1) % len(a)]
            else:
                if m and s.index(active) == len(s) - 1:
                    # Last slave wraps back to the last master.
                    return m[-1]
                else:
                    return a[(a.index(active) + 1) % len(a)]
        return None
    def get_previous(self):
        # Mirror image of get_next.
        active = self.get_active()
        if active:
            a = self.store.all()
            m = self.store.masters
            s = self.store.slaves
            if active in m:
                if m.index(active) == len(m) - 1:
                    return a[-1]
                else:
                    return a[(a.index(active) + 1) % len(a)]
            else:
                if m and s.index(active) == 0:
                    return m[0]
                else:
                    return a[(a.index(active) - 1) % len(a)]
        return None
    #
    # Commands
    #
    def cmd_cycle(self):
        # Swap the first master with successive slaves on each call.
        if self.store.masters and self.store.slaves:
            if self.cycle_index >= len(self.store.slaves):
                self.cycle_index = 0
            master = self.store.masters[0]
            slave = self.store.slaves[self.cycle_index]
            master.switch(slave)
            master.activate()
            self.cycle_index += 1
    def cmd_float(self):
        # Toggle floating for the active window on this tiler's monitor.
        active = self.monitor.get_active()
        if active and active.monitor.workspace.id == self.workspace.id and active.monitor.id == self.monitor.id:
            if not active.floating:
                active.floating = True
                self.remove(active, reset_window=True)
            else:
                active.floating = False
                self.add(active)
    def cmd_focus_master(self):
        master = self.store.masters[0]
        if master:
            master.activate()
    def cmd_increase_master(self):
        # No-op here; subclasses with a resizable master area override.
        pass
    def cmd_decrease_master(self):
        pass
    def cmd_increment_masters(self):
        self.store.inc_masters()
        self.enqueue()
    def cmd_decrement_masters(self):
        self.store.dec_masters()
        self.enqueue()
    def cmd_make_active_master(self):
        # Swap the active container into the first master position.
        if self.store.masters:
            active = self.get_active()
            master = self.store.masters[0]
            if active != master:
                master.switch(active)
    def cmd_next(self):
        next = self.get_next()
        if next:
            next.activate()
    def cmd_previous(self):
        previous = self.get_previous()
        if previous:
            previous.activate()
    def cmd_switch_next(self):
        active = self.get_active()
        next = self.get_next()
        if active and next:
            active.switch(next)
    def cmd_switch_previous(self):
        active = self.get_active()
        previous = self.get_previous()
        if active and previous:
            active.switch(previous)
    def cmd_tile(self):
        # Seed the store from the monitor's windows on the first tile
        # (active window first so it becomes a master).
        Tile.cmd_tile(self)
        if not self.store:
            self.store = AutoStore()
        if self.store.empty():
            active = self.monitor.get_active()
            if active:
                self.add(active)
            for win in self.monitor.iter_windows():
                if win != active:
                    self.add(win)
    def cmd_untile(self):
        # Release every container, restoring window geometry.
        Tile.cmd_untile(self)
        if self.store:
            for cont in self.store.all()[:]:
                cont.remove(reset_window=True)
            self.store.reset()
class AutoStore(object):
    """Ordered bookkeeping of tiled containers, split into a master area
    (at most `mcnt` entries) and a slave area."""
    def __init__(self):
        self.masters = []
        self.slaves = []
        # How many containers the master area may hold.
        self.mcnt = 1
        # Dirty flag, reported (and cleared) by made_changes().
        self.changes = False
    def made_changes(self):
        """Report whether the store changed since last asked; clears the flag."""
        changed, self.changes = self.changes, False
        return changed
    def add(self, cont, top = False):
        """File `cont` as a master if there is room, otherwise as a slave.

        `top` inserts at the front of the chosen list instead of the end.
        """
        if len(self.masters) < self.mcnt:
            # Promote into the master area (possibly out of the slaves).
            if cont in self.slaves:
                self.slaves.remove(cont)
            position = 0 if top else len(self.masters)
            self.masters.insert(position, cont)
            self.changes = True
        elif cont not in self.slaves:
            position = 0 if top else len(self.slaves)
            self.slaves.insert(position, cont)
            self.changes = True
    def empty(self):
        """True when no containers are stored at all."""
        return not (self.masters or self.slaves)
    def remove(self, cont):
        """Forget `cont`; refill the master area from the slaves if needed."""
        if cont in self.masters:
            self.masters.remove(cont)
            if self.slaves and len(self.masters) < self.mcnt:
                self.masters.append(self.slaves.pop(0))
            self.changes = True
        elif cont in self.slaves:
            self.slaves.remove(cont)
            self.changes = True
    def reset(self):
        """Drop every container and clear the dirty flag."""
        self.masters, self.slaves = [], []
        self.changes = False
    def switch(self, cont1, cont2):
        """Exchange the stored positions of two containers."""
        located = []
        for cont in (cont1, cont2):
            side = self.masters if cont in self.masters else self.slaves
            located.append((side, side.index(cont)))
        (list_a, idx_a), (list_b, idx_b) = located
        list_a[idx_a], list_b[idx_b] = list_b[idx_b], list_a[idx_a]
    def inc_masters(self):
        """Allow one more master, stealing the first slave if available."""
        self.mcnt = min(self.mcnt + 1, len(self.all()))
        if len(self.masters) < self.mcnt and self.slaves:
            self.masters.append(self.slaves.pop(0))
    def dec_masters(self):
        """Allow one fewer master, demoting the last master if necessary."""
        if self.mcnt <= 0:
            return
        self.mcnt -= 1
        if len(self.masters) > self.mcnt:
            self.slaves.append(self.masters.pop())
    def all(self):
        """All containers: masters followed by slaves."""
        return self.masters + self.slaves
    def __str__(self):
        names = lambda conts: [cont.get_name() for cont in conts]
        return 'Masters: %s\nSlaves: %s\n' % (
            names(self.masters), names(self.slaves)
        )
| Python |
import re
import config
from command import Command
from container import Container
class Tile(object):
    """Abstract tiler bound to one monitor.

    Provides the machinery shared by all tilers: key-command dispatch,
    the pending-retile queue, border/decoration toggling, and moving the
    active window between monitors. Subclasses implement the layouts.
    """
    # Tilers whose layout must be recomputed on the next exec_queue().
    queue_tile = set()
    @staticmethod
    def dispatch(monitor, command):
        # Route a key command to the monitor's tiler, or switch tilers
        # for "tile.<name>" commands.
        assert isinstance(command, Command)
        tiler = monitor.get_tiler()
        if tiler:
            if tiler.get_name() == 'ManualTile':
                cmd_nm = command.get_manual_command()
            else:
                cmd_nm = command.get_auto_command()
            if cmd_nm and hasattr(tiler, 'cmd_' + cmd_nm):
                if cmd_nm == 'tile':
                    # 'tile' may start tiling even when inactive.
                    tiler.enqueue(force_tiling=True)
                elif tiler.tiling:
                    getattr(tiler, 'cmd_' + cmd_nm)()
                elif (tiler.get_option('always_monitor_cmd') and
                    re.match('screen[0-9]_(focus|put)', cmd_nm)):
                    # Monitor focus/put commands may be allowed even
                    # while the tiler is inactive.
                    getattr(tiler, 'cmd_' + cmd_nm)()
            elif cmd_nm and cmd_nm.startswith('tile.'):
                # "tile.<name>" switches this monitor to another tiler.
                tiler.monitor.cycle(tiler_name=cmd_nm[cmd_nm.index('.') + 1:])
            else:
                print 'Invalid command %s' % cmd_nm
    @staticmethod
    def exec_queue():
        # Retile everything queued via enqueue(), then clear the queue.
        for tiler in Tile.queue_tile:
            tiler.cmd_tile()
        Tile.queue_tile = set()
    #
    # Helper methods
    # These are responsible for some utility work common to all
    # tilers. Such as moving windows from one monitor to the next,
    # toggling decorations/borders, and handling high-level functions
    # like callbacks for hiding/showing the tiler, or if an error occurs.
    #
    def __init__(self, monitor):
        self.workspace = monitor.workspace
        self.monitor = monitor
        # True while this tiler actively manages the monitor's layout.
        self.tiling = False
        self.decor = self.get_option('decorations')
        self.borders = self.get_option('borders')
        # Callbacks to run if the next layout attempt fails.
        self.queue_error = set()
    def borders_add(self, do_window=True):
        # Overridden by subclasses that manage container borders.
        pass
    def borders_remove(self, do_window=True):
        pass
    def callback_hidden(self):
        # Workspace became invisible: restore decorations if removed.
        if not self.decor:
            self.borders_remove(do_window=False)
    def callback_visible(self):
        # Workspace became visible again: re-apply the border style.
        if not self.decor:
            self.borders_add(do_window=False)
    def enqueue(self, force_tiling=False):
        # Schedule this tiler for the next exec_queue() pass.
        if self.tiling or force_tiling:
            Tile.queue_tile.add(self)
    def error_clear(self):
        self.queue_error = set()
    def error_exec_callbacks(self):
        # Run (and drop) the registered undo callbacks after a layout
        # attempt produced degenerate geometry.
        for err in self.queue_error:
            err()
        self.error_clear()
    def error_register_callback(self, exc):
        self.queue_error.add(exc)
    def get_name(self):
        return self.__class__.__name__
    def get_option(self, option):
        # Options are resolved per workspace / monitor / tiler name.
        return config.get_option(
            option,
            self.workspace.id,
            self.monitor.id,
            self.get_name()
        )
    def mouse_find(self, x, y):
        pass
    def mouse_switch(self, x, y):
        pass
    def screen_focus(self, mid):
        # Focus the active container/window on monitor `mid`.
        if not self.workspace.has_monitor(mid):
            return
        new_tiler = self.workspace.get_monitor(mid).get_tiler()
        if new_tiler.tiling:
            if self != new_tiler:
                if new_tiler:
                    active = new_tiler.get_active_cont()
                    if not active:
                        active = self.workspace.get_monitor(mid).get_active()
                    if active:
                        active.activate()
        else:
            # Target monitor is not tiling: activate its raw window.
            mon = self.workspace.get_monitor(mid)
            active = mon.get_active()
            if active:
                active.activate()
    def screen_put(self, mid):
        # Move the active container/window to monitor `mid`.
        if not self.workspace.has_monitor(mid):
            return
        if self.tiling:
            active = self.get_active_cont()
            new_tiler = self.workspace.get_monitor(mid).get_tiler()
            if new_tiler != self and active and new_tiler.tiling:
                active.win.set_monitor(self.workspace.id, mid)
            elif active and self.monitor.id != mid:
                # Target isn't tiling: just fit the window in its workarea.
                mon = self.workspace.get_monitor(mid)
                active.win.moveresize(mon.wa_x, mon.wa_y,
                    active.w if active.w < mon.wa_width
                    else mon.wa_width,
                    active.h if active.h < mon.wa_height
                    else mon.wa_height)
                active.win.set_monitor(self.workspace.id, mid)
        else:
            # We aren't tiling: move the bare active window over.
            active = self.monitor.get_active()
            mon = self.workspace.get_monitor(mid)
            active.moveresize(mon.wa_x, mon.wa_y,
                active.width if active.width < mon.wa_width else mon.wa_width,
                active.height if active.height < mon.wa_height else mon.wa_height)
            active.set_monitor(self.workspace.id, mid)
    #
    # Commands
    # Functions called directly by pressing a key.
    #
    def cmd_cycle_tiler(self):
        self.monitor.cycle()
    def cmd_reset(self):
        self.monitor.tile_reset()
    def cmd_screen0_focus(self):
        self.screen_focus(0)
    def cmd_screen1_focus(self):
        self.screen_focus(1)
    def cmd_screen2_focus(self):
        self.screen_focus(2)
    def cmd_screen0_put(self):
        self.screen_put(0)
    def cmd_screen1_put(self):
        self.screen_put(1)
    def cmd_screen2_put(self):
        self.screen_put(2)
    def cmd_tile(self):
        # Mark this tiler as the monitor's live tiler.
        self.tiling = True
        self.monitor.tiler = self
    def cmd_toggle_borders(self):
        self.borders = not self.borders
        if not self.decor:
            if self.borders:
                self.borders_add(do_window=False)
            else:
                self.borders_remove(do_window=False)
    def cmd_toggle_decorations(self):
        # Decorations and borders are mutually exclusive looks.
        self.decor = not self.decor
        if self.decor:
            self.borders_remove()
        else:
            self.borders_add()
        Container.manage_focus(self.monitor.get_active())
    def cmd_untile(self):
        self.tiling = False
        self.monitor.tiler = None
| Python |
import time
import ptxcb
import config
from command import Command
from window import Window
from monitor import Monitor
from workspace import Workspace
from container import Container
# The Window object that currently has input focus (None when unknown).
_ACTIVE = None
# True while the pointer is grabbed (e.g. during a window drag).
pointer_grab = False
# The Window being dragged with the mouse, or False when idle.
moving = False
# Cache of root-window properties, keyed by atom name (see reset_properties).
properties = {}
# Per-physical-screen geometry as reported by the Xinerama extension.
xinerama = ptxcb.connection.xinerama_get_screens()
def init():
    """Initialize module state by (re)reading all root-window properties."""
    reset_properties()
    load_properties()
def apply_config():
    """Rebind key commands and start tiling wherever 'tile_on_startup' is set."""
    Command.init()
    for mon in Workspace.iter_all_monitors():
        if config.get_option('tile_on_startup', mon.workspace.id, mon.id):
            mon.tile(force_tiling=True)
def get_active():
    """Return the currently-active Window object (or None)."""
    # Reading a module global needs no `global` declaration.
    return _ACTIVE
def get_active_monitor():
    """Return the Monitor holding the active window (or the pointer)."""
    return get_monitor(*get_active_wsid_and_mid())
def get_active_wsid_and_mid():
    """Return (workspace id, monitor id) of the active window.

    Falls back to the pointer's position when no usable active window
    is known.
    """
    win = get_active()
    if win and win.monitor and win.monitor.workspace:
        return (win.monitor.workspace.id, win.monitor.id)
    return get_pointer_wsid_and_mid()
def get_monitor(wsid, mid):
    """Return monitor `mid` on workspace `wsid` (KeyError if unknown)."""
    return Workspace.WORKSPACES[wsid].get_monitor(mid)
def get_pointer_wsid_and_mid():
    """Return (workspace id, monitor id) under the mouse pointer.

    The monitor id is -1 when no monitor contains the pointer.
    """
    px, py = ptxcb.XROOT.get_pointer_position()
    wsid = properties['_NET_CURRENT_DESKTOP']
    mid = -1
    for mon in Workspace.WORKSPACES[wsid].iter_monitors():
        if mon.contains(px, py):
            mid = mon.id
            break
    return wsid, mid
def iter_tilers(workspaces=None, monitors=None):
    """Yield every actively tiling tiler on the selected workspaces/monitors.

    Either filter may be None (meaning all), a single id, or a list of ids.
    """
    if isinstance(workspaces, int):
        workspaces = [workspaces]
    if isinstance(monitors, int):
        monitors = [monitors]
    for wsid, workspace in Workspace.WORKSPACES.items():
        if workspaces is not None and wsid not in workspaces:
            continue
        for mon in workspace.iter_monitors():
            if monitors is not None and mon.id not in monitors:
                continue
            tiler = mon.get_tiler()
            if tiler and tiler.tiling:
                yield tiler
def iter_windows(workspaces=None, monitors=None):
    """Yield every window on the selected workspaces/monitors.

    Either filter may be None (meaning all), a single id, or a list of ids.
    """
    if isinstance(workspaces, int):
        workspaces = [workspaces]
    if isinstance(monitors, int):
        monitors = [monitors]
    for wsid, workspace in Workspace.WORKSPACES.items():
        if workspaces is not None and wsid not in workspaces:
            continue
        for mon in workspace.iter_monitors():
            if monitors is not None and mon.id not in monitors:
                continue
            for win in mon.iter_windows():
                yield win
def load_properties():
    """Read every tracked root-window property, in dependency order.

    Desktop bookkeeping must exist before the workarea and client list
    are processed, so the order of this tuple matters.
    """
    for pname in (
        '_NET_CURRENT_DESKTOP',
        '_NET_NUMBER_OF_DESKTOPS',
        '_NET_WORKAREA',
        '_NET_CLIENT_LIST',
        '_NET_ACTIVE_WINDOW',
    ):
        update_property(pname)
def print_hierarchy(workspaces=None, monitors=None):
    """Debug dump of workspaces -> monitors -> windows to stdout.

    Either filter may be None (meaning all), a single id, or a list of ids.
    """
    if isinstance(workspaces, int):
        workspaces = [workspaces]
    if isinstance(monitors, int):
        monitors = [monitors]
    for wsid in Workspace.WORKSPACES:
        if workspaces is None or wsid in workspaces:
            print Workspace.WORKSPACES[wsid]
            for mon in Workspace.WORKSPACES[wsid].iter_monitors():
                if monitors is None or mon.id in monitors:
                    print '\t%s' % mon
                    for win in mon.windows:
                        print '\t\t%s' % win
def reset_properties():
    """Reset the root-property cache to empty/sentinel defaults."""
    global properties
    properties = {}
    properties['_NET_ACTIVE_WINDOW'] = ''
    properties['_NET_CLIENT_LIST'] = set()
    properties['_NET_WORKAREA'] = []
    properties['_NET_NUMBER_OF_DESKTOPS'] = 0
    properties['_NET_DESKTOP_GEOMETRY'] = {}
    properties['_NET_CURRENT_DESKTOP'] = -1
def set_active(wid):
    """Record the Window with X id `wid` as active (None if untracked)."""
    global _ACTIVE
    _ACTIVE = Window.WINDOWS.get(wid)
def update_property(pname):
    """Invoke the module-level handler named 'update<pname>', if any."""
    handler = globals().get('update%s' % pname)
    if handler is not None:
        handler()
def update_NET_ACTIVE_WINDOW():
    """Refresh the cached active window and redraw container focus."""
    global properties
    active = ptxcb.XROOT.get_active_window()
    if not active:
        return
    set_active(active)
    properties['_NET_ACTIVE_WINDOW'] = get_active()
    # Re-read: set_active may have produced None for untracked windows.
    active = get_active()
    if active and active.monitor:
        active.monitor.active = active
        Container.manage_focus(active)
def update_NET_CLIENT_LIST():
    """Diff the root client list and add/remove Window objects to match."""
    global properties
    old = properties['_NET_CLIENT_LIST']
    new = set(ptxcb.XROOT.get_window_ids())
    properties['_NET_CLIENT_LIST'] = new
    if old != new:
        for wid in new.difference(old):
            Window.add(wid)
        for wid in old.difference(new):
            Window.remove(wid)
        # This might be redundant, but it's important to know
        # the new active window if the old one was destroyed
        # as soon as possible
        update_NET_ACTIVE_WINDOW()
def update_NET_CURRENT_DESKTOP():
    """Track desktop switches; toggle visibility callbacks on the tilers."""
    global properties
    old = properties['_NET_CURRENT_DESKTOP']
    properties['_NET_CURRENT_DESKTOP'] = ptxcb.XROOT.get_current_desktop()
    if old != properties['_NET_CURRENT_DESKTOP']:
        # Focus highlighting no longer applies across the switch.
        Container.active = None
        for tiler in iter_tilers(old):
            tiler.callback_hidden()
        for tiler in iter_tilers(properties['_NET_CURRENT_DESKTOP']):
            tiler.callback_visible()
def update_NET_WORKAREA():
    """Re-read the per-desktop workarea and recompute monitor workareas."""
    global properties
    properties['_NET_WORKAREA'] = ptxcb.XROOT.get_workarea()
    for mon in Workspace.iter_all_monitors():
        mon.calculate_workarea()
def update_NET_NUMBER_OF_DESKTOPS():
    """Create or destroy Workspace/Monitor objects as desktops come and go."""
    global properties, xinerama
    old = properties['_NET_NUMBER_OF_DESKTOPS']
    properties['_NET_NUMBER_OF_DESKTOPS'] = ptxcb.XROOT.get_number_of_desktops()
    # Add destops...
    if old < properties['_NET_NUMBER_OF_DESKTOPS']:
        for wsid in xrange(old, properties['_NET_NUMBER_OF_DESKTOPS']):
            Workspace.add(wsid)
            Monitor.add(wsid, xinerama)
    # Remove desktops
    elif old > properties['_NET_NUMBER_OF_DESKTOPS']:
        for wsid in xrange(properties['_NET_NUMBER_OF_DESKTOPS'], old):
            Monitor.remove(wsid)
            Workspace.remove(wsid)
def update_NET_DESKTOP_GEOMETRY(force=False):
    """React to a change in desktop geometry / Xinerama layout.

    When only the screen bounds changed (same number of screens), refresh
    each monitor in place; otherwise tear down and rebuild all state.
    Pass force=True to rebuild even when nothing seems to have changed.
    """
    global properties, xinerama
    old_xinerama = xinerama
    # Give the X server a moment to settle after a layout change.
    time.sleep(1)
    properties['_NET_DESKTOP_GEOMETRY'] = ptxcb.XROOT.get_desktop_geometry()
    xinerama = ptxcb.connection.xinerama_get_screens()
    if old_xinerama != xinerama or force:
        if not force and len(old_xinerama) == len(xinerama):
            # Same screen count: update every monitor's bounds in place.
            for mon in Workspace.iter_all_monitors():
                # BUG FIX: this previously indexed xinerama with the
                # undefined name `mid` (NameError); use the monitor's id,
                # which indexes the Xinerama screen list.
                screen = xinerama[mon.id]
                mon.refresh_bounds(
                    screen['x'],
                    screen['y'],
                    screen['width'],
                    screen['height']
                )
                mon.calculate_workarea()
        else:
            # Screen count changed: rebuild workspace/window state from scratch.
            for mon in Workspace.iter_all_monitors():
                for tiler in mon.tilers:
                    tiler.destroy()
            for wid in Window.WINDOWS.keys():
                Window.remove(wid)
            for wsid in Workspace.WORKSPACES.keys():
                Monitor.remove(wsid)
                Workspace.remove(wsid)
            reset_properties()
            load_properties()
            init()
| Python |
import math
from pt.tile_auto import AutoTile
class Vertical(AutoTile):
    """Classic master/stack layout: masters in a left column, slaves
    stacked in a right column."""
    def __init__(self, monitor):
        AutoTile.__init__(self, monitor)
        # Fraction of the workarea width given to the master column.
        self.hsplit = self.get_option('width_factor')
    #
    # Helper methods
    #
    def decrement_hsplit(self):
        self.hsplit -= self.get_option('step_size')
    def increment_hsplit(self):
        self.hsplit += self.get_option('step_size')
    #
    # Commands
    #
    def cmd_tile(self):
        AutoTile.cmd_tile(self)
        m_size = len(self.store.masters)
        s_size = len(self.store.slaves)
        if not m_size and not s_size:
            return
        m_width = int(self.monitor.wa_width * self.hsplit)
        s_width = self.monitor.wa_width - m_width
        m_x = self.monitor.wa_x
        s_x = m_x + m_width
        # Degenerate split: run the undo callbacks and bail out.
        if (
            m_width <= 0 or m_width > self.monitor.wa_width or
            s_width <= 0 or s_width > self.monitor.wa_width
        ):
            self.error_exec_callbacks()
            return
        if m_size:
            m_height = self.monitor.wa_height / m_size
            if not s_size:
                # No slaves: masters take the full width.
                m_width = self.monitor.wa_width
            for i, cont in enumerate(self.store.masters):
                cont.moveresize(
                    m_x,
                    self.monitor.wa_y + i * m_height,
                    m_width,
                    m_height
                )
        if s_size:
            s_height = self.monitor.wa_height / s_size
            if not m_size:
                # No masters: slaves take the full width.
                s_width = self.monitor.wa_width
                s_x = self.monitor.wa_x
            for i, cont in enumerate(self.store.slaves):
                cont.moveresize(
                    s_x,
                    self.monitor.wa_y + i * s_height,
                    s_width,
                    s_height
                )
        # If we've made it this far, then we've supposedly tiled correctly
        self.error_clear()
    def cmd_decrease_master(self):
        self.decrement_hsplit()
        # Undo the step if the resulting layout turns out degenerate.
        self.error_register_callback(self.increment_hsplit)
        self.enqueue()
    def cmd_increase_master(self):
        self.increment_hsplit()
        self.error_register_callback(self.decrement_hsplit)
        self.enqueue()
class VerticalRows(Vertical):
    """Vertical layout whose slave area is a grid of `rows` rows,
    adding columns as more slaves appear."""
    def __init__(self, monitor):
        Vertical.__init__(self, monitor)
        # Maximum number of slave rows per column.
        self.rows = self.get_option('rows')
    #
    # Commands
    #
    def cmd_tile(self):
        AutoTile.cmd_tile(self)
        m_size = len(self.store.masters)
        s_size = len(self.store.slaves)
        if not m_size and not s_size:
            return
        # Number of slave columns needed; the last column may hold
        # fewer than `rows` slaves.
        columns = int(math.ceil(float(s_size) / float(self.rows)))
        lastcolumn_rows = s_size % self.rows or self.rows
        m_width = int(self.monitor.wa_width * self.hsplit)
        if not columns:
            s_width = 1
        else:
            s_width = (self.monitor.wa_width - m_width) / columns
        m_x = self.monitor.wa_x
        s_x = m_x + m_width
        # Degenerate split: run the undo callbacks and bail out.
        if (
            m_width <= 0 or m_width > self.monitor.wa_width or
            s_width <= 0 or s_width > self.monitor.wa_width
        ):
            self.error_exec_callbacks()
            return
        if m_size:
            m_height = self.monitor.wa_height / m_size
            if not s_size:
                m_width = self.monitor.wa_width
            for i, cont in enumerate(self.store.masters):
                cont.moveresize(
                    m_x,
                    self.monitor.wa_y + i * m_height,
                    m_width,
                    m_height
                )
        if s_size:
            s_height = self.monitor.wa_height / self.rows
            if not m_size:
                s_width = self.monitor.wa_width / columns
                s_x = self.monitor.wa_x
            column = 0
            for i, cont in enumerate(self.store.slaves):
                if column == columns - 1:
                    # Last column: stretch its rows to fill the height.
                    s_height = self.monitor.wa_height / lastcolumn_rows
                cont.moveresize(
                    s_x + column * s_width,
                    self.monitor.wa_y + (i % self.rows) * s_height,
                    s_width,
                    s_height
                )
                if not (i + 1) % self.rows:
                    column += 1
        # If we've made it this far, then we've supposedly tiled correctly
        self.error_clear()
| Python |
import math
from pt.tile_auto import AutoTile
class Center(AutoTile):
    """Layout with the master(s) centered on top of a grid of slaves."""
    def __init__(self, monitor):
        AutoTile.__init__(self, monitor)
        # Fractions of the workarea given to the centered master box.
        self.hsplit = self.get_option('width_factor')
        self.vsplit = self.get_option('height_factor')
        # Number of columns in the slave grid.
        self.columns = self.get_option('columns')
    #
    # Helper methods
    #
    def lower_master(self):
        # Raise every slave so the centered master drops below them.
        for cont in self.store.slaves:
            cont.window_raise()
    def decrement_hsplit(self):
        self.hsplit -= self.get_option('step_size')
    def increment_hsplit(self):
        self.hsplit += self.get_option('step_size')
    def decrement_vsplit(self):
        self.vsplit -= self.get_option('step_size')
    def increment_vsplit(self):
        self.vsplit += self.get_option('step_size')
    #
    # Commands
    #
    def cmd_tile(self):
        AutoTile.cmd_tile(self)
        m_size = len(self.store.masters)
        s_size = len(self.store.slaves)
        if not m_size and not s_size:
            return
        # Slave grid dimensions; the last row may be only partly full.
        rows = int(math.ceil(float(s_size) / float(self.columns)))
        lastrow_columns = s_size % self.columns or self.columns
        # The master box is centered within the workarea.
        m_width = int(self.monitor.wa_width * self.hsplit)
        m_height = int(self.monitor.wa_height * self.vsplit)
        m_x = self.monitor.wa_x + int((self.monitor.wa_width - m_width) / 2)
        m_y = self.monitor.wa_y + int((self.monitor.wa_height - m_height) / 2)
        s_width = int(self.monitor.wa_width / self.columns)
        if not rows:
            s_height = 1
        else:
            s_height = int(self.monitor.wa_height / rows)
        s_x = self.monitor.wa_x
        s_y = self.monitor.wa_y
        # Degenerate layout: run the undo callbacks and bail out.
        if (
            m_width <= 0 or m_width > self.monitor.wa_width or
            s_width <= 0 or s_width > self.monitor.wa_width or
            m_height <= 0 or m_height > self.monitor.wa_height or
            s_height <= 0 or s_height > self.monitor.wa_height
        ):
            self.error_exec_callbacks()
            return
        for i, cont in enumerate(self.store.masters):
            cont.moveresize(
                m_x,
                m_y,
                m_width,
                m_height
            )
        for i, cont in enumerate(self.store.slaves):
            if i / self.columns == rows - 1:
                # Last row: widen its cells to fill the workarea.
                s_width = self.monitor.wa_width / lastrow_columns
            cont.moveresize(
                s_x + (i % self.columns) * s_width,
                s_y + (i / self.columns) * s_height,
                s_width,
                s_height
            )
        # If we've made it this far, then we've supposedly tiled correctly
        self.error_clear()
    def cmd_decrease_master(self):
        self.decrement_hsplit()
        self.decrement_vsplit()
        # Undo both steps if the resulting layout turns out degenerate.
        self.error_register_callback(self.increment_hsplit)
        self.error_register_callback(self.increment_vsplit)
        self.enqueue()
    def cmd_increase_master(self):
        self.increment_hsplit()
        self.increment_vsplit()
        self.error_register_callback(self.decrement_hsplit)
        self.error_register_callback(self.decrement_vsplit)
        self.enqueue()
    def cmd_next(self):
        # Drop the master below the slaves before changing focus.
        self.lower_master()
        AutoTile.cmd_next(self)
    def cmd_previous(self):
        self.lower_master()
        AutoTile.cmd_previous(self)
    def cmd_decrement_masters(self):
        # Master count is fixed in this layout.
        pass
    def cmd_increment_masters(self):
        pass
| Python |
from pt.tile_auto import AutoTile
class Maximal(AutoTile):
    """Layout in which every container is maximized over the workarea."""
    def __init__(self, monitor):
        AutoTile.__init__(self, monitor)
    #
    # Commands
    #
    def cmd_tile(self):
        AutoTile.cmd_tile(self)
        containers = self.store.all()
        if not containers:
            return
        workarea = (
            self.monitor.wa_x,
            self.monitor.wa_y,
            self.monitor.wa_width,
            self.monitor.wa_height,
        )
        # Do master last, in case decorations are disabled and we need
        # to draw the "active" border (so it overlaps the "inactive" ones).
        for container in sorted(containers, reverse=True):
            container.moveresize(*workarea)
        # Reaching this point means the layout succeeded.
        self.error_clear()
    def cmd_decrement_masters(self):
        # Master count is meaningless when everything is maximized.
        pass
    def cmd_increment_masters(self):
        pass
| Python |
import pt.tile_manual
import cascade
import center
import horizontal
import maximal
import vertical
# Re-export the tiler classes under their public capitalized names so
# callers can resolve them with getattr(tilers, name) using the layout
# names from the configuration file.
ManualTile = pt.tile_manual.ManualTile
Cascade = cascade.Cascade
Center = center.Center
Horizontal = horizontal.Horizontal
HorizontalRows = horizontal.HorizontalRows
Maximal = maximal.Maximal
Vertical = vertical.Vertical
VerticalRows = vertical.VerticalRows
| Python |
import math
from pt.tile_auto import AutoTile
class Cascade(AutoTile):
    """Tiler that overlaps windows like a cascading deck of cards.

    Masters sit on top of the stack; each slave is offset by the
    configured push_down/push_over amounts so an edge of every slave
    stays visible beneath the next one.
    """

    def __init__(self, monitor):
        AutoTile.__init__(self, monitor)
        # Fractions of the work area each window occupies.
        self.hsplit = self.get_option('width_factor')
        self.vsplit = self.get_option('height_factor')

    #
    # Helper methods
    #

    def raise_active(self):
        """Raise the active container's window, if any."""
        active = self.get_active()
        if active:
            active.window_raise()

    def restack(self):
        """Raise slaves first, then masters, so masters end up on top."""
        for cont in self.store.slaves:
            cont.window_raise()
        for cont in self.store.masters:
            cont.window_raise()

    def decrement_hsplit(self):
        self.hsplit -= self.get_option('step_size')

    def increment_hsplit(self):
        self.hsplit += self.get_option('step_size')

    def decrement_vsplit(self):
        self.vsplit -= self.get_option('step_size')

    def increment_vsplit(self):
        self.vsplit += self.get_option('step_size')

    #
    # Commands
    #

    def cmd_tile(self):
        """Lay out all windows as a cascade inside the work area."""
        AutoTile.cmd_tile(self)
        m_size = len(self.store.masters)
        s_size = len(self.store.slaves)
        if not m_size and not s_size:
            return
        push_down = self.get_option('push_down')
        push_over = self.get_option('push_over')
        if self.get_option('horz_align') == 'right':
            push_over = -push_over
        # Shrink the master enough that the cascaded slaves remain visible.
        m_width = int(
            self.monitor.wa_width * self.hsplit - push_over * s_size
        )
        m_height = int(
            self.monitor.wa_height * self.vsplit - push_down * s_size
        )
        m_y = self.monitor.wa_y + push_down * s_size
        s_width = int(self.monitor.wa_width * self.hsplit)
        s_height = int(self.monitor.wa_height * self.vsplit)
        s_y = self.monitor.wa_y
        # If any computed size is degenerate or exceeds the work area,
        # run the registered error callbacks (undoing the last split
        # change) and stop.
        if (
            m_width <= 0 or m_width > self.monitor.wa_width or
            s_width <= 0 or s_width > self.monitor.wa_width or
            m_height <= 0 or m_height > self.monitor.wa_height or
            s_height <= 0 or s_height > self.monitor.wa_height
        ):
            self.error_exec_callbacks()
            return
        if self.get_option('horz_align') == 'right':
            m_x = (
                self.monitor.wa_x +
                (self.monitor.wa_width - m_width) +
                (push_over * s_size)
            )
            s_x = self.monitor.wa_x + (self.monitor.wa_width - s_width)
        else:
            m_x = self.monitor.wa_x + (push_over * s_size)
            s_x = self.monitor.wa_x
        # Each successive slave is nudged over/down and shrunk so that
        # every slave's edge stays visible under the next one.
        for i, cont in enumerate(self.store.slaves):
            cont.moveresize(
                s_x + (i * push_over),
                s_y + (i * push_down),
                s_width - (i * push_over),
                s_height - (i * push_down)
            )
            cont.window_raise()
        for i, cont in enumerate(self.store.masters):
            cont.moveresize(
                m_x,
                m_y,
                m_width,
                m_height
            )
            cont.window_raise()
        self.raise_active()
        # If we've made it this far, then we've supposedly tiled correctly
        self.error_clear()

    def cmd_decrease_master(self):
        self.decrement_hsplit()
        self.decrement_vsplit()
        self.error_register_callback(self.increment_hsplit)
        self.error_register_callback(self.increment_vsplit)
        self.enqueue()

    def cmd_increase_master(self):
        self.increment_hsplit()
        self.increment_vsplit()
        self.error_register_callback(self.decrement_hsplit)
        self.error_register_callback(self.decrement_vsplit)
        self.enqueue()

    def cmd_cycle(self):
        AutoTile.cmd_cycle(self)
        self.restack()

    def cmd_focus_master(self):
        self.restack()
        AutoTile.cmd_focus_master(self)

    def cmd_make_active_master(self):
        AutoTile.cmd_make_active_master(self)
        self.restack()
        self.raise_active()

    def cmd_next(self):
        self.restack()
        AutoTile.cmd_next(self)

    def cmd_previous(self):
        self.restack()
        AutoTile.cmd_previous(self)

    def cmd_switch_next(self):
        AutoTile.cmd_switch_next(self)
        self.restack()
        self.raise_active()

    def cmd_switch_previous(self):
        AutoTile.cmd_switch_previous(self)
        self.restack()
        self.raise_active()

    # BUG FIX: these stubs were previously named decrement_masters /
    # increment_masters (no "cmd_" prefix), so unlike the sibling
    # tilers (e.g. Maximal) they never overrode the inherited
    # commands. The unprefixed names are kept as aliases for backward
    # compatibility with any direct callers.
    def cmd_decrement_masters(self):
        pass

    def cmd_increment_masters(self):
        pass

    decrement_masters = cmd_decrement_masters
    increment_masters = cmd_increment_masters
| Python |
import math
from pt.tile_auto import AutoTile
class Horizontal(AutoTile):
    """Tiler with masters in a top row and slaves in a bottom row.

    vsplit is the fraction of the work-area height given to the
    master row.
    """

    def __init__(self, monitor):
        AutoTile.__init__(self, monitor)
        self.vsplit = self.get_option('height_factor')

    #
    # Helper methods
    #

    def decrement_vsplit(self):
        # Shrink the master row by one configured step.
        self.vsplit -= self.get_option('step_size')

    def increment_vsplit(self):
        # Grow the master row by one configured step.
        self.vsplit += self.get_option('step_size')

    #
    # Commands
    #

    def cmd_tile(self):
        """Lay masters across the top and slaves across the bottom."""
        AutoTile.cmd_tile(self)
        m_size = len(self.store.masters)
        s_size = len(self.store.slaves)
        if not m_size and not s_size:
            return
        m_height = int(self.monitor.wa_height * self.vsplit)
        s_height = self.monitor.wa_height - m_height
        m_y = self.monitor.wa_y
        s_y = m_y + m_height
        # If the split produced a degenerate row, execute the error
        # callbacks (which undo the last vsplit change) and bail.
        if (
            m_height <= 0 or m_height > self.monitor.wa_height or
            s_height <= 0 or s_height > self.monitor.wa_height
        ):
            self.error_exec_callbacks()
            return
        if m_size:
            # NOTE: floor division under Python 2 integer semantics.
            m_width = self.monitor.wa_width / m_size
            if not s_size:
                # No slaves: masters take the full height.
                m_height = self.monitor.wa_height
            for i, cont in enumerate(self.store.masters):
                cont.moveresize(
                    self.monitor.wa_x + i * m_width,
                    m_y,
                    m_width,
                    m_height
                )
        if s_size:
            s_width = self.monitor.wa_width / s_size
            if not m_size:
                # No masters: slaves take the full height from the top.
                s_height = self.monitor.wa_height
                s_y = self.monitor.wa_y
            for i, cont in enumerate(self.store.slaves):
                cont.moveresize(
                    self.monitor.wa_x + i * s_width,
                    s_y,
                    s_width,
                    s_height
                )
        # If we've made it this far, then we've supposedly tiled correctly
        self.error_clear()

    def cmd_decrease_master(self):
        self.decrement_vsplit()
        self.error_register_callback(self.increment_vsplit)
        self.enqueue()

    def cmd_increase_master(self):
        self.increment_vsplit()
        self.error_register_callback(self.decrement_vsplit)
        self.enqueue()
class HorizontalRows(Horizontal):
    """Horizontal layout whose slaves wrap into multiple rows."""

    def __init__(self, monitor):
        Horizontal.__init__(self, monitor)
        # Fixed number of slave columns per row.
        self.columns = self.get_option('columns')

    #
    # Commands
    #

    def cmd_tile(self):
        """Masters across the top; slaves in a grid of rows below."""
        AutoTile.cmd_tile(self)
        m_size = len(self.store.masters)
        s_size = len(self.store.slaves)
        if not m_size and not s_size:
            return
        # Number of slave rows; the last row may be partially filled,
        # in which case lastrow_columns is its (smaller) column count.
        rows = int(math.ceil(float(s_size) / float(self.columns)))
        lastrow_columns = s_size % self.columns or self.columns
        m_height = int(self.monitor.wa_height * self.vsplit)
        if not rows:
            # No slaves: dummy height so the sanity check below passes.
            s_height = 1
        else:
            # NOTE: floor division under Python 2 integer semantics.
            s_height = (self.monitor.wa_height - m_height) / rows
        m_y = self.monitor.wa_y
        s_y = m_y + m_height
        if (
            m_height <= 0 or m_height > self.monitor.wa_height or
            s_height <= 0 or s_height > self.monitor.wa_height
        ):
            self.error_exec_callbacks()
            return
        if m_size:
            m_width = self.monitor.wa_width / m_size
            if not s_size:
                # No slaves: masters take the full height.
                m_height = self.monitor.wa_height
            for i, cont in enumerate(self.store.masters):
                cont.moveresize(
                    self.monitor.wa_x + i * m_width,
                    m_y,
                    m_width,
                    m_height
                )
        if s_size:
            s_width = self.monitor.wa_width / self.columns
            if not m_size:
                # No masters: the slave grid takes the whole height.
                s_height = self.monitor.wa_height / rows
                s_y = self.monitor.wa_y
            for i, cont in enumerate(self.store.slaves):
                # Widen cells on the final (possibly short) row.
                if i / self.columns == rows - 1:
                    s_width = self.monitor.wa_width / lastrow_columns
                cont.moveresize(
                    self.monitor.wa_x + (i % self.columns) * s_width,
                    s_y + (i / self.columns) * s_height,
                    s_width,
                    s_height
                )
        # If we've made it this far, then we've supposedly tiled correctly
        self.error_clear()
| Python |
class Grid(object):
    """Placeholder for a grid-based tiling layout.

    The class is intentionally empty for now; it only reserves the name.
    """

    def __init__(self):
        """No state to initialize yet."""
| Python |
# Library imports
import config
import ptxcb
import tilers
# Class imports
from workspace import Workspace
class Monitor(object):
    """A single physical monitor (Xinerama screen) within a workspace.

    Tracks the windows on it, the active window, and the available
    tilers. The work area (wa_* attributes) is the raw geometry minus
    configured margins or panel struts.
    """

    @staticmethod
    def add(wsid, xinerama):
        # Create one Monitor per Xinerama screen dict and register it
        # with the workspace identified by wsid.
        for mid, screen in enumerate(xinerama):
            new_mon = Monitor(
                Workspace.WORKSPACES[wsid],
                mid,
                screen['x'],
                screen['y'],
                screen['width'],
                screen['height']
            )
            Workspace.WORKSPACES[wsid].monitors[mid] = new_mon

    @staticmethod
    def remove(wsid):
        # Destroy every tiler on every monitor of the workspace, then
        # forget the monitors themselves.
        for mon in Workspace.WORKSPACES[wsid].iter_monitors():
            for tiler in mon.tilers:
                tiler.destroy()
        Workspace.WORKSPACES[wsid].monitors = {}

    def __init__(self, workspace, mid, x, y, width, height):
        self.workspace = workspace
        self.id = mid
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.windows = set()   # Window objects currently on this monitor
        self.active = None     # active Window, if known
        self.tiler = None      # currently selected tiler
        self.auto = True
        self.tilers = []       # tilers enabled for this monitor
        # Attach the tilers configured for this workspace/monitor pair.
        for tile_name in config.get_option('tilers', self.workspace.id, self.id):
            if hasattr(tilers, tile_name):
                tiler = getattr(tilers, tile_name)
                self.add_tiler(tiler(self))

    def add_tiler(self, tiler):
        # Only accept tilers enabled in the global 'all_tilers' option.
        if tiler.get_name() in config.get_option('all_tilers'):
            self.tilers.append(tiler)

    def add_window(self, win):
        self.windows.add(win)
        # If this is the globally active window, remember it here too.
        if win.id == ptxcb.XROOT.get_active_window():
            self.active = win
        if self.get_tiler():
            self.get_tiler().add(win)

    def calculate_workarea(self):
        """Recompute the usable work area (wa_*) and retile.

        Starts from the raw monitor geometry and subtracts either a
        configured margin (if one is set) or the struts claimed by
        panel/dock windows discovered on the root window.
        """
        self.wa_x = self.x
        self.wa_y = self.y
        self.wa_width = self.width
        self.wa_height = self.height
        if self.get_tiler():
            margin = self.get_tiler().get_option('margin')
        else:
            margin = config.get_option('margin', self.workspace.id, self.id)
        if margin and len(margin) == 4:
            # margin = top(0) right(1) bottom(2) left(3)
            self.wa_x += margin[3]
            self.wa_y += margin[0]
            self.wa_width -= margin[1] + margin[3]
            self.wa_height -= margin[0] + margin[2]
        else:
            wids = ptxcb.XROOT.get_window_ids()
            # Keep track of what we've added...
            # If we come across a window with the same exact
            # size/position/struts, skip it!
            log = []
            for wid in wids:
                win = ptxcb.Window(wid)
                # We're listening to _NET_WORKAREA, so a panel
                # might have died before _NET_CLIENT_LIST was updated...
                try:
                    x, y, w, h = win.get_geometry()
                    d = win.get_desktop_number()
                except:
                    continue
                if self.workspace.contains(win.get_desktop_number()) and self.contains(x, y):
                    struts = win.get_strut_partial()
                    if not struts:
                        struts = win.get_strut()
                    key = (x, y, w, h, struts)
                    if key in log:
                        continue
                    log.append(key)
                    if struts and not all([struts[i] == 0 for i in struts]):
                        # Explicit non-zero struts: reserve the panel's
                        # full width/height on the claimed side(s).
                        if struts['left'] or struts['right']:
                            if struts['left']:
                                self.wa_x += w
                            self.wa_width -= w
                        if struts['top'] or struts['bottom']:
                            if struts['top']:
                                self.wa_y += h
                            self.wa_height -= h
                    elif struts:
                        # When accounting for struts on left/right, and
                        # struts are reported properly, x shouldn't be
                        # zero. Similarly for top/bottom and y.
                        if x > 0 and self.width == (x + w):
                            self.wa_width -= w
                        elif y > 0 and self.height == (y + h):
                            self.wa_height -= h
                        elif x > 0 and self.wa_x == x:
                            self.wa_x += w
                            self.wa_width -= w
                        elif y > 0 and self.wa_y == y:
                            self.wa_y += h
                            self.wa_height -= h
        self.tile()

    def contains(self, x, y):
        """Return True if the point (x, y) falls on this monitor."""
        if x >= self.x and y >= self.y and x < (self.x + self.width) and y < (self.y + self.height):
            return True
        # Negative (off-screen) coordinates are attributed to the
        # monitor at the origin.
        if (x < 0 or y < 0) and self.x == 0 and self.y == 0:
            return True
        return False

    def cycle(self, tiler_name=None):
        """Switch to the next tiler, or to tiler_name if given.

        Unknown names are ignored. Tiling is forced when a name was
        requested explicitly or the current tiler was actively tiling.
        """
        force_tiling = False
        named = [t.get_name() for t in self.tilers]
        named_tiler = None
        if tiler_name and tiler_name in named:
            named_tiler = self.tilers[named.index(tiler_name)]
            force_tiling = True
        elif tiler_name:
            return
        if self.get_tiler() and self.get_tiler().tiling:
            force_tiling = True
            self.get_tiler().detach()
        if named_tiler:
            self.tiler = named_tiler
        else:
            # Round-robin through the attached tilers.
            self.tiler = self.tilers[
                (self.tilers.index(self.tiler) + 1) % len(self.tilers)
            ]
        self.calculate_workarea()
        self.tile(force_tiling)

    def get_active(self):
        # Fall back to an arbitrary window when none is marked active.
        if not self.active:
            if self.windows:
                self.active = [w for w in self.windows][0]
        return self.active

    def get_tiler(self):
        # Lazily default to the first attached tiler, if any.
        if not self.tilers:
            return None
        if not self.tiler:
            self.tiler = self.tilers[0]
        return self.tiler

    def iter_windows(self):
        # Iterate over a snapshot so callers may mutate self.windows.
        copy = set(self.windows)
        for win in copy:
            yield win

    def refresh_bounds(self, x, y, width, height):
        # Update the raw geometry (e.g. after a Xinerama change).
        self.x = x
        self.y = y
        self.width = width
        self.height = height

    def remove_window(self, win):
        if win in self.windows:
            self.windows.remove(win)
        # Re-pick an active window if the removed one was active.
        if self.active == win:
            if self.windows:
                self.active = [w for w in self.windows][0]
            else:
                self.active = None
        if self.get_tiler():
            self.get_tiler().remove(win)

    def tile(self, force_tiling=False):
        # Queue a retile on the current tiler, if one is attached.
        tiler = self.get_tiler()
        if tiler:
            self.get_tiler().enqueue(force_tiling=force_tiling)

    def tile_reset(self):
        # Replace the current tiler with a fresh instance of the same
        # class and force a retile.
        i = self.tilers.index(self.get_tiler())
        tile_name = self.tilers[i].get_name()
        if hasattr(tilers, tile_name):
            self.get_tiler().detach()
            self.tilers[i] = getattr(tilers, tile_name)(self)
            self.tiler = self.tilers[i]
            self.tile(force_tiling=True)

    def __str__(self):
        return 'Monitor %d - [WORKSPACE: %d, X: %d, Y: %d, Width: %d, Height: %d]' % (
            self.id, self.workspace.id, self.x, self.y, self.width, self.height
        )
| Python |
import time
import ptxcb
import config
from workspace import Workspace
class Window(object):
    """Wrapper around an X client window managed by PyTyle.

    Caches EWMH properties, tracks geometry, and knows which monitor
    and tiling container the window belongs to. The class attribute
    WINDOWS maps X window ids to Window instances.
    """

    WINDOWS = {}   # wid -> Window, all managed windows

    @staticmethod
    def add(wid):
        # Register a new window if it is manageable and not yet known.
        # Returns the new Window, or None if it was skipped.
        if wid not in Window.WINDOWS:
            if Window.manageable(wid):
                win = Window(wid)
                Window.WINDOWS[wid] = win
                return win
        return None

    @staticmethod
    def deep_lookup(wid):
        # Look up wid, recursing into its X child windows until a
        # managed Window is found (or None).
        ret = Window.lookup(wid)
        if ret:
            return ret
        children = ptxcb.Window(wid).query_tree_children()
        if children:
            for child_wid in children:
                ret = Window.deep_lookup(child_wid)
                if ret:
                    return ret
        return None

    @staticmethod
    def lookup(wid):
        # Direct registry lookup; None when unknown.
        if wid in Window.WINDOWS:
            return Window.WINDOWS[wid]
        return None

    @staticmethod
    def manageable(wid):
        # A window is manageable when it is a "normal" window (or has
        # no type set) and carries none of the excluded EWMH states.
        win = ptxcb.Window(wid)
        win_types = win.get_types()
        if not win_types or '_NET_WM_WINDOW_TYPE_NORMAL' in win_types:
            states = win.get_states()
            if ('_NET_WM_STATE_MODAL' not in states and
                '_NET_WM_STATE_SHADED' not in states and
                '_NET_WM_STATE_SKIP_TASKBAR' not in states and
                '_NET_WM_STATE_SKIP_PAGER' not in states and
                '_NET_WM_STATE_FULLSCREEN' not in states):
                return True
        return False

    @staticmethod
    def remove(wid):
        # Unregister the window and detach it from its monitor.
        win = Window.lookup(wid)
        if win:
            del Window.WINDOWS[wid]
            win.monitor.remove_window(win)
            return win
        return None

    def __init__(self, wid):
        self.id = wid
        self._xwin = ptxcb.Window(wid)
        self.container = None          # tiling Container, if tiled
        self.monitor = None            # Monitor this window sits on
        self.floating = False
        self.pytyle_moved_time = 0     # timestamp of our last moveresize
        self.moving = False
        # Cached EWMH/PyTyle properties, refreshed by update_* methods.
        self.properties = {
            '_NET_WM_NAME': '',
            '_NET_WM_DESKTOP': '',
            '_NET_WM_WINDOW_TYPE': set(),
            '_NET_WM_STATE': set(),
            '_NET_WM_ALLOWED_ACTIONS': set(),
            '_PYTYLE_TYPE': set(),
            '_NET_FRAME_EXTENTS': {
                'top': 0, 'left': 0, 'right': 0, 'bottom': 0
            }
        }
        self.load_geometry()
        self.load_properties()
        # Remember the original geometry/state so it can be restored.
        self.ox, self.oy, self.owidth, self.oheight = self._xwin.get_geometry()
        self.omaximized = self.maximized()
        self.odecorated = self.decorated()
        self._xwin.listen()

    def activate(self):
        self._xwin.activate()

    def decorations(self, toggle):
        # Enable (True) or strip (False) window decorations.
        if toggle:
            self._xwin.add_decorations()
        else:
            self._xwin.remove_decorations()

    def get_property(self, pname):
        assert pname in self.properties
        return self.properties[pname]

    def get_tiler(self):
        # The tiler managing this window, via its container (or None).
        if self.container and self.container.tiler:
            return self.container.tiler
        return None

    def get_winclass(self):
        # First element of WM_CLASS, or '' if unset.
        cls = self._xwin.get_class()
        if cls:
            return cls[0]
        return ''

    def set_below(self, below):
        self._xwin.set_below(below)

    def lives(self):
        # True if the X window still answers property requests.
        try:
            self._xwin.get_desktop_number()
            return True
        except:
            return False

    def load_geometry(self):
        self.x, self.y, self.width, self.height = self._xwin.get_geometry()

    def load_properties(self):
        # _NET_WM_DESKTOP must come early: its updater triggers
        # update_monitor(), which other state may rely on.
        property_order = [
            '_NET_WM_NAME',
            '_NET_WM_DESKTOP',
            '_NET_WM_WINDOW_TYPE',
            '_NET_WM_STATE',
            '_NET_WM_ALLOWED_ACTIONS',
            '_NET_FRAME_EXTENTS',
            '_PYTYLE_TYPE'
        ]
        for pname in property_order:
            self.update_property(pname)

    def maximize(self):
        self._xwin.maximize()

    def decorated(self):
        # Openbox-specific: undecorated windows carry this state atom.
        states = self.properties['_NET_WM_STATE']
        if '_OB_WM_STATE_UNDECORATED' in states:
            return False
        return True

    def maximized(self):
        # Maximized means both the vertical AND horizontal state atoms.
        states = self.properties['_NET_WM_STATE']
        if '_NET_WM_STATE_MAXIMIZED_VERT' in states and '_NET_WM_STATE_MAXIMIZED_HORZ' in states:
            return True
        return False

    def moveresize(self, x, y, width, height):
        # Record the timestamp so event handlers can distinguish our
        # own moves from user-initiated ones.
        self.x, self.y, self.width, self.height = x, y, width, height
        self.pytyle_moved_time = time.time()
        self._xwin.restore()
        self._xwin.moveresize(x, y, width, height)

    def original_state(self):
        # Restore decorations and geometry captured at construction.
        self.decorations(self.odecorated)
        if self.omaximized:
            self.maximize()
        else:
            self._xwin.moveresize(self.ox, self.oy, self.owidth, self.oheight)

    def pytyle_place_holder(self):
        # True for PyTyle's own invisible filler windows.
        return '_PYTYLE_TYPE_PLACE_HOLDER' in self.properties['_PYTYLE_TYPE']

    def restack(self, below=False):
        self._xwin.stack(not below)

    def set_container(self, container):
        self.container = container

    def set_geometry(self, x, y, w, h):
        self.x, self.y, self.width, self.height = x, y, w, h
        self.update_monitor()

    def set_monitor(self, wsid, mid):
        # Move this window between monitors, updating both sides.
        new_mon = Workspace.WORKSPACES[wsid].get_monitor(mid)
        if new_mon != self.monitor:
            if self.monitor and self in self.monitor.windows:
                self.monitor.remove_window(self)
            self.monitor = new_mon
            self.monitor.add_window(self)

    def tilable(self):
        # Floating, hidden, and place-holder windows are not tiled.
        if self.floating:
            return False
        states = self.properties['_NET_WM_STATE']
        if '_NET_WM_STATE_HIDDEN' in states or self.pytyle_place_holder():
            return False
        return True

    def update_monitor(self):
        # Re-derive the owning monitor from the cached desktop + x/y.
        workspace = Workspace.WORKSPACES[self.properties['_NET_WM_DESKTOP']]
        new_mon = workspace.get_monitor_xy(self.x, self.y)
        if new_mon:
            self.set_monitor(new_mon.workspace.id, new_mon.id)

    def update_property(self, pname):
        # Dispatch to update_NET_..., built from the property name
        # (pname starts with '_', so 'update' + pname lines up).
        mname = 'update%s' % pname
        if hasattr(self, mname):
            m = getattr(self, mname)
            m()

    def update_NET_WM_NAME(self):
        self.properties['_NET_WM_NAME'] = self._xwin.get_name() or 'N/A'
        self.name = self.properties['_NET_WM_NAME']

    def update_NET_FRAME_EXTENTS(self):
        # Frame size changed; refit the window inside its container.
        self.properties['_NET_FRAME_EXTENTS'] = self._xwin.get_frame_extents()
        if self.container:
            self.container.fit_window()

    def update_NET_WM_DESKTOP(self):
        self.properties['_NET_WM_DESKTOP'] = self._xwin.get_desktop_number()
        self.load_geometry()
        self.update_monitor()

    def update_NET_WM_WINDOW_TYPE(self):
        self.properties['_NET_WM_WINDOW_TYPE'] = self._xwin.get_types()

    def update_NET_WM_STATE(self):
        # Diff old vs. new state sets to react to transitions.
        old = self.properties['_NET_WM_STATE']
        new = self._xwin.get_states()
        self.properties['_NET_WM_STATE'] = new
        removed = old - new
        added = new - old
        if self.container:
            if '_OB_WM_STATE_UNDECORATED' in removed or '_OB_WM_STATE_UNDECORATED' in added:
                self.container.fit_window()
            elif '_NET_WM_STATE_HIDDEN' in added:
                self.container.tiler.remove(self)
        elif self.monitor and self.monitor.get_tiler() and '_NET_WM_STATE_HIDDEN' in removed:
            # Brief delay before re-adding -- presumably to let the WM
            # finish un-hiding the window; TODO confirm.
            time.sleep(0.2)
            self.monitor.get_tiler().add(self)

    def update_NET_WM_ALLOWED_ACTIONS(self):
        self.properties['_NET_WM_ALLOWED_ACTIONS'] = self._xwin.get_allowed_actions()

    def update_PYTYLE_TYPE(self):
        self.properties['_PYTYLE_TYPE'] = self._xwin.get_pytyle_types()

    def __str__(self):
        # Pad/truncate the name to a fixed width, replacing non-ASCII
        # characters with spaces.
        length = 30
        padded_name = ''.join([' ' if ord(c) > 127 else c for c in self.name[0:length].strip()])
        spaces = length - len(padded_name)
        padded_name += ' ' * spaces
        return '%s - [ID: %d, WORKSPACE: %d, MONITOR: %d, X: %d, Y: %d, Width: %d, Height: %d]' % (
            padded_name, self.id, self.monitor.workspace.id, self.monitor.id, self.x, self.y, self.width, self.height
        )

    def DEBUG_sanity_move_resize(self):
        # Debug helper: verify that a no-op moveresize leaves the
        # geometry unchanged (detects WM-applied offsets).
        print '-' * 30
        print self.name
        print '-' * 15
        x1, y1, w1, h1 = self._xwin.get_geometry()
        print 'Originals'
        print x1, y1, w1, h1
        print '-' * 15
        self._xwin._moveresize(x1, y1, w1, h1)
        x2, y2, w2, h2 = self._xwin.get_geometry()
        print 'After move/resize'
        print x2, y2, w2, h2
        print '-' * 15
        if x1 == x2 and y1 == y2 and w1 == w2 and h1 == h2:
            print 'EXCELLENT!'
        else:
            print 'Bad form Peter...'
        print '-' * 30, '\n'
class BogusWindow(Window):
    """An invisible placeholder window used to fill empty containers.

    Mimics enough of Window's interface that the tilers can treat it
    like a real client, but is never itself tiled.
    """

    def __init__(self, wsid, x, y, w, h, color=0x000000):
        self._xwin = ptxcb.BlankWindow(wsid, x, y, w, h, color)
        self.id = self._xwin.wid
        self.container = None
        self.monitor = None
        self.floating = False
        self.pytyle_moved_time = 0
        self.moving = False
        self.name = 'Place holder'
        self.properties = {
            '_NET_WM_NAME': 'Place holder',
            '_NET_WM_DESKTOP': wsid,
            '_NET_WM_WINDOW_TYPE': set(),
            '_NET_WM_STATE': set(),
            '_NET_WM_ALLOWED_ACTIONS': set(),
            # BUG FIX: set('_PYTYLE_TYPE_PLACE_HOLDER') built a set of
            # the string's individual characters, so the atom itself
            # was never a member of the set. Wrap it in a list to get
            # a one-element set containing the full atom name.
            '_PYTYLE_TYPE': set(['_PYTYLE_TYPE_PLACE_HOLDER']),
            '_NET_FRAME_EXTENTS': {
                'top': 0, 'left': 0, 'right': 0, 'bottom': 0
            }
        }
        self.x, self.y = x, y
        self.width, self.height = w, h
        self.update_monitor()
        self.omaximized = self.maximized()
        self.odecorated = self.decorated()
        self._xwin.listen()
        Window.WINDOWS[self.id] = self

    def pytyle_place_holder(self):
        # Always a place holder, regardless of the cached property.
        return True

    def tilable(self):
        # Place holders are never tiled as real windows.
        return False

    def close(self):
        # Destroy the X window and detach from the monitor.
        self._xwin.close()
        if self.monitor:
            self.monitor.remove_window(self)
| Python |
import ptxcb
import config
from window import BogusWindow
class Container(object):
    """A tile slot holding at most one window.

    A container owns a rectangle inside the tiler's layout; its window
    is fitted to that rectangle minus padding. Empty containers get a
    BogusWindow place holder. The class attribute `active` tracks the
    container that currently has focus so border colors can be swapped.
    """

    idinc = 1        # next container id to hand out
    active = None    # the Container currently holding the focus

    @staticmethod
    def manage_focus(win):
        # Activate borders on win's container; otherwise restore normal
        # borders on whatever container was previously active.
        if win and win.container:
            win.container.borders_activate(win.container.tiler.decor)
        elif Container.active:
            Container.active.borders_normal(Container.active.tiler.decor)

    def __init__(self, tiler, win=None):
        self.tiler = tiler
        self.id = Container.idinc
        self.x, self.y, self.w, self.h = 0, 0, 1, 1
        self.empty = True
        self.default_color = self.tiler.get_option('borders_inactive_color')
        self.set_window(win)
        Container.idinc += 1
        # The four thin line windows that draw this container's border.
        self._box = {
            'htop': None, 'hbot': None,
            'vleft': None, 'vright': None
        }

    def activate(self):
        # Focus the contained window, or just light up the borders when
        # the container is empty.
        if self.win:
            self.win.activate()
        else:
            self.borders_activate(self.tiler.decor)

    def borders_activate(self, decor):
        # Become the active container; repaint the previous one first.
        if Container.active and Container.active != self:
            Container.active.borders_normal(Container.active.tiler.decor)
        Container.active = self
        # Borders are only drawn when decorations are disabled.
        if not decor:
            self.box_show(self.tiler.get_option('borders_active_color'))

    def borders_normal(self, decor):
        if not decor:
            self.box_show(self.default_color)

    def box_hide(self):
        # Destroy any existing border line windows.
        for box in self._box.values():
            if box:
                box.close()

    def box_show(self, color):
        """Draw the four border lines around this container."""
        if not self.tiler.borders:
            return
        x, y, w, h = self.x, self.y, self.w, self.h
        bw = self.tiler.get_option('border_width')
        self.box_hide()
        # Only draw on the currently visible desktop.
        if self.tiler.workspace.id == ptxcb.XROOT.get_current_desktop():
            self._box['htop'] = ptxcb.LineWindow(self.tiler.workspace.id, x, y, w, bw, color)
            self._box['hbot'] = ptxcb.LineWindow(self.tiler.workspace.id, x, y + h, w, bw, color)
            self._box['vleft'] = ptxcb.LineWindow(self.tiler.workspace.id, x, y, bw, h, color)
            self._box['vright'] = ptxcb.LineWindow(self.tiler.workspace.id, x + w - bw, y, bw, h, color)

    def decorations(self, decor, do_window=True):
        # Toggle decorations on the window and keep the drawn borders
        # consistent with the new decoration state.
        if do_window and self.win:
            self.win.decorations(decor)
        if not decor:
            if self == Container.active or (self.win and self.win.id == ptxcb.XROOT.get_active_window()):
                self.borders_activate(decor)
            else:
                self.borders_normal(decor)
        else:
            self.box_hide()

    def fit_window(self):
        """Resize the contained window to this container's rectangle."""
        # Don't do anything if the pointer is on the window...
        if not self.win or self.win.moving:
            return
        if (self.x >= 0 and self.y >= 0
            and self.w > 0 and self.h > 0):
            x, y, w, h = self.x, self.y, self.w, self.h
            padding = self.tiler.get_option('padding')
            if padding and len(padding) == 4:
                # padding = top(0) right(1) bottom(2) left(3)
                x += padding[3]
                y += padding[0]
                w -= padding[1] + padding[3]
                h -= padding[0] + padding[2]
            self.win.moveresize(x, y, w, h)

    def get_name(self):
        if not self.win:
            return 'Container #%d' % self.id
        return self.win.name

    def moveresize(self, x, y, width, height):
        # Update geometry, refit the window, and redraw borders.
        self.x, self.y, self.w, self.h = x, y, width, height
        self.fit_window()
        self.decorations(self.tiler.decor)

    def still(self):
        # Re-apply the current geometry (refit + redraw in place).
        self.moveresize(self.x, self.y, self.w, self.h)

    def window_lower(self):
        if self.win:
            self.win.restack(below=True)

    def window_raise(self):
        if self.win:
            self.win.restack()

    def window_below(self, below):
        if self.win:
            self.win.set_below(below)

    def remove(self, reset_window=False):
        """Detach this container from its window and hide its borders."""
        if self.win:
            if isinstance(self.win, BogusWindow):
                self.win.close()
            elif reset_window:
                self.reset()
            else:
                self.win.decorations(True)
            self.win.set_container(None)
        self.box_hide()
        if self == Container.active:
            Container.active = None
        self.win = None
        self.empty = True

    def reset(self, reset_window=False):
        # Restore the window's pre-tiling state.
        # NOTE(review): the reset_window parameter is unused here.
        self.win.original_state()
        self.win.set_below(False)

    def set_window(self, win=None, force=False):
        """Put `win` into this container (a BogusWindow when None)."""
        if hasattr(self, 'win'):
            if not force and (self.win == win or isinstance(win, BogusWindow)):
                return
            # Release the previous occupant.
            if self.win:
                if isinstance(self.win, BogusWindow):
                    self.win.close()
                else:
                    self.win.set_container(None)
                    self.win.decorations(True)
        if not win:
            self.win = BogusWindow(
                self.tiler.workspace.id,
                self.x, self.y, self.w, self.h,
                self.tiler.get_option('placeholder_bg_color')
            )
            self.empty = True
        else:
            self.win = win
            self.empty = False
        self.win.set_container(self)

    def switch(self, cont):
        """Swap the windows of this container and `cont` in place."""
        self.win.container, cont.win.container = cont.win.container, self.win.container
        self.win, cont.win = cont.win, self.win
        # Keep the active-border highlight with the focused window.
        if Container.active == cont:
            self.borders_activate(self.tiler.decor)
        elif Container.active == self:
            cont.borders_activate(cont.tiler.decor)
        self.empty = isinstance(self.win, BogusWindow)
        cont.empty = isinstance(cont.win, BogusWindow)
        self.fit_window()
        cont.fit_window()

    def __str__(self):
        ret = 'Container #%d' % self.id
        if self.win:
            ret += '\n\t' + str(self.win)
        else:
            ret += ' - Empty'
        return ret
| Python |
import xcb.xproto
import ptxcb
import config
class Command:
    """A grabbed keybinding mapped to PyTyle command names.

    Parses a "mod-mod-key" string, grabs the key combination on the
    X root window, and stores the command name to dispatch in each of
    the global / auto / manual contexts.
    """

    _cmds = {}   # (keycode, mod_mask) -> Command
    # Modifier-name -> X modifier mask translation table.
    _mods = {
        'alt': xcb.xproto.ModMask._1,
        'ctrl': xcb.xproto.ModMask.Control,
        'shift': xcb.xproto.ModMask.Shift,
        'super': xcb.xproto.ModMask._4,
        'menu': xcb.xproto.ModMask._3
    }

    def __init__(self, keys, glbl=None, auto=None, manual=None):
        self._original_keybinding = keys
        self._mod_mask = 0
        self._keycode = 0
        self._glbl = glbl
        self._auto = auto
        self._manual = manual
        self._keys = keys
        # Each '-'-separated part is either a modifier or the key.
        for part in keys.split('-'):
            part = part.lower()
            if part in Command._mods:
                self._mod_mask |= Command._mods[part]
            elif part.capitalize() in ptxcb.keysyms:
                self._keycode = ptxcb.connection.get_keycode(ptxcb.keysyms[part.capitalize()])
            elif part in ptxcb.keysyms:
                self._keycode = ptxcb.connection.get_keycode(ptxcb.keysyms[part])
            else:
                raise Exception('Bad command syntax')
        if not self._mod_mask or not self._keycode:
            raise Exception('Commands must have a modifier and a key')
        if not ptxcb.XROOT.grab_key(self._keycode, self._mod_mask):
            print 'Could not grab key:', keys

    def get_original_keybinding(self):
        return self._original_keybinding

    def get_index(self):
        # The (keycode, modifier mask) pair used as the registry key.
        return (self._keycode, self._mod_mask)

    # This is for when we don't care if it's auto/manual
    def get_global_command(self):
        return self._glbl

    def get_auto_command(self):
        # Fall back to the global command when no auto one is set.
        if not self._auto:
            return self._glbl
        return self._auto

    def get_manual_command(self):
        # Fall back to the global command when no manual one is set.
        if not self._manual:
            return self._glbl
        return self._manual

    def unbind(self):
        # Release the X grab and drop this command from the registry.
        ptxcb.XROOT.ungrab_key(self._keycode, self._mod_mask)
        del Command._cmds[self.get_index()]

    @staticmethod
    def init():
        # (Re)build the registry from the configured keybindings.
        Command.unbind_all()
        keybindings = config.get_keybindings()
        for k in keybindings:
            cmd = Command(
                k,
                glbl=keybindings[k]['global'],
                auto=keybindings[k]['auto'],
                manual=keybindings[k]['manual']
            )
            Command._cmds[cmd.get_index()] = cmd

    @staticmethod
    def unbind_all():
        # NOTE: under Python 2, .keys() returns a list copy, so it is
        # safe to delete entries (via unbind) while iterating.
        for k in Command._cmds.keys():
            Command._cmds[k].unbind()

    @staticmethod
    def lookup(keycode, mask):
        # Filter the incoming event mask down to the modifiers we
        # actually track (ignoring e.g. lock bits) before lookup.
        vmask = 0
        for mod in Command._mods:
            if Command._mods[mod] & mask:
                vmask |= Command._mods[mod]
        if (keycode, vmask) not in Command._cmds:
            return None
        return Command._cmds[(keycode, vmask)]
| Python |
import ConfigParser
import distutils.sysconfig
import os
import os.path
import pwd
import re
import shutil
import sys
# This is PyTyle's custom configuration parser. There are three main
# goals accomplished with this sub-class:
#
# 1. It allows retrieval of some other types, like lists, booleans, and
# lists of certain types (namely, floats and ints).
# 2. It automatically parses Monitor/Workspace/Tiler specific configuration
# sections and loads them into a tuple (wsid, mid, tiler) indexed
# dictionary.
# 3. Interfaces with the "option_types" dictionary specified below, allowing
# for more automatic retrieval of configuration settings.
class PyTyleConfigParser(ConfigParser.SafeConfigParser):
    """Configuration parser with typed getters and PyTyle-aware lookups.

    Adds list/boolean/hex getters on top of SafeConfigParser, plus
    helpers that collect whole sections (Global, the keybinding
    sections) and the Workspace/Monitor/Tiler-specific sections into
    plain dictionaries.
    """

    def getboolean(self, section, option):
        # Only the literal string "yes" (any case) counts as true.
        return self.get(section, option).lower() == 'yes'

    def gethex(self, section, option):
        # Colors are stored as hexadecimal strings, e.g. "ff0000".
        return int(self.get(section, option), 16)

    def getlist(self, section, option):
        # Whitespace-separated tokens with any quoting stripped.
        tokens = self.get(section, option).split()
        return [tok.replace('"', '').replace("'", '') for tok in tokens]

    def getfloatlist(self, section, option):
        items = self.getlist(section, option)
        try:
            return [float(tok) for tok in items]
        except ValueError:
            # Not all tokens are numeric; fall back to the raw strings.
            return items

    def getintlist(self, section, option):
        items = self.getlist(section, option)
        try:
            return [int(tok) for tok in items]
        except ValueError:
            return items

    def get_option(self, section, option):
        # Dispatch through the option_types table so each option is
        # read with its declared type.
        assert option in option_types
        return option_types[option]['exec'](self, section, option)

    def _collect_section(self, section, typed):
        # Gather every option of `section` into a dict. `typed` picks
        # between typed retrieval (get_option) and raw strings (get).
        reader = self.get_option if typed else self.get
        collected = {}
        if section in self.sections():
            for option in self.options(section):
                collected[option] = reader(section, option)
        return collected

    def get_global_configs(self):
        return self._collect_section('Global', True)

    def get_global_keybindings(self):
        return self._collect_section('GlobalKeybindings', False)

    def get_auto_keybindings(self):
        return self._collect_section('AutoKeybindings', False)

    def get_manual_keybindings(self):
        return self._collect_section('ManualKeybindings', False)

    def get_wmt_configs(self):
        # Match section names built from any combination (up to three)
        # of "Workspace<N>", "Monitor<N>" and a tiler name, keying the
        # collected options by the (wsid, mid, tiler) triple.
        collected = {}
        all_tilers = self.get_option('Global', 'all_tilers')
        for section in self.sections():
            for tiler_name in all_tilers:
                m = re.match(
                    '^(Workspace([0-9]+)-?|Monitor([0-9]+)-?|' + tiler_name + '-?){1,3}$',
                    section
                )
                if not m:
                    continue
                wsid = int(m.group(2)) if m.group(2) else None
                mid = int(m.group(3)) if m.group(3) else None
                matched_tiler = tiler_name if tiler_name.lower() in section.lower() else None
                options = {}
                for option in self.options(m.group(0)):
                    options[option] = self.get_option(m.group(0), option)
                collected[(wsid, mid, matched_tiler)] = options
        return collected
# Find the configuration file
xdg = os.getenv('XDG_CONFIG_HOME')
home = os.getenv('HOME')
logname = os.getenv('LOGNAME')
user_name = pwd.getpwuid(os.getuid())[0]
config_path = None
config_filename = 'config.ini'
# Fallback config shipped inside the installed package directory.
default_file = os.path.join(
    distutils.sysconfig.get_python_lib(),
    'pt',
    config_filename
)
# Pick the per-user config directory: prefer $XDG_CONFIG_HOME, then
# $HOME, then $LOGNAME / the passwd user name as last resorts.
if xdg:
    config_path = os.path.join(xdg, 'pytyle2')
elif home:
    config_path = os.path.join(home, '.config', 'pytyle2')
elif logname:
    config_path = os.path.join(logname, '.config', 'pytyle2')
elif user_name:
    config_path = os.path.join(user_name, '.config', 'pytyle2')
# A list of supported options independent of section header.
# Please do not change settings here. The settings specified here
# are the minimal required for PyTyle to function properly.
#
# Each entry maps an option name to its typed getter ('exec', an
# unbound PyTyleConfigParser method) and its fallback 'default' used
# when the option is absent from every applicable section.
option_types = {
    'all_tilers': {
        'exec': PyTyleConfigParser.getlist,
        'default': ['Vertical']
    },
    'movetime_offset': {
        'exec': PyTyleConfigParser.getfloat,
        'default': 0.5
    },
    'tilers': {
        'exec': PyTyleConfigParser.getlist,
        'default': ['Vertical']
    },
    'ignore': {
        'exec': PyTyleConfigParser.getlist,
        'default': []
    },
    'decorations': {
        'exec': PyTyleConfigParser.getboolean,
        'default': True
    },
    'borders': {
        'exec': PyTyleConfigParser.getboolean,
        'default': True
    },
    'border_width': {
        'exec': PyTyleConfigParser.getint,
        'default': 2
    },
    'borders_active_color': {
        'exec': PyTyleConfigParser.gethex,
        'default': 0xff0000,
    },
    'borders_inactive_color': {
        'exec': PyTyleConfigParser.gethex,
        'default': 0x008800,
    },
    'borders_catchall_color': {
        'exec': PyTyleConfigParser.gethex,
        'default': 0x3366ff,
    },
    'placeholder_bg_color': {
        'exec': PyTyleConfigParser.gethex,
        'default': 0x000000,
    },
    'margin': {
        'exec': PyTyleConfigParser.getintlist,
        'default': []
    },
    'padding': {
        'exec': PyTyleConfigParser.getintlist,
        'default': []
    },
    'always_monitor_cmd': {
        'exec': PyTyleConfigParser.getboolean,
        'default': False
    },
    'tile_on_startup': {
        'exec': PyTyleConfigParser.getboolean,
        'default': False
    },
    'step_size': {
        'exec': PyTyleConfigParser.getfloat,
        'default': 0.05
    },
    'width_factor': {
        'exec': PyTyleConfigParser.getfloat,
        'default': 0.5
    },
    'height_factor': {
        'exec': PyTyleConfigParser.getfloat,
        'default': 0.5
    },
    'rows': {
        'exec': PyTyleConfigParser.getint,
        'default': 2
    },
    'columns': {
        'exec': PyTyleConfigParser.getint,
        'default': 2
    },
    'push_down': {
        'exec': PyTyleConfigParser.getint,
        'default': 25
    },
    'push_over': {
        'exec': PyTyleConfigParser.getint,
        'default': 0
    },
    'horz_align': {
        'exec': PyTyleConfigParser.get,
        'default': 'left'
    },
    'shallow_resize': {
        'exec': PyTyleConfigParser.getboolean,
        'default': True
    }
}
# Specified in the "(Auto|Manual)Keybindings" section
keybindings = {}
# Settings specified in the "Global" section
glbls = {}
# A tuple (wsid, mid, tiler) indexed dictionary that allows for
# Monitor/Workspace/Tiler specific settings. The order of precedence
# (in descending order) is as follows:
#
# Workspace/Monitor/Tiler
# Workspace/Monitor
# Workspace/Tiler
# Monitor/Tiler
# Workspace
# Monitor
# Tiler
# Globals
# Defaults (specified in option_types above)
#
# Options can be specified in section headers. The following are some
# valid examples:
#
# [Workspace0-Monitor1] or [Monitor1-Workspace0]
# Specifies options that only apply to the monitor indexed at 1 on
# the first workspace.
#
# [Horizontal]
# Specifies options that only apply to the Horizontal tiling layout.
#
# [Monitor0-Vertical] or [Vertical-Monitor0]
# Specifies options that only apply to the Vertical tiling layout on the
# monitor indexed at 0.
#
# [Monitor2-Horizontal-Workspace3] or any ordering thereof
# Specifies options that only apply to the Horizontal tiling layout on
# the monitor indexed at 2 and the fourth workspace.
#
# Essentially, any combination of "Workspace#", "Monitor#", or "[Tiling
# layout name]" is valid.
wmt = {}
# Loads the configuration file. This is called automatically when
# this module is imported, but it can also be called again when
# the settings ought to be refreshed.
# If no configuration file exists, create one.
def load_config_file():
global glbls, keybindings, wmt, paths
# Find the configuration file... create one if it doesn't exist
if not config_path:
config_file = default_file
else:
config_file = os.path.join(config_path, config_filename)
if not os.access(config_file, os.F_OK | os.R_OK):
if not os.path.exists(config_path):
os.makedirs(config_path)
if os.access(default_file, os.F_OK | os.R_OK):
shutil.copyfile(default_file, config_file)
# Something went wrong...
if not os.access(config_file, os.F_OK | os.R_OK):
config_file = default_file
if not os.access(config_file, os.F_OK | os.R_OK):
print '''
The configuration file could not be loaded. Please check to make
sure a configuration file exists at ~/.config/pytyle2/config.ini
or in the Python package directory.
'''
sys.exit(0)
conf = PyTyleConfigParser()
conf.read(config_file)
glbls = {}
keybindings = {}
k_global = conf.get_global_keybindings()
k_auto = conf.get_auto_keybindings()
k_manual = conf.get_manual_keybindings()
for k in k_global:
keybindings[k] = {
'global': k_global[k],
'auto': None,
'manual': None
}
for k in k_auto:
if k not in keybindings:
keybindings[k] = {
'global': None,
'auto': None,
'manual': None
}
keybindings[k]['auto'] = k_auto[k]
for k in k_manual:
if k not in keybindings:
keybindings[k] = {
'global': None,
'auto': None,
'manual': None
}
keybindings[k]['manual'] = k_manual[k]
glbls = conf.get_global_configs()
wmt = conf.get_wmt_configs()
def get_keybindings():
    """Public accessor for the merged keybinding table."""
    # Reading a module global needs no `global` declaration.
    return keybindings
# A public accessor to obtain a value for an option. It takes
# precedence into account (see the comments on the "wmt" variable
# above), therefore this function should always be called with the
# most information available, unless otherwise desired.
#
# Raises KeyError for an option that has no entry anywhere, including
# option_types (same as before -- unknown options were never valid).
def get_option(option, wsid=None, mid=None, tiler=None):
    # Cascade up... Generate lookup tuples, in descending precedence.
    attempts = [
        (wsid, mid, tiler),
        (wsid, mid, None),
        (wsid, None, tiler),
        (None, mid, tiler),
        (wsid, None, None),
        (None, mid, None),
        (None, None, tiler)
    ]
    for lookup in attempts:
        if lookup in wmt and option in wmt[lookup]:
            return wmt[lookup][option]
    # Fall back to the "Global" section, then to the built-in default.
    # (The original had an unreachable `return None` after this if/else.)
    if option in glbls:
        return glbls[option]
    return option_types[option]['default']
# Populate the settings once at import time.
load_config_file()
| Python |
import struct
import xcb.xproto, xcb.xcb, xcb.xinerama, xcb.randr
# Shared X connection state; populated by init() at the bottom of this module.
conn = None  # the xcb connection object
setup = None  # the connection's setup info (screens, keycode range, ...)
# Keyboard mapping caches filled by init_keymap().
syms_to_codes = {}  # keysym -> first keycode producing it
codes_to_syms = {}  # keycode -> list of keysyms (column order)
def init():
    """Open the X connection, cache the setup info and build the keymap."""
    global conn, setup
    c = xcb.xcb.connect()
    conn = c
    setup = c.get_setup()
    init_keymap()
def init_keymap():
    """Fetch the server's keyboard mapping and fill the keysym/keycode caches."""
    global setup, syms_to_codes, codes_to_syms
    first = setup.min_keycode
    count = setup.max_keycode - first + 1
    mapping = get_core().GetKeyboardMapping(first, count).reply()
    per_code = mapping.keysyms_per_keycode
    for idx, sym in enumerate(mapping.keysyms):
        code = first + idx // per_code
        # The first keycode seen for a keysym wins.
        syms_to_codes.setdefault(sym, code)
        codes_to_syms.setdefault(code, []).append(sym)
def disconnect():
    """Close the shared X connection."""
    conn.disconnect()
def flush():
    """Flush buffered requests to the X server."""
    conn.flush()
def get_core():
    """Return the core-protocol request object of the shared connection."""
    return conn.core
def get_extensions():
    """Return the lower-cased names of every extension the server supports."""
    reply = get_core().ListExtensions().reply()
    return [''.join(chr(c) for c in ext.name).lower() for ext in reply.names]
def get_keycode(keysym):
    """Map a keysym to the (first) keycode that produces it.

    Raises KeyError for an unmapped keysym, as before.
    """
    return syms_to_codes[keysym]
def get_keysym(keycode):
    """Map a keycode to its primary (column 0) keysym."""
    return codes_to_syms[keycode][0]
def push():
    """Flush outgoing requests, then block until the server processed them."""
    flush()
    xsync()
def xinerama_get_screens():
    """Query the Xinerama extension and return one geometry dict per screen."""
    xinerama = conn(xcb.xinerama.key)
    info = xinerama.QueryScreens().reply().screen_info
    # NOTE: RandR-based screen discovery (e.g. for nVidia TwinView) was
    # sketched here by the original author but never enabled.
    return [
        {
            'x': s.x_org,
            'y': s.y_org,
            'width': s.width,
            'height': s.height
        }
        for s in info
    ]
def xsync():
    # Round-trip to the server: GetInputFocus is the traditional no-op
    # request used to wait until all prior requests have been processed.
    try:
        get_core().GetInputFocus().reply()
    except:
        # Best-effort on purpose: a dying connection is silently ignored.
        # (Bare except also swallows KeyboardInterrupt -- TODO: narrow.)
        return
# Connect to the X server as soon as this module is imported.
init()
| Python |
import struct
import xcb.xproto
import connection
from atoms import atoms
class Atom:
    """Cache of X atom names and ids, plus decoding helpers for the
    static atom tables defined in the atoms module."""

    # name (str) -> atom id (int), filled by build_cache()/get_atom().
    _cache = {}

    @staticmethod
    def build_cache():
        """Intern every atom in the atoms table (and the type atoms they
        reference). Subsequent calls are no-ops."""
        if Atom._cache:
            return
        def intern(name):
            return connection.get_core().InternAtom(
                False,
                len(name),
                name
            ).reply().atom
        for name in atoms:
            Atom._cache[name] = intern(name)
            type_name = atoms[name][0]
            if type_name is not None and type_name not in Atom._cache:
                Atom._cache[type_name] = intern(type_name)

    @staticmethod
    def get_atom(name):
        """Return the atom id for name, interning it on a cache miss."""
        if not Atom._cache:
            raise Exception('Atom cache has not been built')
        if name not in Atom._cache:
            reply = connection.get_core().InternAtom(True, len(name), name).reply()
            Atom._cache[name] = reply.atom
        return Atom._cache[name]

    @staticmethod
    def get_atom_name(num):
        """Reverse lookup: atom id -> printable name."""
        reply = connection.get_core().GetAtomName(num).reply()
        return Atom.ords_to_str(reply.name)

    @staticmethod
    def get_atom_type(name):
        """Return the atom id of name's property type, or Atom.Any when
        the atoms table has no entry for it."""
        if name in atoms:
            return Atom.get_atom(atoms[name][0])
        return xcb.xproto.Atom.Any

    @staticmethod
    def get_atom_length(name):
        """Return the property format (bit width) recorded for name."""
        if name not in atoms:
            raise Exception('Atom %s does not have a stored length' % name)
        return atoms[name][1]

    @staticmethod
    def get_type_name(atom_name):
        """Return the recorded type name for atom_name, or None when unknown."""
        if atom_name in atoms:
            return atoms[atom_name][0]
        return None

    @staticmethod
    def ords_to_str(ords):
        """Collapse a sequence of byte values into an ASCII string,
        silently dropping anything >= 128."""
        return ''.join(chr(o) for o in ords if o < 128)

    @staticmethod
    def null_terminated_to_strarray(ords):
        """Split a NUL-delimited byte sequence into a list of strings.

        A trailing fragment with no terminating NUL is discarded
        (matches the original behavior for WM_CLASS-style properties).
        """
        out = []
        buf = []
        for o in ords:
            if o:
                buf.append(chr(o))
            else:
                out.append(''.join(buf))
                buf = []
        return out
| Python |
# Property/message atoms PyTyle uses, mapped to (type name, format bits).
# A type of None with format 0 marks client-message-only atoms that are
# never read as window properties.
atoms = {
    '_PYTYLE_TYPE': ('ATOM', 32),
    '_PYTYLE_TYPE_PLACE_HOLDER': ('ATOM', 32),
    '_PYTYLE_TYPE_BORDER': ('ATOM', 32),
    'WM_CLASS': ('STRING', 8),
    'WM_HINTS': ('WM_HINTS', 32),
    'WM_NAME': ('STRING', 8),
    'WM_NORMAL_HINTS': ('WM_SIZE_HINTS', 32),
    'WM_PROTOCOLS': ('ATOM', 32),
    'WM_STATE': ('CARDINAL', 32),
    '_NET_SUPPORTED': ('ATOM', 32),
    '_NET_CLIENT_LIST': ('WINDOW', 32),
    '_NET_CLIENT_LIST_STACKING': ('WINDOW', 32),
    '_NET_NUMBER_OF_DESKTOPS': ('CARDINAL', 32),
    '_NET_DESKTOP_GEOMETRY': ('CARDINAL', 32),
    '_NET_DESKTOP_VIEWPORT': ('CARDINAL', 32),
    '_NET_CURRENT_DESKTOP': ('CARDINAL', 32),
    '_NET_DESKTOP_NAMES': ('UTF8_STRING[]', 8),
    '_NET_ACTIVE_WINDOW': ('WINDOW', 32),
    '_NET_WORKAREA': ('CARDINAL', 32),
    '_NET_SUPPORTING_WM_CHECK': ('WINDOW', 32),
    '_NET_VIRTUAL_ROOTS': ('WINDOW', 32),
    '_NET_DESKTOP_LAYOUT': ('CARDINAL', 32),
    '_NET_SHOWING_DESKTOP': ('CARDINAL', 32),
    '_NET_CLOSE_WINDOW': (None, 0),
    '_NET_MOVERESIZE_WINDOW': (None, 0),
    # NOTE(review): likely a typo for '_NET_WM_MOVERESIZE' (EWMH) -- the
    # key is runtime-significant, so it is kept as-is; verify before fixing.
    '_NET_WM_MORERESIZE': (None, 0),
    '_NET_RESTACK_WINDOW': (None, 0),
    '_NET_REQUEST_FRAME_EXTENTS': ('CARDINAL', 32),
    '_NET_WM_NAME': ('UTF8_STRING', 8),
    '_NET_WM_VISIBLE_NAME': ('UTF8_STRING', 8),
    '_NET_WM_ICON_NAME': ('UTF8_STRING', 8),
    '_NET_WM_VISIBLE_ICON_NAME': ('UTF8_STRING', 8),
    '_NET_WM_DESKTOP': ('CARDINAL', 32),
    '_NET_WM_WINDOW_TYPE': ('ATOM', 32),
    '_NET_WM_STATE': ('ATOM', 32),
    '_NET_WM_ALLOWED_ACTIONS': ('ATOM', 32),
    '_NET_WM_STRUT': ('CARDINAL', 32),
    '_NET_WM_STRUT_PARTIAL': ('CARDINAL', 32),
    '_NET_WM_ICON_GEOMETRY': ('CARDINAL', 32),
    '_NET_WM_ICON': ('CARDINAL', 32),
    '_NET_WM_PID': ('CARDINAL', 32),
    '_NET_WM_HANDLED_ICONS': ('CARDINAL', 32),
    '_NET_WM_USER_TIME': ('CARDINAL', 32),
    '_NET_FRAME_EXTENTS': ('CARDINAL', 32),
    # Openbox-specific application-type hint.
    '_OB_APP_TYPE': ('UTF8_STRING', 8),
    '_MOTIF_WM_HINTS': ('_MOTIF_WM_HINTS', 32),
}
# EWMH _NET_WM_WINDOW_TYPE values.
window_types = {
    '_NET_WM_WINDOW_TYPE_DESKTOP': ('ATOM', 32),
    '_NET_WM_WINDOW_TYPE_DOCK': ('ATOM', 32),
    '_NET_WM_WINDOW_TYPE_TOOLBAR': ('ATOM', 32),
    '_NET_WM_WINDOW_TYPE_MENU': ('ATOM', 32),
    '_NET_WM_WINDOW_TYPE_UTILITY': ('ATOM', 32),
    '_NET_WM_WINDOW_TYPE_SPLASH': ('ATOM', 32),
    '_NET_WM_WINDOW_TYPE_DIALOG': ('ATOM', 32),
    '_NET_WM_WINDOW_TYPE_DROPDOWN_MENU': ('ATOM', 32),
    '_NET_WM_WINDOW_TYPE_POPUP_MENU': ('ATOM', 32),
    '_NET_WM_WINDOW_TYPE_TOOLTIP': ('ATOM', 32),
    '_NET_WM_WINDOW_TYPE_NOTIFICATION': ('ATOM', 32),
    '_NET_WM_WINDOW_TYPE_COMBO': ('ATOM', 32),
    '_NET_WM_WINDOW_TYPE_DND': ('ATOM', 32),
    '_NET_WM_WINDOW_TYPE_NORMAL': ('ATOM', 32)
}
# EWMH _NET_WM_STATE values.
window_states = {
    '_NET_WM_STATE_MODAL': ('ATOM', 32),
    '_NET_WM_STATE_STICKY': ('ATOM', 32),
    '_NET_WM_STATE_MAXIMIZED_VERT': ('ATOM', 32),
    '_NET_WM_STATE_MAXIMIZED_HORZ': ('ATOM', 32),
    '_NET_WM_STATE_SHADED': ('ATOM', 32),
    '_NET_WM_STATE_SKIP_TASKBAR': ('ATOM', 32),
    '_NET_WM_STATE_SKIP_PAGER': ('ATOM', 32),
    '_NET_WM_STATE_HIDDEN': ('ATOM', 32),
    '_NET_WM_STATE_FULLSCREEN': ('ATOM', 32),
    '_NET_WM_STATE_ABOVE': ('ATOM', 32),
    '_NET_WM_STATE_BELOW': ('ATOM', 32),
    '_NET_WM_STATE_DEMANDS_ATTENTION': ('ATOM', 32)
}
# EWMH _NET_WM_ALLOWED_ACTIONS values.
window_allowed_actions = {
    '_NET_WM_ACTION_MOVE': ('ATOM', 32),
    '_NET_WM_ACTION_RESIZE': ('ATOM', 32),
    '_NET_WM_ACTION_MINIMIZE': ('ATOM', 32),
    '_NET_WM_ACTION_SHADE': ('ATOM', 32),
    '_NET_WM_ACTION_STICK': ('ATOM', 32),
    '_NET_WM_ACTION_MAXIMIZE_HORZ': ('ATOM', 32),
    '_NET_WM_ACTION_MAXIMIZE_VERT': ('ATOM', 32),
    '_NET_WM_ACTION_FULLSCREEN': ('ATOM', 32),
    '_NET_WM_ACTION_CHANGE_DESKTOP': ('ATOM', 32),
    '_NET_WM_ACTION_CLOSE': ('ATOM', 32),
    '_NET_WM_ACTION_ABOVE': ('ATOM', 32),
    '_NET_WM_ACTION_BELOW': ('ATOM', 32)
}
| Python |
import xcb.xproto
from window import Window
from window import XROOT
from atom import Atom
def dispatch(e):
    """Translate a raw xcb event object into a plain dict of relevant fields.

    Returns None (implicitly) for event types that are not handled here.
    """
    # Readable names for the xproto focus-notify mode/detail enum values.
    NotifyModes = {
        0: 'Normal', 1: 'Grab', 2: 'Ungrab', 3: 'WhileGrabbed'
    }
    NotifyDetails = {
        0: 'Ancestor', 1: 'Virtual', 2: 'Inferior', 3: 'Nonlinear',
        4: 'NonlinearVirtual', 5: 'Pointer', 6: 'PointerRoot',
        7: 'None'
    }
    if isinstance(e, xcb.xproto.KeyPressEvent):
        return {
            'event': 'KeyPressEvent',
            'keycode': e.detail,
            'modifiers': e.state
        }
    elif isinstance(e, xcb.xproto.ButtonPressEvent):
        # Debug dump left in by the author -- fires on every button press.
        print 'detail:', e.detail
        print 'root_x:', e.root_x
        print 'root_y:', e.root_y
        print 'event_x:', e.event_x
        print 'event_y:', e.event_y
        print 'state:', e.state
        print 'root:', e.root
        print 'event:', e.event
        print 'child:', e.child
        return {
            'event': 'ButtonPressEvent',
        }
    elif isinstance(e, xcb.xproto.ConfigureNotifyEvent):
        return {
            'event': 'ConfigureNotifyEvent',
            # The event window is the shared XROOT object when the
            # notification came from the root window itself.
            'ewin': Window(e.event) if e.event != XROOT.wid else XROOT,
            'window': Window(e.window),
            'above': Window(e.above_sibling),
            'x': e.x,
            'y': e.y,
            'width': e.width,
            'height': e.height,
        }
    elif isinstance(e, xcb.xproto.PropertyNotifyEvent):
        return {
            'event': 'PropertyNotifyEvent',
            'window': Window(e.window),
            'atom': Atom.get_atom_name(e.atom),
            'state': e.state
        }
    elif isinstance(e, xcb.xproto.FocusInEvent):
        return {
            'event': 'FocusInEvent',
            'window': Window(e.event),
            'detail': NotifyDetails[e.detail],
            'mode': NotifyModes[e.mode]
        }
    elif isinstance(e, xcb.xproto.FocusOutEvent):
        return {
            'event': 'FocusOutEvent',
            'window': Window(e.event),
            'detail': NotifyDetails[e.detail],
            'mode': NotifyModes[e.mode]
        }
# Ad-hoc event-dump debugging, kept for local use:
# print '-' * 30
# print e
# print dir(e)
# print '-' * 30
| Python |
import struct, traceback, time
import xcb.xproto, xcb.xcb
import connection
from atom import Atom
from events import events
class Window(object):
    """Wrapper around a raw X window id exposing EWMH/ICCCM operations.

    Most methods are fire-and-forget X requests; failures are generally
    swallowed and surface as False or None return values.
    """
    # Deferred (callable, args...) tuples, executed in order by exec_queue().
    queue = []
    @staticmethod
    def exec_queue():
        """Run every queued operation (e.g. moveresize) and clear the queue."""
        for tup in Window.queue:
            tup[0](*tup[1:])
        Window.queue = []
    def __init__(self, wid):
        """wid: the raw X window id this object wraps."""
        self.wid = wid
    # Helpers
    def _get_geometry(self):
        """Return (x, y, width, height) of this window, or False on error."""
        try:
            raw = connection.get_core().GetGeometry(self.wid).reply()
            return (raw.x, raw.y, raw.width, raw.height)
        except:
            return False
    def _get_property(self, atom_name):
        """Fetch a window property and decode it according to its stored type.

        Returns '' for typeless atoms, a str or list of str for string
        types, or a list of 32-bit ints otherwise.
        NOTE(review): returns None implicitly when the request fails --
        callers that immediately index the result will raise.
        """
        try:
            if not Atom.get_type_name(atom_name):
                return ''
            rsp = connection.get_core().GetProperty(
                False,
                self.wid,
                Atom.get_atom(atom_name),
                Atom.get_atom_type(atom_name),
                0,
                (2 ** 32) - 1
            ).reply()
            if Atom.get_type_name(atom_name) in ('UTF8_STRING', 'STRING'):
                # WM_CLASS is a pair of NUL-terminated strings.
                if atom_name == 'WM_CLASS':
                    return Atom.null_terminated_to_strarray(rsp.value)
                else:
                    return Atom.ords_to_str(rsp.value)
            elif Atom.get_type_name(atom_name) in ('UTF8_STRING[]', 'STRING[]'):
                return Atom.null_terminated_to_strarray(rsp.value)
            else:
                # Reinterpret the raw reply buffer as unsigned 32-bit ints.
                return list(struct.unpack('I' * (len(rsp.value) / 4), rsp.value.buf()))
        except:
            pass
    def _moveresize(self, x, y, width, height):
        """Issue an EWMH _NET_MOVERESIZE_WINDOW request.

        Bits 8-11 of the flags word mark x/y/width/height as present;
        bit 13 presumably marks the request source (pager) -- TODO confirm
        against the EWMH spec.
        """
        #print self.get_wmname(), x, y, width, height
        self._send_client_event(
            Atom.get_atom('_NET_MOVERESIZE_WINDOW'),
            [
                xcb.xproto.Gravity.NorthWest
                | 1 << 8 | 1 << 9 | 1 << 10 | 1 << 11 | 1 << 13,
                x,
                y,
                width,
                height
            ],
            32,
            xcb.xproto.EventMask.StructureNotify
        )
        connection.push()
    def _send_client_event(self, message_type, data, format=32, event_mask=xcb.xproto.EventMask.SubstructureRedirect):
        """Send a ClientMessage about this window via the root window."""
        XROOT._send_client_event_exec(self.wid, message_type, data, format, event_mask)
    def _send_client_event_exec(self, to_wid, message_type, data, format=32, event_mask=xcb.xproto.EventMask.SubstructureRedirect):
        """Build and send the raw 32-byte ClientMessage packet."""
        try:
            # Pad data to the five 32-bit slots the wire format expects.
            data = data + ([0] * (5 - len(data)))
            packed = struct.pack(
                'BBH7I',
                events['ClientMessageEvent'],
                format,
                0,
                to_wid,
                message_type,
                data[0], data[1], data[2], data[3], data[4]
            )
            connection.get_core().SendEvent(
                False,
                self.wid,
                event_mask,
                packed
            )
        except:
            print traceback.format_exc()
    def _set_property(self, atom_name, value):
        """Replace a property on this window; lists are packed as 32-bit
        ints, everything else is stringified."""
        try:
            if isinstance(value, list):
                data = struct.pack(len(value) * 'I', *value)
                data_len = len(value)
            else:
                value = str(value)
                data_len = len(value)
                data = value
            connection.get_core().ChangeProperty(
                xcb.xproto.PropMode.Replace,
                self.wid,
                Atom.get_atom(atom_name),
                Atom.get_atom_type(atom_name),
                Atom.get_atom_length(atom_name),
                data_len,
                data
            )
        except:
            print traceback.format_exc()
    def activate(self):
        """Ask the WM to focus this window, then raise it."""
        self._send_client_event(
            Atom.get_atom('_NET_ACTIVE_WINDOW'),
            [
                2,
                xcb.xcb.CurrentTime,
                self.wid
            ]
        )
        self.stack(True)
    def add_decorations(self):
        """Re-enable WM decorations (Openbox state atom, Motif hints otherwise)."""
        if XROOT.wm() == 'openbox':
            self._send_client_event(
                Atom.get_atom('_NET_WM_STATE'),
                [
                    0,
                    Atom.get_atom('_OB_WM_STATE_UNDECORATED')
                ]
            )
        else:
            self._set_property('_MOTIF_WM_HINTS', [2, 0, 1, 0, 0])
        connection.push()
    def button_pressed(self):
        """True while mouse button 1 is held down."""
        pointer = self.query_pointer()
        if xcb.xproto.KeyButMask.Button1 & pointer.mask:
            return True
        return False
    def close(self):
        """Politely ask the WM to close this window."""
        self._send_client_event(
            Atom.get_atom('_NET_CLOSE_WINDOW'),
            [
                xcb.xproto.Time.CurrentTime,
                2,
                0,
                0,
                0
            ]
        )
    def get_allowed_actions(self):
        """Set of _NET_WM_ACTION_* atom names allowed on this window."""
        return set([Atom.get_atom_name(anum) for anum in self._get_property('_NET_WM_ALLOWED_ACTIONS')])
    def get_class(self):
        """WM_CLASS as a list of strings (instance, class)."""
        return self._get_property('WM_CLASS')
    def get_desktop_number(self):
        """Desktop index this window is on, or 'all' for sticky windows."""
        ret = self._get_property('_NET_WM_DESKTOP')[0]
        if ret == 0xFFFFFFFF:
            return 'all'
        return ret
    def get_geometry(self):
        """Frame geometry (x, y, w, h) via the parent window, or False."""
        try:
            # Need to move up two parents to get proper coordinates
            # and size for KWin
            if XROOT.wm() == 'kwin':
                x, y, w, h = self.query_tree_parent().query_tree_parent()._get_geometry()
            else:
                x, y, w, h = self.query_tree_parent()._get_geometry()
            return (
                x,
                y,
                w,
                h
            )
        except:
            return False
    def get_name(self):
        """EWMH window title."""
        return self._get_property('_NET_WM_NAME')
    def get_wmname(self):
        """Legacy ICCCM window title."""
        return self._get_property('WM_NAME')
    def get_pytyle_types(self):
        """Set of PyTyle-internal type atom names on this window."""
        return set([Atom.get_atom_name(anum) for anum in self._get_property('_PYTYLE_TYPE')])
    def get_states(self):
        """Set of _NET_WM_STATE_* atom names currently set."""
        return set([Atom.get_atom_name(anum) for anum in self._get_property('_NET_WM_STATE')])
    def get_strut(self):
        """_NET_WM_STRUT as a dict, or None when unset."""
        raw = self._get_property('_NET_WM_STRUT')
        if not raw:
            return None
        return {
            'left': raw[0],
            'right': raw[1],
            'top': raw[2],
            'bottom': raw[3]
        }
    def get_strut_partial(self):
        """_NET_WM_STRUT_PARTIAL as a dict, or None when unset."""
        raw = self._get_property('_NET_WM_STRUT_PARTIAL')
        if not raw:
            return None
        return {
            'left': raw[0], 'right': raw[1],
            'top': raw[2], 'bottom': raw[3],
            'left_start_y': raw[4], 'left_end_y': raw[5],
            'right_start_y': raw[6], 'right_end_y': raw[7],
            'top_start_x': raw[8], 'top_end_x': raw[9],
            'bottom_start_x': raw[10], 'bottom_end_x': raw[11]
        }
    def get_types(self):
        """Set of _NET_WM_WINDOW_TYPE_* atom names."""
        return set([Atom.get_atom_name(anum) for anum in self._get_property('_NET_WM_WINDOW_TYPE')])
    def get_visible_name(self):
        return self._get_property('_NET_WM_VISIBLE_NAME')
    def get_frame_extents(self):
        """WM frame border sizes; all zeros when the property is unset."""
        raw = self._get_property('_NET_FRAME_EXTENTS')
        if raw:
            return {
                'left': raw[0],
                'right': raw[1],
                'top': raw[2],
                'bottom': raw[3]
            }
        else:
            return {
                'left': 0, 'right': 0,
                'top': 0, 'bottom': 0
            }
    def listen(self):
        """Subscribe to property and focus change events on this window."""
        self.set_event_masks(
            xcb.xproto.EventMask.PropertyChange |
            xcb.xproto.EventMask.FocusChange
        )
    def maximize(self):
        """Maximize both vertically and horizontally."""
        self._send_client_event(
            Atom.get_atom('_NET_WM_STATE'),
            [
                1, # _NET_WM_STATE_REMOVE = 0, _NET_WM_STATE_ADD = 1, _NET_WM_STATE_TOGGLE = 2
                Atom.get_atom('_NET_WM_STATE_MAXIMIZED_VERT'),
                Atom.get_atom('_NET_WM_STATE_MAXIMIZED_HORZ')
            ]
        )
        connection.push()
    def moveresize(self, x, y, width, height):
        """Queue a move/resize; actually performed by Window.exec_queue()."""
        Window.queue.append(
            (Window.moveresize_exec, self, x, y, width, height)
        )
    def moveresize_exec(self, x, y, width, height):
        """Move/resize, compensating for WM frame decorations."""
        try:
            # KWin reports _NET_FRAME_EXTENTS correctly...
            if XROOT.wm() == 'kwin':
                borders = self.get_frame_extents()
                w = width - (borders['left'] + borders['right'])
                h = height - (borders['top'] + borders['bottom'])
            else:
                # Infer the frame size from the difference between the
                # client geometry and the parent (frame) geometry.
                rx, ry, rwidth, rheight = self._get_geometry()
                px, py, pwidth, pheight = self.get_geometry()
                w = width - (pwidth - rwidth)
                h = height - (pheight - rheight)
            # Clamp to sane on-screen values.
            x = 0 if x < 0 else x
            y = 0 if y < 0 else y
            w = 1 if w <= 0 else w
            h = 1 if h <= 0 else h
            self._moveresize(x, y, w, h)
        except:
            return False
    def query_pointer(self):
        """Raw QueryPointer reply for this window."""
        return connection.get_core().QueryPointer(self.wid).reply()
    def query_tree_children(self):
        """List of child window ids, or False on error."""
        try:
            children = connection.get_core().QueryTree(self.wid).reply().children
            return [wid for wid in children]
        except:
            return False
    def query_tree_parent(self):
        """Parent as a Window object, or False on error."""
        try:
            return Window(connection.get_core().QueryTree(self.wid).reply().parent)
        except:
            return False
    def remove_decorations(self):
        """Strip WM decorations (Openbox state atom, Motif hints otherwise)."""
        if XROOT.wm() == 'openbox':
            self._send_client_event(
                Atom.get_atom('_NET_WM_STATE'),
                [
                    1,
                    Atom.get_atom('_OB_WM_STATE_UNDECORATED')
                ]
            )
        else:
            self._set_property('_MOTIF_WM_HINTS', [2, 0, 0, 0, 0])
        connection.push()
    def restack(self, below=False):
        """Ask the WM to restack this window (source indication 2/1)."""
        self._send_client_event(
            Atom.get_atom('_NET_RESTACK_WINDOW'),
            [
                2 if not below else 1,
                self.wid,
                0
            ]
        )
    def restore(self):
        """Undo maximize in both directions."""
        self._send_client_event(
            Atom.get_atom('_NET_WM_STATE'),
            [
                0, # _NET_WM_STATE_REMOVE = 0, _NET_WM_STATE_ADD = 1, _NET_WM_STATE_TOGGLE = 2
                Atom.get_atom('_NET_WM_STATE_MAXIMIZED_VERT'),
                Atom.get_atom('_NET_WM_STATE_MAXIMIZED_HORZ')
            ]
        )
        connection.push()
    def set_below(self, below):
        """Add or remove the _NET_WM_STATE_BELOW state."""
        self._send_client_event(
            Atom.get_atom('_NET_WM_STATE'),
            [
                1 if below else 0,
                Atom.get_atom('_NET_WM_STATE_BELOW'),
            ]
        )
        connection.push()
    def stack(self, above):
        """Raise (above=True) or lower this window directly via ConfigureWindow."""
        try:
            connection.get_core().ConfigureWindow(
                self.wid,
                xcb.xproto.ConfigWindow.StackMode,
                [xcb.xproto.StackMode.Above if above else xcb.xproto.StackMode.Below]
            )
        except:
            return False
    def set_desktop(self, desk):
        """Move this window to desktop index desk."""
        self._send_client_event(
            Atom.get_atom('_NET_WM_DESKTOP'),
            [
                desk,
                2
            ]
        )
    def set_event_masks(self, event_masks):
        """Replace this window's event mask."""
        try:
            connection.get_core().ChangeWindowAttributes(
                self.wid,
                xcb.xproto.CW.EventMask,
                [event_masks]
            )
        except:
            print traceback.format_exc()
    def set_override_redirect(self, override_redirect):
        """Toggle the override-redirect attribute (bypasses the WM)."""
        try:
            connection.get_core().ChangeWindowAttributes(
                self.wid,
                xcb.xproto.CW.OverrideRedirect,
                [override_redirect]
            )
        except:
            print traceback.format_exc()
    def grab_key(self, key, modifiers):
        """Grab a key with every Caps-Lock/Num-Lock modifier combination.

        Returns True on success, False if another client holds the grab.
        """
        try:
            addmods = [
                0,
                xcb.xproto.ModMask.Lock,
                xcb.xproto.ModMask._2,
                xcb.xproto.ModMask._2 | xcb.xproto.ModMask.Lock
            ]
            for mod in addmods:
                cook = connection.get_core().GrabKeyChecked(
                    True,
                    self.wid,
                    modifiers | mod,
                    key,
                    xcb.xproto.GrabMode.Async,
                    xcb.xproto.GrabMode.Async
                )
                cook.check()
            return True
        except xcb.xproto.BadAccess:
            return False
    def ungrab_key(self, key, modifiers):
        """Release the grabs taken by grab_key (all lock-mask variants)."""
        try:
            addmods = [
                0,
                xcb.xproto.ModMask.Lock,
                xcb.xproto.ModMask._2,
                xcb.xproto.ModMask._2 | xcb.xproto.ModMask.Lock
            ]
            for mod in addmods:
                cook = connection.get_core().UngrabKeyChecked(
                    key,
                    self.wid,
                    modifiers | mod,
                )
                cook.check()
        except:
            print traceback.format_exc()
            print 'Could not ungrab key:', modifiers, '---', key
    def unlisten(self):
        """Stop receiving any events from this window."""
        self.set_event_masks(0)
class BlankWindow(Window):
    """A solid-colored placeholder window PyTyle creates on a workspace.

    Tagged with _PYTYLE_TYPE_PLACE_HOLDER so the tiler can recognize it.
    """
    def __init__(self, wsid, x, y, width, height, color=0x000000):
        # Created on the default screen (roots[0]); not a WM-managed frame.
        self._root_depth = connection.setup.roots[0].root_depth
        self._root_visual = connection.setup.roots[0].root_visual
        self._pixel = color
        self.wid = connection.conn.generate_id()
        connection.get_core().CreateWindow(
            self._root_depth,
            self.wid,
            XROOT.wid,
            x,
            y,
            width,
            height,
            0,
            xcb.xproto.WindowClass.InputOutput,
            self._root_visual,
            xcb.xproto.CW.BackPixel,
            [self._pixel]
        )
        self._set_property('_NET_WM_NAME', 'Place holder')
        self.set_desktop(wsid)
        self._set_property('WM_NAME', 'pytyle-internal-window')
        self._set_property('WM_PROTOCOLS', [Atom.get_atom('WM_DELETE_WINDOW')])
        self._set_property(
            '_PYTYLE_TYPE',
            [
                Atom.get_atom('_PYTYLE_TYPE_PLACE_HOLDER')
            ]
        )
        #self.set_override_redirect(True)
        connection.get_core().MapWindow(self.wid)
        connection.push()
        # Re-assert geometry after mapping -- the WM may have repositioned us.
        self._moveresize(x, y, width, height)
        connection.push()
        #self.set_override_redirect(False)
        connection.push()
    def close(self):
        # Destroy outright (unlike LineWindow, which only unmaps).
        connection.get_core().DestroyWindow(self.wid)
class LineWindow(Window):
    """A thin override-redirect window used to draw tiling guide lines."""
    def __init__(self, wsid, x, y, width, height, color=0x000000):
        # Degenerate geometry: create nothing and leave wid = 0.
        if x < 0 or y < 0 or width < 1 or height < 1:
            self.wid = 0
            return
        self._root_depth = connection.setup.roots[0].root_depth
        self._root_visual = connection.setup.roots[0].root_visual
        self._pixel = color
        self.wid = connection.conn.generate_id()
        connection.get_core().CreateWindow(
            self._root_depth,
            self.wid,
            XROOT.wid,
            x,
            y,
            width,
            height,
            0,
            xcb.xproto.WindowClass.InputOutput,
            self._root_visual,
            xcb.xproto.CW.BackPixel,
            [self._pixel]
        )
        # Override-redirect: the WM must not manage/decorate this window.
        self.set_override_redirect(True)
        self._set_property('_NET_WM_NAME', 'Internal PyTyle Window')
        connection.get_core().MapWindow(self.wid)
        connection.push()
    def close(self):
        # Only unmap; the X window itself is kept around.
        connection.get_core().UnmapWindow(self.wid)
class RootWindow(Window):
    """Singleton wrapper for the X root window, exposing the EWMH root
    properties (client list, desktops, workarea, ...)."""
    _singleton = None
    @staticmethod
    def get_root_window():
        """Return the process-wide RootWindow, creating it lazily."""
        if RootWindow._singleton is None:
            RootWindow._singleton = RootWindow()
        return RootWindow._singleton
    def __init__(self):
        # Bug fix: the original did `raise RootWindow._singleton`, which
        # raises a non-exception instance (itself a TypeError). Raise a
        # real exception with a useful message instead.
        if RootWindow._singleton is not None:
            raise Exception('RootWindow is a singleton; use get_root_window()')
        self.wid = connection.setup.roots[0].root
        Atom.build_cache()
        self.windows = set()
        self.listen()
    def get_active_window(self):
        """Return the active window's id (an int, not a Window), or None."""
        raw = self._get_property('_NET_ACTIVE_WINDOW')
        if raw:
            return raw[0]
    def get_current_desktop(self):
        """Index of the currently visible desktop."""
        return self._get_property('_NET_CURRENT_DESKTOP')[0]
    def get_desktop_geometry(self):
        """Total desktop size as {'width', 'height'}."""
        raw = self._get_property('_NET_DESKTOP_GEOMETRY')
        return {
            'width': raw[0],
            'height': raw[1]
        }
    def get_desktop_layout(self):
        """Pager layout as advertised via _NET_DESKTOP_LAYOUT."""
        raw = self._get_property('_NET_DESKTOP_LAYOUT')
        return {
            # _NET_WM_ORIENTATION_HORZ = 0
            # _NET_WM_ORIENTATION_VERT = 1
            'orientation': raw[0],
            'columns': raw[1],
            'rows': raw[2],
            # _NET_WM_TOPLEFT = 0, _NET_WM_TOPRIGHT = 1
            # _NET_WM_BOTTOMRIGHT = 2, _NET_WM_BOTTOMLEFT = 3
            'starting_corner': raw[3]
        }
    def get_desktop_names(self):
        """List of desktop name strings."""
        return self._get_property('_NET_DESKTOP_NAMES')
    def get_desktop_viewport(self):
        """Raw _NET_DESKTOP_VIEWPORT value (flat list of x, y pairs)."""
        return self._get_property('_NET_DESKTOP_VIEWPORT')
    def get_name(self):
        """The root window has a fixed display name."""
        return 'ROOT'
    def get_number_of_desktops(self):
        return self._get_property('_NET_NUMBER_OF_DESKTOPS')[0]
    def get_pointer_position(self):
        """Current pointer position as (root_x, root_y)."""
        raw = self.query_pointer()
        return (raw.root_x, raw.root_y)
    def get_supported_hints(self):
        """Atom names of everything the WM advertises in _NET_SUPPORTED."""
        return [Atom.get_atom_name(anum) for anum in self._get_property('_NET_SUPPORTED')]
    def get_visible_name(self):
        return self.get_name()
    def get_window_ids(self):
        """Managed client window ids, oldest first."""
        return self._get_property('_NET_CLIENT_LIST')
    def get_window_stacked_ids(self):
        """Managed client window ids in bottom-to-top stacking order."""
        return self._get_property('_NET_CLIENT_LIST_STACKING')
    def get_window_manager_name(self):
        """Lower-cased WM name, read from the _NET_SUPPORTING_WM_CHECK window."""
        return Window(self._get_property('_NET_SUPPORTING_WM_CHECK')[0]).get_name().lower()
    def get_workarea(self):
        """Per-desktop usable area as a list of geometry dicts.

        _NET_WORKAREA is a flat list of (x, y, width, height) quadruples;
        any trailing partial quadruple is ignored (as before).
        """
        raw = self._get_property('_NET_WORKAREA')
        ret = []
        for i in range(len(raw) // 4):
            base = i * 4
            ret.append({
                'x': raw[base],
                'y': raw[base + 1],
                'width': raw[base + 2],
                'height': raw[base + 3]
            })
        return ret
    def is_showing_desktop(self):
        """True when the WM is in "show desktop" mode."""
        return self._get_property('_NET_SHOWING_DESKTOP')[0] == 1
    def listen(self):
        """Subscribe to child-structure and property changes on the root."""
        self.set_event_masks(
            xcb.xproto.EventMask.SubstructureNotify |
            xcb.xproto.EventMask.PropertyChange
        )
    def wm(self):
        """Shorthand for get_window_manager_name()."""
        return self.get_window_manager_name()
# Module-wide root window instance, created at import time.
XROOT = RootWindow.get_root_window()
| Python |
# Flat public API for the X helper package: re-export the interesting
# names so callers can use e.g. `ptxcb.Window` directly.
import atom
# Bug fix: `atoms = atoms.atoms` below referenced the atoms module
# without ever importing it, raising NameError at import time.
import atoms
import connection
import event
import keysyms
import window
Atom = atom.Atom
BlankWindow = window.BlankWindow
LineWindow = window.LineWindow
RootWindow = window.RootWindow
Window = window.Window
# Rebind the module names to the data tables they contain.
keysyms = keysyms.keysyms
atoms = atoms.atoms
XROOT = window.XROOT
| Python |
keysyms = {
'VoidSymbol': 0xffffff,
'BackSpace': 0xff08,
'Tab': 0xff09,
'Linefeed': 0xff0a,
'Clear': 0xff0b,
'Return': 0xff0d,
'Pause': 0xff13,
'Scroll_Lock': 0xff14,
'Sys_Req': 0xff15,
'Escape': 0xff1b,
'Delete': 0xffff,
'Multi_key': 0xff20,
'Codeinput': 0xff37,
'SingleCandidate': 0xff3c,
'MultipleCandidate': 0xff3d,
'PreviousCandidate': 0xff3e,
'Kanji': 0xff21,
'Muhenkan': 0xff22,
'Henkan_Mode': 0xff23,
'Henkan': 0xff23,
'Romaji': 0xff24,
'Hiragana': 0xff25,
'Katakana': 0xff26,
'Hiragana_Katakana': 0xff27,
'Zenkaku': 0xff28,
'Hankaku': 0xff29,
'Zenkaku_Hankaku': 0xff2a,
'Touroku': 0xff2b,
'Massyo': 0xff2c,
'Kana_Lock': 0xff2d,
'Kana_Shift': 0xff2e,
'Eisu_Shift': 0xff2f,
'Eisu_toggle': 0xff30,
'Kanji_Bangou': 0xff37,
'Zen_Koho': 0xff3d,
'Mae_Koho': 0xff3e,
'Home': 0xff50,
'Left': 0xff51,
'Up': 0xff52,
'Right': 0xff53,
'Down': 0xff54,
'Prior': 0xff55,
'Page_Up': 0xff55,
'Next': 0xff56,
'Page_Down': 0xff56,
'End': 0xff57,
'Begin': 0xff58,
'Select': 0xff60,
'Print': 0xff61,
'Execute': 0xff62,
'Insert': 0xff63,
'Undo': 0xff65,
'Redo': 0xff66,
'Menu': 0xff67,
'Find': 0xff68,
'Cancel': 0xff69,
'Help': 0xff6a,
'Break': 0xff6b,
'Mode_switch': 0xff7e,
'script_switch': 0xff7e,
'Num_Lock': 0xff7f,
'KP_Space': 0xff80,
'KP_Tab': 0xff89,
'KP_Enter': 0xff8d,
'KP_F1': 0xff91,
'KP_F2': 0xff92,
'KP_F3': 0xff93,
'KP_F4': 0xff94,
'KP_Home': 0xff95,
'KP_Left': 0xff96,
'KP_Up': 0xff97,
'KP_Right': 0xff98,
'KP_Down': 0xff99,
'KP_Prior': 0xff9a,
'KP_Page_Up': 0xff9a,
'KP_Next': 0xff9b,
'KP_Page_Down': 0xff9b,
'KP_End': 0xff9c,
'KP_Begin': 0xff9d,
'KP_Insert': 0xff9e,
'KP_Delete': 0xff9f,
'KP_Equal': 0xffbd,
'KP_Multiply': 0xffaa,
'KP_Add': 0xffab,
'KP_Separator': 0xffac,
'KP_Subtract': 0xffad,
'KP_Decimal': 0xffae,
'KP_Divide': 0xffaf,
'KP_0': 0xffb0,
'KP_1': 0xffb1,
'KP_2': 0xffb2,
'KP_3': 0xffb3,
'KP_4': 0xffb4,
'KP_5': 0xffb5,
'KP_6': 0xffb6,
'KP_7': 0xffb7,
'KP_8': 0xffb8,
'KP_9': 0xffb9,
'F1': 0xffbe,
'F2': 0xffbf,
'F3': 0xffc0,
'F4': 0xffc1,
'F5': 0xffc2,
'F6': 0xffc3,
'F7': 0xffc4,
'F8': 0xffc5,
'F9': 0xffc6,
'F10': 0xffc7,
'F11': 0xffc8,
'L1': 0xffc8,
'F12': 0xffc9,
'L2': 0xffc9,
'F13': 0xffca,
'L3': 0xffca,
'F14': 0xffcb,
'L4': 0xffcb,
'F15': 0xffcc,
'L5': 0xffcc,
'F16': 0xffcd,
'L6': 0xffcd,
'F17': 0xffce,
'L7': 0xffce,
'F18': 0xffcf,
'L8': 0xffcf,
'F19': 0xffd0,
'L9': 0xffd0,
'F20': 0xffd1,
'L10': 0xffd1,
'F21': 0xffd2,
'R1': 0xffd2,
'F22': 0xffd3,
'R2': 0xffd3,
'F23': 0xffd4,
'R3': 0xffd4,
'F24': 0xffd5,
'R4': 0xffd5,
'F25': 0xffd6,
'R5': 0xffd6,
'F26': 0xffd7,
'R6': 0xffd7,
'F27': 0xffd8,
'R7': 0xffd8,
'F28': 0xffd9,
'R8': 0xffd9,
'F29': 0xffda,
'R9': 0xffda,
'F30': 0xffdb,
'R10': 0xffdb,
'F31': 0xffdc,
'R11': 0xffdc,
'F32': 0xffdd,
'R12': 0xffdd,
'F33': 0xffde,
'R13': 0xffde,
'F34': 0xffdf,
'R14': 0xffdf,
'F35': 0xffe0,
'R15': 0xffe0,
'Shift_L': 0xffe1,
'Shift_R': 0xffe2,
'Control_L': 0xffe3,
'Control_R': 0xffe4,
'Caps_Lock': 0xffe5,
'Shift_Lock': 0xffe6,
'Meta_L': 0xffe7,
'Meta_R': 0xffe8,
'Alt_L': 0xffe9,
'Alt_R': 0xffea,
'Super_L': 0xffeb,
'Super_R': 0xffec,
'Hyper_L': 0xffed,
'Hyper_R': 0xffee,
'ISO_Lock': 0xfe01,
'ISO_Level2_Latch': 0xfe02,
'ISO_Level3_Shift': 0xfe03,
'ISO_Level3_Latch': 0xfe04,
'ISO_Level3_Lock': 0xfe05,
'ISO_Level5_Shift': 0xfe11,
'ISO_Level5_Latch': 0xfe12,
'ISO_Level5_Lock': 0xfe13,
'ISO_Group_Shift': 0xff7e,
'ISO_Group_Latch': 0xfe06,
'ISO_Group_Lock': 0xfe07,
'ISO_Next_Group': 0xfe08,
'ISO_Next_Group_Lock': 0xfe09,
'ISO_Prev_Group': 0xfe0a,
'ISO_Prev_Group_Lock': 0xfe0b,
'ISO_First_Group': 0xfe0c,
'ISO_First_Group_Lock': 0xfe0d,
'ISO_Last_Group': 0xfe0e,
'ISO_Last_Group_Lock': 0xfe0f,
'ISO_Left_Tab': 0xfe20,
'ISO_Move_Line_Up': 0xfe21,
'ISO_Move_Line_Down': 0xfe22,
'ISO_Partial_Line_Up': 0xfe23,
'ISO_Partial_Line_Down': 0xfe24,
'ISO_Partial_Space_Left': 0xfe25,
'ISO_Partial_Space_Right': 0xfe26,
'ISO_Set_Margin_Left': 0xfe27,
'ISO_Set_Margin_Right': 0xfe28,
'ISO_Release_Margin_Left': 0xfe29,
'ISO_Release_Margin_Right': 0xfe2a,
'ISO_Release_Both_Margins': 0xfe2b,
'ISO_Fast_Cursor_Left': 0xfe2c,
'ISO_Fast_Cursor_Right': 0xfe2d,
'ISO_Fast_Cursor_Up': 0xfe2e,
'ISO_Fast_Cursor_Down': 0xfe2f,
'ISO_Continuous_Underline': 0xfe30,
'ISO_Discontinuous_Underline': 0xfe31,
'ISO_Emphasize': 0xfe32,
'ISO_Center_Object': 0xfe33,
'ISO_Enter': 0xfe34,
'dead_grave': 0xfe50,
'dead_acute': 0xfe51,
'dead_circumflex': 0xfe52,
'dead_tilde': 0xfe53,
'dead_perispomeni': 0xfe53,
'dead_macron': 0xfe54,
'dead_breve': 0xfe55,
'dead_abovedot': 0xfe56,
'dead_diaeresis': 0xfe57,
'dead_abovering': 0xfe58,
'dead_doubleacute': 0xfe59,
'dead_caron': 0xfe5a,
'dead_cedilla': 0xfe5b,
'dead_ogonek': 0xfe5c,
'dead_iota': 0xfe5d,
'dead_voiced_sound': 0xfe5e,
'dead_semivoiced_sound': 0xfe5f,
'dead_belowdot': 0xfe60,
'dead_hook': 0xfe61,
'dead_horn': 0xfe62,
'dead_stroke': 0xfe63,
'dead_abovecomma': 0xfe64,
'dead_psili': 0xfe64,
'dead_abovereversedcomma': 0xfe65,
'dead_dasia': 0xfe65,
'dead_doublegrave': 0xfe66,
'dead_belowring': 0xfe67,
'dead_belowmacron': 0xfe68,
'dead_belowcircumflex': 0xfe69,
'dead_belowtilde': 0xfe6a,
'dead_belowbreve': 0xfe6b,
'dead_belowdiaeresis': 0xfe6c,
'dead_invertedbreve': 0xfe6d,
'dead_belowcomma': 0xfe6e,
'dead_currency': 0xfe6f,
'dead_a': 0xfe80,
'dead_A': 0xfe81,
'dead_e': 0xfe82,
'dead_E': 0xfe83,
'dead_i': 0xfe84,
'dead_I': 0xfe85,
'dead_o': 0xfe86,
'dead_O': 0xfe87,
'dead_u': 0xfe88,
'dead_U': 0xfe89,
'dead_small_schwa': 0xfe8a,
'dead_capital_schwa': 0xfe8b,
'First_Virtual_Screen': 0xfed0,
'Prev_Virtual_Screen': 0xfed1,
'Next_Virtual_Screen': 0xfed2,
'Last_Virtual_Screen': 0xfed4,
'Terminate_Server': 0xfed5,
'AccessX_Enable': 0xfe70,
'AccessX_Feedback_Enable': 0xfe71,
'RepeatKeys_Enable': 0xfe72,
'SlowKeys_Enable': 0xfe73,
'BounceKeys_Enable': 0xfe74,
'StickyKeys_Enable': 0xfe75,
'MouseKeys_Enable': 0xfe76,
'MouseKeys_Accel_Enable': 0xfe77,
'Overlay1_Enable': 0xfe78,
'Overlay2_Enable': 0xfe79,
'AudibleBell_Enable': 0xfe7a,
'Pointer_Left': 0xfee0,
'Pointer_Right': 0xfee1,
'Pointer_Up': 0xfee2,
'Pointer_Down': 0xfee3,
'Pointer_UpLeft': 0xfee4,
'Pointer_UpRight': 0xfee5,
'Pointer_DownLeft': 0xfee6,
'Pointer_DownRight': 0xfee7,
'Pointer_Button_Dflt': 0xfee8,
'Pointer_Button1': 0xfee9,
'Pointer_Button2': 0xfeea,
'Pointer_Button3': 0xfeeb,
'Pointer_Button4': 0xfeec,
'Pointer_Button5': 0xfeed,
'Pointer_DblClick_Dflt': 0xfeee,
'Pointer_DblClick1': 0xfeef,
'Pointer_DblClick2': 0xfef0,
'Pointer_DblClick3': 0xfef1,
'Pointer_DblClick4': 0xfef2,
'Pointer_DblClick5': 0xfef3,
'Pointer_Drag_Dflt': 0xfef4,
'Pointer_Drag1': 0xfef5,
'Pointer_Drag2': 0xfef6,
'Pointer_Drag3': 0xfef7,
'Pointer_Drag4': 0xfef8,
'Pointer_Drag5': 0xfefd,
'Pointer_EnableKeys': 0xfef9,
'Pointer_Accelerate': 0xfefa,
'Pointer_DfltBtnNext': 0xfefb,
'Pointer_DfltBtnPrev': 0xfefc,
'3270_Duplicate': 0xfd01,
'3270_FieldMark': 0xfd02,
'3270_Right2': 0xfd03,
'3270_Left2': 0xfd04,
'3270_BackTab': 0xfd05,
'3270_EraseEOF': 0xfd06,
'3270_EraseInput': 0xfd07,
'3270_Reset': 0xfd08,
'3270_Quit': 0xfd09,
'3270_PA1': 0xfd0a,
'3270_PA2': 0xfd0b,
'3270_PA3': 0xfd0c,
'3270_Test': 0xfd0d,
'3270_Attn': 0xfd0e,
'3270_CursorBlink': 0xfd0f,
'3270_AltCursor': 0xfd10,
'3270_KeyClick': 0xfd11,
'3270_Jump': 0xfd12,
'3270_Ident': 0xfd13,
'3270_Rule': 0xfd14,
'3270_Copy': 0xfd15,
'3270_Play': 0xfd16,
'3270_Setup': 0xfd17,
'3270_Record': 0xfd18,
'3270_ChangeScreen': 0xfd19,
'3270_DeleteWord': 0xfd1a,
'3270_ExSelect': 0xfd1b,
'3270_CursorSelect': 0xfd1c,
'3270_PrintScreen': 0xfd1d,
'3270_Enter': 0xfd1e,
'space': 0x0020,
'exclam': 0x0021,
'quotedbl': 0x0022,
'numbersign': 0x0023,
'dollar': 0x0024,
'percent': 0x0025,
'ampersand': 0x0026,
'apostrophe': 0x0027,
'quoteright': 0x0027,
'parenleft': 0x0028,
'parenright': 0x0029,
'asterisk': 0x002a,
'plus': 0x002b,
'comma': 0x002c,
'minus': 0x002d,
'period': 0x002e,
'slash': 0x002f,
'0': 0x0030,
'1': 0x0031,
'2': 0x0032,
'3': 0x0033,
'4': 0x0034,
'5': 0x0035,
'6': 0x0036,
'7': 0x0037,
'8': 0x0038,
'9': 0x0039,
'colon': 0x003a,
'semicolon': 0x003b,
'less': 0x003c,
'equal': 0x003d,
'greater': 0x003e,
'question': 0x003f,
'at': 0x0040,
'A': 0x0041,
'B': 0x0042,
'C': 0x0043,
'D': 0x0044,
'E': 0x0045,
'F': 0x0046,
'G': 0x0047,
'H': 0x0048,
'I': 0x0049,
'J': 0x004a,
'K': 0x004b,
'L': 0x004c,
'M': 0x004d,
'N': 0x004e,
'O': 0x004f,
'P': 0x0050,
'Q': 0x0051,
'R': 0x0052,
'S': 0x0053,
'T': 0x0054,
'U': 0x0055,
'V': 0x0056,
'W': 0x0057,
'X': 0x0058,
'Y': 0x0059,
'Z': 0x005a,
'bracketleft': 0x005b,
'backslash': 0x005c,
'bracketright': 0x005d,
'asciicircum': 0x005e,
'underscore': 0x005f,
'grave': 0x0060,
'quoteleft': 0x0060,
'a': 0x0061,
'b': 0x0062,
'c': 0x0063,
'd': 0x0064,
'e': 0x0065,
'f': 0x0066,
'g': 0x0067,
'h': 0x0068,
'i': 0x0069,
'j': 0x006a,
'k': 0x006b,
'l': 0x006c,
'm': 0x006d,
'n': 0x006e,
'o': 0x006f,
'p': 0x0070,
'q': 0x0071,
'r': 0x0072,
's': 0x0073,
't': 0x0074,
'u': 0x0075,
'v': 0x0076,
'w': 0x0077,
'x': 0x0078,
'y': 0x0079,
'z': 0x007a,
'braceleft': 0x007b,
'bar': 0x007c,
'braceright': 0x007d,
'asciitilde': 0x007e,
'nobreakspace': 0x00a0,
'exclamdown': 0x00a1,
'cent': 0x00a2,
'sterling': 0x00a3,
'currency': 0x00a4,
'yen': 0x00a5,
'brokenbar': 0x00a6,
'section': 0x00a7,
'diaeresis': 0x00a8,
'copyright': 0x00a9,
'ordfeminine': 0x00aa,
'guillemotleft': 0x00ab,
'notsign': 0x00ac,
'hyphen': 0x00ad,
'registered': 0x00ae,
'macron': 0x00af,
'degree': 0x00b0,
'plusminus': 0x00b1,
'twosuperior': 0x00b2,
'threesuperior': 0x00b3,
'acute': 0x00b4,
'mu': 0x00b5,
'paragraph': 0x00b6,
'periodcentered': 0x00b7,
'cedilla': 0x00b8,
'onesuperior': 0x00b9,
'masculine': 0x00ba,
'guillemotright': 0x00bb,
'onequarter': 0x00bc,
'onehalf': 0x00bd,
'threequarters': 0x00be,
'questiondown': 0x00bf,
'Agrave': 0x00c0,
'Aacute': 0x00c1,
'Acircumflex': 0x00c2,
'Atilde': 0x00c3,
'Adiaeresis': 0x00c4,
'Aring': 0x00c5,
'AE': 0x00c6,
'Ccedilla': 0x00c7,
'Egrave': 0x00c8,
'Eacute': 0x00c9,
'Ecircumflex': 0x00ca,
'Ediaeresis': 0x00cb,
'Igrave': 0x00cc,
'Iacute': 0x00cd,
'Icircumflex': 0x00ce,
'Idiaeresis': 0x00cf,
'ETH': 0x00d0,
'Eth': 0x00d0,
'Ntilde': 0x00d1,
'Ograve': 0x00d2,
'Oacute': 0x00d3,
'Ocircumflex': 0x00d4,
'Otilde': 0x00d5,
'Odiaeresis': 0x00d6,
'multiply': 0x00d7,
'Oslash': 0x00d8,
'Ooblique': 0x00d8,
'Ugrave': 0x00d9,
'Uacute': 0x00da,
'Ucircumflex': 0x00db,
'Udiaeresis': 0x00dc,
'Yacute': 0x00dd,
'THORN': 0x00de,
'Thorn': 0x00de,
'ssharp': 0x00df,
'agrave': 0x00e0,
'aacute': 0x00e1,
'acircumflex': 0x00e2,
'atilde': 0x00e3,
'adiaeresis': 0x00e4,
'aring': 0x00e5,
'ae': 0x00e6,
'ccedilla': 0x00e7,
'egrave': 0x00e8,
'eacute': 0x00e9,
'ecircumflex': 0x00ea,
'ediaeresis': 0x00eb,
'igrave': 0x00ec,
'iacute': 0x00ed,
'icircumflex': 0x00ee,
'idiaeresis': 0x00ef,
'eth': 0x00f0,
'ntilde': 0x00f1,
'ograve': 0x00f2,
'oacute': 0x00f3,
'ocircumflex': 0x00f4,
'otilde': 0x00f5,
'odiaeresis': 0x00f6,
'division': 0x00f7,
'oslash': 0x00f8,
'ooblique': 0x00f8,
'ugrave': 0x00f9,
'uacute': 0x00fa,
'ucircumflex': 0x00fb,
'udiaeresis': 0x00fc,
'yacute': 0x00fd,
'thorn': 0x00fe,
'ydiaeresis': 0x00ff,
'Aogonek': 0x01a1,
'breve': 0x01a2,
'Lstroke': 0x01a3,
'Lcaron': 0x01a5,
'Sacute': 0x01a6,
'Scaron': 0x01a9,
'Scedilla': 0x01aa,
'Tcaron': 0x01ab,
'Zacute': 0x01ac,
'Zcaron': 0x01ae,
'Zabovedot': 0x01af,
'aogonek': 0x01b1,
'ogonek': 0x01b2,
'lstroke': 0x01b3,
'lcaron': 0x01b5,
'sacute': 0x01b6,
'caron': 0x01b7,
'scaron': 0x01b9,
'scedilla': 0x01ba,
'tcaron': 0x01bb,
'zacute': 0x01bc,
'doubleacute': 0x01bd,
'zcaron': 0x01be,
'zabovedot': 0x01bf,
'Racute': 0x01c0,
'Abreve': 0x01c3,
'Lacute': 0x01c5,
'Cacute': 0x01c6,
'Ccaron': 0x01c8,
'Eogonek': 0x01ca,
'Ecaron': 0x01cc,
'Dcaron': 0x01cf,
'Dstroke': 0x01d0,
'Nacute': 0x01d1,
'Ncaron': 0x01d2,
'Odoubleacute': 0x01d5,
'Rcaron': 0x01d8,
'Uring': 0x01d9,
'Udoubleacute': 0x01db,
'Tcedilla': 0x01de,
'racute': 0x01e0,
'abreve': 0x01e3,
'lacute': 0x01e5,
'cacute': 0x01e6,
'ccaron': 0x01e8,
'eogonek': 0x01ea,
'ecaron': 0x01ec,
'dcaron': 0x01ef,
'dstroke': 0x01f0,
'nacute': 0x01f1,
'ncaron': 0x01f2,
'odoubleacute': 0x01f5,
'udoubleacute': 0x01fb,
'rcaron': 0x01f8,
'uring': 0x01f9,
'tcedilla': 0x01fe,
'abovedot': 0x01ff,
'Hstroke': 0x02a1,
'Hcircumflex': 0x02a6,
'Iabovedot': 0x02a9,
'Gbreve': 0x02ab,
'Jcircumflex': 0x02ac,
'hstroke': 0x02b1,
'hcircumflex': 0x02b6,
'idotless': 0x02b9,
'gbreve': 0x02bb,
'jcircumflex': 0x02bc,
'Cabovedot': 0x02c5,
'Ccircumflex': 0x02c6,
'Gabovedot': 0x02d5,
'Gcircumflex': 0x02d8,
'Ubreve': 0x02dd,
'Scircumflex': 0x02de,
'cabovedot': 0x02e5,
'ccircumflex': 0x02e6,
'gabovedot': 0x02f5,
'gcircumflex': 0x02f8,
'ubreve': 0x02fd,
'scircumflex': 0x02fe,
'kra': 0x03a2,
'kappa': 0x03a2,
'Rcedilla': 0x03a3,
'Itilde': 0x03a5,
'Lcedilla': 0x03a6,
'Emacron': 0x03aa,
'Gcedilla': 0x03ab,
'Tslash': 0x03ac,
'rcedilla': 0x03b3,
'itilde': 0x03b5,
'lcedilla': 0x03b6,
'emacron': 0x03ba,
'gcedilla': 0x03bb,
'tslash': 0x03bc,
'ENG': 0x03bd,
'eng': 0x03bf,
'Amacron': 0x03c0,
'Iogonek': 0x03c7,
'Eabovedot': 0x03cc,
'Imacron': 0x03cf,
'Ncedilla': 0x03d1,
'Omacron': 0x03d2,
'Kcedilla': 0x03d3,
'Uogonek': 0x03d9,
'Utilde': 0x03dd,
'Umacron': 0x03de,
'amacron': 0x03e0,
'iogonek': 0x03e7,
'eabovedot': 0x03ec,
'imacron': 0x03ef,
'ncedilla': 0x03f1,
'omacron': 0x03f2,
'kcedilla': 0x03f3,
'uogonek': 0x03f9,
'utilde': 0x03fd,
'umacron': 0x03fe,
'Babovedot': 0x1001e02,
'babovedot': 0x1001e03,
'Dabovedot': 0x1001e0a,
'Wgrave': 0x1001e80,
'Wacute': 0x1001e82,
'dabovedot': 0x1001e0b,
'Ygrave': 0x1001ef2,
'Fabovedot': 0x1001e1e,
'fabovedot': 0x1001e1f,
'Mabovedot': 0x1001e40,
'mabovedot': 0x1001e41,
'Pabovedot': 0x1001e56,
'wgrave': 0x1001e81,
'pabovedot': 0x1001e57,
'wacute': 0x1001e83,
'Sabovedot': 0x1001e60,
'ygrave': 0x1001ef3,
'Wdiaeresis': 0x1001e84,
'wdiaeresis': 0x1001e85,
'sabovedot': 0x1001e61,
'Wcircumflex': 0x1000174,
'Tabovedot': 0x1001e6a,
'Ycircumflex': 0x1000176,
'wcircumflex': 0x1000175,
'tabovedot': 0x1001e6b,
'ycircumflex': 0x1000177,
'OE': 0x13bc,
'oe': 0x13bd,
'Ydiaeresis': 0x13be,
'overline': 0x047e,
'kana_fullstop': 0x04a1,
'kana_openingbracket': 0x04a2,
'kana_closingbracket': 0x04a3,
'kana_comma': 0x04a4,
'kana_conjunctive': 0x04a5,
'kana_middledot': 0x04a5,
'kana_WO': 0x04a6,
'kana_a': 0x04a7,
'kana_i': 0x04a8,
'kana_u': 0x04a9,
'kana_e': 0x04aa,
'kana_o': 0x04ab,
'kana_ya': 0x04ac,
'kana_yu': 0x04ad,
'kana_yo': 0x04ae,
'kana_tsu': 0x04af,
'kana_tu': 0x04af,
'prolongedsound': 0x04b0,
'kana_A': 0x04b1,
'kana_I': 0x04b2,
'kana_U': 0x04b3,
'kana_E': 0x04b4,
'kana_O': 0x04b5,
'kana_KA': 0x04b6,
'kana_KI': 0x04b7,
'kana_KU': 0x04b8,
'kana_KE': 0x04b9,
'kana_KO': 0x04ba,
'kana_SA': 0x04bb,
'kana_SHI': 0x04bc,
'kana_SU': 0x04bd,
'kana_SE': 0x04be,
'kana_SO': 0x04bf,
'kana_TA': 0x04c0,
'kana_CHI': 0x04c1,
'kana_TI': 0x04c1,
'kana_TSU': 0x04c2,
'kana_TU': 0x04c2,
'kana_TE': 0x04c3,
'kana_TO': 0x04c4,
'kana_NA': 0x04c5,
'kana_NI': 0x04c6,
'kana_NU': 0x04c7,
'kana_NE': 0x04c8,
'kana_NO': 0x04c9,
'kana_HA': 0x04ca,
'kana_HI': 0x04cb,
'kana_FU': 0x04cc,
'kana_HU': 0x04cc,
'kana_HE': 0x04cd,
'kana_HO': 0x04ce,
'kana_MA': 0x04cf,
'kana_MI': 0x04d0,
'kana_MU': 0x04d1,
'kana_ME': 0x04d2,
'kana_MO': 0x04d3,
'kana_YA': 0x04d4,
'kana_YU': 0x04d5,
'kana_YO': 0x04d6,
'kana_RA': 0x04d7,
'kana_RI': 0x04d8,
'kana_RU': 0x04d9,
'kana_RE': 0x04da,
'kana_RO': 0x04db,
'kana_WA': 0x04dc,
'kana_N': 0x04dd,
'voicedsound': 0x04de,
'semivoicedsound': 0x04df,
'kana_switch': 0xff7e,
'Farsi_0': 0x10006f0,
'Farsi_1': 0x10006f1,
'Farsi_2': 0x10006f2,
'Farsi_3': 0x10006f3,
'Farsi_4': 0x10006f4,
'Farsi_5': 0x10006f5,
'Farsi_6': 0x10006f6,
'Farsi_7': 0x10006f7,
'Farsi_8': 0x10006f8,
'Farsi_9': 0x10006f9,
'Arabic_percent': 0x100066a,
'Arabic_superscript_alef': 0x1000670,
'Arabic_tteh': 0x1000679,
'Arabic_peh': 0x100067e,
'Arabic_tcheh': 0x1000686,
'Arabic_ddal': 0x1000688,
'Arabic_rreh': 0x1000691,
'Arabic_comma': 0x05ac,
'Arabic_fullstop': 0x10006d4,
'Arabic_0': 0x1000660,
'Arabic_1': 0x1000661,
'Arabic_2': 0x1000662,
'Arabic_3': 0x1000663,
'Arabic_4': 0x1000664,
'Arabic_5': 0x1000665,
'Arabic_6': 0x1000666,
'Arabic_7': 0x1000667,
'Arabic_8': 0x1000668,
'Arabic_9': 0x1000669,
'Arabic_semicolon': 0x05bb,
'Arabic_question_mark': 0x05bf,
'Arabic_hamza': 0x05c1,
'Arabic_maddaonalef': 0x05c2,
'Arabic_hamzaonalef': 0x05c3,
'Arabic_hamzaonwaw': 0x05c4,
'Arabic_hamzaunderalef': 0x05c5,
'Arabic_hamzaonyeh': 0x05c6,
'Arabic_alef': 0x05c7,
'Arabic_beh': 0x05c8,
'Arabic_tehmarbuta': 0x05c9,
'Arabic_teh': 0x05ca,
'Arabic_theh': 0x05cb,
'Arabic_jeem': 0x05cc,
'Arabic_hah': 0x05cd,
'Arabic_khah': 0x05ce,
'Arabic_dal': 0x05cf,
'Arabic_thal': 0x05d0,
'Arabic_ra': 0x05d1,
'Arabic_zain': 0x05d2,
'Arabic_seen': 0x05d3,
'Arabic_sheen': 0x05d4,
'Arabic_sad': 0x05d5,
'Arabic_dad': 0x05d6,
'Arabic_tah': 0x05d7,
'Arabic_zah': 0x05d8,
'Arabic_ain': 0x05d9,
'Arabic_ghain': 0x05da,
'Arabic_tatweel': 0x05e0,
'Arabic_feh': 0x05e1,
'Arabic_qaf': 0x05e2,
'Arabic_kaf': 0x05e3,
'Arabic_lam': 0x05e4,
'Arabic_meem': 0x05e5,
'Arabic_noon': 0x05e6,
'Arabic_ha': 0x05e7,
'Arabic_heh': 0x05e7,
'Arabic_waw': 0x05e8,
'Arabic_alefmaksura': 0x05e9,
'Arabic_yeh': 0x05ea,
'Arabic_fathatan': 0x05eb,
'Arabic_dammatan': 0x05ec,
'Arabic_kasratan': 0x05ed,
'Arabic_fatha': 0x05ee,
'Arabic_damma': 0x05ef,
'Arabic_kasra': 0x05f0,
'Arabic_shadda': 0x05f1,
'Arabic_sukun': 0x05f2,
'Arabic_madda_above': 0x1000653,
'Arabic_hamza_above': 0x1000654,
'Arabic_hamza_below': 0x1000655,
'Arabic_jeh': 0x1000698,
'Arabic_veh': 0x10006a4,
'Arabic_keheh': 0x10006a9,
'Arabic_gaf': 0x10006af,
'Arabic_noon_ghunna': 0x10006ba,
'Arabic_heh_doachashmee': 0x10006be,
'Farsi_yeh': 0x10006cc,
'Arabic_farsi_yeh': 0x10006cc,
'Arabic_yeh_baree': 0x10006d2,
'Arabic_heh_goal': 0x10006c1,
'Arabic_switch': 0xff7e,
'Cyrillic_GHE_bar': 0x1000492,
'Cyrillic_ghe_bar': 0x1000493,
'Cyrillic_ZHE_descender': 0x1000496,
'Cyrillic_zhe_descender': 0x1000497,
'Cyrillic_KA_descender': 0x100049a,
'Cyrillic_ka_descender': 0x100049b,
'Cyrillic_KA_vertstroke': 0x100049c,
'Cyrillic_ka_vertstroke': 0x100049d,
'Cyrillic_EN_descender': 0x10004a2,
'Cyrillic_en_descender': 0x10004a3,
'Cyrillic_U_straight': 0x10004ae,
'Cyrillic_u_straight': 0x10004af,
'Cyrillic_U_straight_bar': 0x10004b0,
'Cyrillic_u_straight_bar': 0x10004b1,
'Cyrillic_HA_descender': 0x10004b2,
'Cyrillic_ha_descender': 0x10004b3,
'Cyrillic_CHE_descender': 0x10004b6,
'Cyrillic_che_descender': 0x10004b7,
'Cyrillic_CHE_vertstroke': 0x10004b8,
'Cyrillic_che_vertstroke': 0x10004b9,
'Cyrillic_SHHA': 0x10004ba,
'Cyrillic_shha': 0x10004bb,
'Cyrillic_SCHWA': 0x10004d8,
'Cyrillic_schwa': 0x10004d9,
'Cyrillic_I_macron': 0x10004e2,
'Cyrillic_i_macron': 0x10004e3,
'Cyrillic_O_bar': 0x10004e8,
'Cyrillic_o_bar': 0x10004e9,
'Cyrillic_U_macron': 0x10004ee,
'Cyrillic_u_macron': 0x10004ef,
'Serbian_dje': 0x06a1,
'Macedonia_gje': 0x06a2,
'Cyrillic_io': 0x06a3,
'Ukrainian_ie': 0x06a4,
'Ukranian_je': 0x06a4,
'Macedonia_dse': 0x06a5,
'Ukrainian_i': 0x06a6,
'Ukranian_i': 0x06a6,
'Ukrainian_yi': 0x06a7,
'Ukranian_yi': 0x06a7,
'Cyrillic_je': 0x06a8,
'Serbian_je': 0x06a8,
'Cyrillic_lje': 0x06a9,
'Serbian_lje': 0x06a9,
'Cyrillic_nje': 0x06aa,
'Serbian_nje': 0x06aa,
'Serbian_tshe': 0x06ab,
'Macedonia_kje': 0x06ac,
'Ukrainian_ghe_with_upturn': 0x06ad,
'Byelorussian_shortu': 0x06ae,
'Cyrillic_dzhe': 0x06af,
'Serbian_dze': 0x06af,
'numerosign': 0x06b0,
'Serbian_DJE': 0x06b1,
'Macedonia_GJE': 0x06b2,
'Cyrillic_IO': 0x06b3,
'Ukrainian_IE': 0x06b4,
'Ukranian_JE': 0x06b4,
'Macedonia_DSE': 0x06b5,
'Ukrainian_I': 0x06b6,
'Ukranian_I': 0x06b6,
'Ukrainian_YI': 0x06b7,
'Ukranian_YI': 0x06b7,
'Cyrillic_JE': 0x06b8,
'Serbian_JE': 0x06b8,
'Cyrillic_LJE': 0x06b9,
'Serbian_LJE': 0x06b9,
'Cyrillic_NJE': 0x06ba,
'Serbian_NJE': 0x06ba,
'Serbian_TSHE': 0x06bb,
'Macedonia_KJE': 0x06bc,
'Ukrainian_GHE_WITH_UPTURN': 0x06bd,
'Byelorussian_SHORTU': 0x06be,
'Cyrillic_DZHE': 0x06bf,
'Serbian_DZE': 0x06bf,
'Cyrillic_yu': 0x06c0,
'Cyrillic_a': 0x06c1,
'Cyrillic_be': 0x06c2,
'Cyrillic_tse': 0x06c3,
'Cyrillic_de': 0x06c4,
'Cyrillic_ie': 0x06c5,
'Cyrillic_ef': 0x06c6,
'Cyrillic_ghe': 0x06c7,
'Cyrillic_ha': 0x06c8,
'Cyrillic_i': 0x06c9,
'Cyrillic_shorti': 0x06ca,
'Cyrillic_ka': 0x06cb,
'Cyrillic_el': 0x06cc,
'Cyrillic_em': 0x06cd,
'Cyrillic_en': 0x06ce,
'Cyrillic_o': 0x06cf,
'Cyrillic_pe': 0x06d0,
'Cyrillic_ya': 0x06d1,
'Cyrillic_er': 0x06d2,
'Cyrillic_es': 0x06d3,
'Cyrillic_te': 0x06d4,
'Cyrillic_u': 0x06d5,
'Cyrillic_zhe': 0x06d6,
'Cyrillic_ve': 0x06d7,
'Cyrillic_softsign': 0x06d8,
'Cyrillic_yeru': 0x06d9,
'Cyrillic_ze': 0x06da,
'Cyrillic_sha': 0x06db,
'Cyrillic_e': 0x06dc,
'Cyrillic_shcha': 0x06dd,
'Cyrillic_che': 0x06de,
'Cyrillic_hardsign': 0x06df,
'Cyrillic_YU': 0x06e0,
'Cyrillic_A': 0x06e1,
'Cyrillic_BE': 0x06e2,
'Cyrillic_TSE': 0x06e3,
'Cyrillic_DE': 0x06e4,
'Cyrillic_IE': 0x06e5,
'Cyrillic_EF': 0x06e6,
'Cyrillic_GHE': 0x06e7,
'Cyrillic_HA': 0x06e8,
'Cyrillic_I': 0x06e9,
'Cyrillic_SHORTI': 0x06ea,
'Cyrillic_KA': 0x06eb,
'Cyrillic_EL': 0x06ec,
'Cyrillic_EM': 0x06ed,
'Cyrillic_EN': 0x06ee,
'Cyrillic_O': 0x06ef,
'Cyrillic_PE': 0x06f0,
'Cyrillic_YA': 0x06f1,
'Cyrillic_ER': 0x06f2,
'Cyrillic_ES': 0x06f3,
'Cyrillic_TE': 0x06f4,
'Cyrillic_U': 0x06f5,
'Cyrillic_ZHE': 0x06f6,
'Cyrillic_VE': 0x06f7,
'Cyrillic_SOFTSIGN': 0x06f8,
'Cyrillic_YERU': 0x06f9,
'Cyrillic_ZE': 0x06fa,
'Cyrillic_SHA': 0x06fb,
'Cyrillic_E': 0x06fc,
'Cyrillic_SHCHA': 0x06fd,
'Cyrillic_CHE': 0x06fe,
'Cyrillic_HARDSIGN': 0x06ff,
'Greek_ALPHAaccent': 0x07a1,
'Greek_EPSILONaccent': 0x07a2,
'Greek_ETAaccent': 0x07a3,
'Greek_IOTAaccent': 0x07a4,
'Greek_IOTAdieresis': 0x07a5,
'Greek_IOTAdiaeresis': 0x07a5,
'Greek_OMICRONaccent': 0x07a7,
'Greek_UPSILONaccent': 0x07a8,
'Greek_UPSILONdieresis': 0x07a9,
'Greek_OMEGAaccent': 0x07ab,
'Greek_accentdieresis': 0x07ae,
'Greek_horizbar': 0x07af,
'Greek_alphaaccent': 0x07b1,
'Greek_epsilonaccent': 0x07b2,
'Greek_etaaccent': 0x07b3,
'Greek_iotaaccent': 0x07b4,
'Greek_iotadieresis': 0x07b5,
'Greek_iotaaccentdieresis': 0x07b6,
'Greek_omicronaccent': 0x07b7,
'Greek_upsilonaccent': 0x07b8,
'Greek_upsilondieresis': 0x07b9,
'Greek_upsilonaccentdieresis': 0x07ba,
'Greek_omegaaccent': 0x07bb,
'Greek_ALPHA': 0x07c1,
'Greek_BETA': 0x07c2,
'Greek_GAMMA': 0x07c3,
'Greek_DELTA': 0x07c4,
'Greek_EPSILON': 0x07c5,
'Greek_ZETA': 0x07c6,
'Greek_ETA': 0x07c7,
'Greek_THETA': 0x07c8,
'Greek_IOTA': 0x07c9,
'Greek_KAPPA': 0x07ca,
'Greek_LAMDA': 0x07cb,
'Greek_LAMBDA': 0x07cb,
'Greek_MU': 0x07cc,
'Greek_NU': 0x07cd,
'Greek_XI': 0x07ce,
'Greek_OMICRON': 0x07cf,
'Greek_PI': 0x07d0,
'Greek_RHO': 0x07d1,
'Greek_SIGMA': 0x07d2,
'Greek_TAU': 0x07d4,
'Greek_UPSILON': 0x07d5,
'Greek_PHI': 0x07d6,
'Greek_CHI': 0x07d7,
'Greek_PSI': 0x07d8,
'Greek_OMEGA': 0x07d9,
'Greek_alpha': 0x07e1,
'Greek_beta': 0x07e2,
'Greek_gamma': 0x07e3,
'Greek_delta': 0x07e4,
'Greek_epsilon': 0x07e5,
'Greek_zeta': 0x07e6,
'Greek_eta': 0x07e7,
'Greek_theta': 0x07e8,
'Greek_iota': 0x07e9,
'Greek_kappa': 0x07ea,
'Greek_lamda': 0x07eb,
'Greek_lambda': 0x07eb,
'Greek_mu': 0x07ec,
'Greek_nu': 0x07ed,
'Greek_xi': 0x07ee,
'Greek_omicron': 0x07ef,
'Greek_pi': 0x07f0,
'Greek_rho': 0x07f1,
'Greek_sigma': 0x07f2,
'Greek_finalsmallsigma': 0x07f3,
'Greek_tau': 0x07f4,
'Greek_upsilon': 0x07f5,
'Greek_phi': 0x07f6,
'Greek_chi': 0x07f7,
'Greek_psi': 0x07f8,
'Greek_omega': 0x07f9,
'Greek_switch': 0xff7e,
'leftradical': 0x08a1,
'topleftradical': 0x08a2,
'horizconnector': 0x08a3,
'topintegral': 0x08a4,
'botintegral': 0x08a5,
'vertconnector': 0x08a6,
'topleftsqbracket': 0x08a7,
'botleftsqbracket': 0x08a8,
'toprightsqbracket': 0x08a9,
'botrightsqbracket': 0x08aa,
'topleftparens': 0x08ab,
'botleftparens': 0x08ac,
'toprightparens': 0x08ad,
'botrightparens': 0x08ae,
'leftmiddlecurlybrace': 0x08af,
'rightmiddlecurlybrace': 0x08b0,
'topleftsummation': 0x08b1,
'botleftsummation': 0x08b2,
'topvertsummationconnector': 0x08b3,
'botvertsummationconnector': 0x08b4,
'toprightsummation': 0x08b5,
'botrightsummation': 0x08b6,
'rightmiddlesummation': 0x08b7,
'lessthanequal': 0x08bc,
'notequal': 0x08bd,
'greaterthanequal': 0x08be,
'integral': 0x08bf,
'therefore': 0x08c0,
'variation': 0x08c1,
'infinity': 0x08c2,
'nabla': 0x08c5,
'approximate': 0x08c8,
'similarequal': 0x08c9,
'ifonlyif': 0x08cd,
'implies': 0x08ce,
'identical': 0x08cf,
'radical': 0x08d6,
'includedin': 0x08da,
'includes': 0x08db,
'intersection': 0x08dc,
'union': 0x08dd,
'logicaland': 0x08de,
'logicalor': 0x08df,
'partialderivative': 0x08ef,
'function': 0x08f6,
'leftarrow': 0x08fb,
'uparrow': 0x08fc,
'rightarrow': 0x08fd,
'downarrow': 0x08fe,
'blank': 0x09df,
'soliddiamond': 0x09e0,
'checkerboard': 0x09e1,
'ht': 0x09e2,
'ff': 0x09e3,
'cr': 0x09e4,
'lf': 0x09e5,
'nl': 0x09e8,
'vt': 0x09e9,
'lowrightcorner': 0x09ea,
'uprightcorner': 0x09eb,
'upleftcorner': 0x09ec,
'lowleftcorner': 0x09ed,
'crossinglines': 0x09ee,
'horizlinescan1': 0x09ef,
'horizlinescan3': 0x09f0,
'horizlinescan5': 0x09f1,
'horizlinescan7': 0x09f2,
'horizlinescan9': 0x09f3,
'leftt': 0x09f4,
'rightt': 0x09f5,
'bott': 0x09f6,
'topt': 0x09f7,
'vertbar': 0x09f8,
'emspace': 0x0aa1,
'enspace': 0x0aa2,
'em3space': 0x0aa3,
'em4space': 0x0aa4,
'digitspace': 0x0aa5,
'punctspace': 0x0aa6,
'thinspace': 0x0aa7,
'hairspace': 0x0aa8,
'emdash': 0x0aa9,
'endash': 0x0aaa,
'signifblank': 0x0aac,
'ellipsis': 0x0aae,
'doubbaselinedot': 0x0aaf,
'onethird': 0x0ab0,
'twothirds': 0x0ab1,
'onefifth': 0x0ab2,
'twofifths': 0x0ab3,
'threefifths': 0x0ab4,
'fourfifths': 0x0ab5,
'onesixth': 0x0ab6,
'fivesixths': 0x0ab7,
'careof': 0x0ab8,
'figdash': 0x0abb,
'leftanglebracket': 0x0abc,
'decimalpoint': 0x0abd,
'rightanglebracket': 0x0abe,
'marker': 0x0abf,
'oneeighth': 0x0ac3,
'threeeighths': 0x0ac4,
'fiveeighths': 0x0ac5,
'seveneighths': 0x0ac6,
'trademark': 0x0ac9,
'signaturemark': 0x0aca,
'trademarkincircle': 0x0acb,
'leftopentriangle': 0x0acc,
'rightopentriangle': 0x0acd,
'emopencircle': 0x0ace,
'emopenrectangle': 0x0acf,
'leftsinglequotemark': 0x0ad0,
'rightsinglequotemark': 0x0ad1,
'leftdoublequotemark': 0x0ad2,
'rightdoublequotemark': 0x0ad3,
'prescription': 0x0ad4,
'minutes': 0x0ad6,
'seconds': 0x0ad7,
'latincross': 0x0ad9,
'hexagram': 0x0ada,
'filledrectbullet': 0x0adb,
'filledlefttribullet': 0x0adc,
'filledrighttribullet': 0x0add,
'emfilledcircle': 0x0ade,
'emfilledrect': 0x0adf,
'enopencircbullet': 0x0ae0,
'enopensquarebullet': 0x0ae1,
'openrectbullet': 0x0ae2,
'opentribulletup': 0x0ae3,
'opentribulletdown': 0x0ae4,
'openstar': 0x0ae5,
'enfilledcircbullet': 0x0ae6,
'enfilledsqbullet': 0x0ae7,
'filledtribulletup': 0x0ae8,
'filledtribulletdown': 0x0ae9,
'leftpointer': 0x0aea,
'rightpointer': 0x0aeb,
'club': 0x0aec,
'diamond': 0x0aed,
'heart': 0x0aee,
'maltesecross': 0x0af0,
'dagger': 0x0af1,
'doubledagger': 0x0af2,
'checkmark': 0x0af3,
'ballotcross': 0x0af4,
'musicalsharp': 0x0af5,
'musicalflat': 0x0af6,
'malesymbol': 0x0af7,
'femalesymbol': 0x0af8,
'telephone': 0x0af9,
'telephonerecorder': 0x0afa,
'phonographcopyright': 0x0afb,
'caret': 0x0afc,
'singlelowquotemark': 0x0afd,
'doublelowquotemark': 0x0afe,
'cursor': 0x0aff,
'leftcaret': 0x0ba3,
'rightcaret': 0x0ba6,
'downcaret': 0x0ba8,
'upcaret': 0x0ba9,
'overbar': 0x0bc0,
'downtack': 0x0bc2,
'upshoe': 0x0bc3,
'downstile': 0x0bc4,
'underbar': 0x0bc6,
'jot': 0x0bca,
'quad': 0x0bcc,
'uptack': 0x0bce,
'circle': 0x0bcf,
'upstile': 0x0bd3,
'downshoe': 0x0bd6,
'rightshoe': 0x0bd8,
'leftshoe': 0x0bda,
'lefttack': 0x0bdc,
'righttack': 0x0bfc,
'hebrew_doublelowline': 0x0cdf,
'hebrew_aleph': 0x0ce0,
'hebrew_bet': 0x0ce1,
'hebrew_beth': 0x0ce1,
'hebrew_gimel': 0x0ce2,
'hebrew_gimmel': 0x0ce2,
'hebrew_dalet': 0x0ce3,
'hebrew_daleth': 0x0ce3,
'hebrew_he': 0x0ce4,
'hebrew_waw': 0x0ce5,
'hebrew_zain': 0x0ce6,
'hebrew_zayin': 0x0ce6,
'hebrew_chet': 0x0ce7,
'hebrew_het': 0x0ce7,
'hebrew_tet': 0x0ce8,
'hebrew_teth': 0x0ce8,
'hebrew_yod': 0x0ce9,
'hebrew_finalkaph': 0x0cea,
'hebrew_kaph': 0x0ceb,
'hebrew_lamed': 0x0cec,
'hebrew_finalmem': 0x0ced,
'hebrew_mem': 0x0cee,
'hebrew_finalnun': 0x0cef,
'hebrew_nun': 0x0cf0,
'hebrew_samech': 0x0cf1,
'hebrew_samekh': 0x0cf1,
'hebrew_ayin': 0x0cf2,
'hebrew_finalpe': 0x0cf3,
'hebrew_pe': 0x0cf4,
'hebrew_finalzade': 0x0cf5,
'hebrew_finalzadi': 0x0cf5,
'hebrew_zade': 0x0cf6,
'hebrew_zadi': 0x0cf6,
'hebrew_qoph': 0x0cf7,
'hebrew_kuf': 0x0cf7,
'hebrew_resh': 0x0cf8,
'hebrew_shin': 0x0cf9,
'hebrew_taw': 0x0cfa,
'hebrew_taf': 0x0cfa,
'Hebrew_switch': 0xff7e,
'Thai_kokai': 0x0da1,
'Thai_khokhai': 0x0da2,
'Thai_khokhuat': 0x0da3,
'Thai_khokhwai': 0x0da4,
'Thai_khokhon': 0x0da5,
'Thai_khorakhang': 0x0da6,
'Thai_ngongu': 0x0da7,
'Thai_chochan': 0x0da8,
'Thai_choching': 0x0da9,
'Thai_chochang': 0x0daa,
'Thai_soso': 0x0dab,
'Thai_chochoe': 0x0dac,
'Thai_yoying': 0x0dad,
'Thai_dochada': 0x0dae,
'Thai_topatak': 0x0daf,
'Thai_thothan': 0x0db0,
'Thai_thonangmontho': 0x0db1,
'Thai_thophuthao': 0x0db2,
'Thai_nonen': 0x0db3,
'Thai_dodek': 0x0db4,
'Thai_totao': 0x0db5,
'Thai_thothung': 0x0db6,
'Thai_thothahan': 0x0db7,
'Thai_thothong': 0x0db8,
'Thai_nonu': 0x0db9,
'Thai_bobaimai': 0x0dba,
'Thai_popla': 0x0dbb,
'Thai_phophung': 0x0dbc,
'Thai_fofa': 0x0dbd,
'Thai_phophan': 0x0dbe,
'Thai_fofan': 0x0dbf,
'Thai_phosamphao': 0x0dc0,
'Thai_moma': 0x0dc1,
'Thai_yoyak': 0x0dc2,
'Thai_rorua': 0x0dc3,
'Thai_ru': 0x0dc4,
'Thai_loling': 0x0dc5,
'Thai_lu': 0x0dc6,
'Thai_wowaen': 0x0dc7,
'Thai_sosala': 0x0dc8,
'Thai_sorusi': 0x0dc9,
'Thai_sosua': 0x0dca,
'Thai_hohip': 0x0dcb,
'Thai_lochula': 0x0dcc,
'Thai_oang': 0x0dcd,
'Thai_honokhuk': 0x0dce,
'Thai_paiyannoi': 0x0dcf,
'Thai_saraa': 0x0dd0,
'Thai_maihanakat': 0x0dd1,
'Thai_saraaa': 0x0dd2,
'Thai_saraam': 0x0dd3,
'Thai_sarai': 0x0dd4,
'Thai_saraii': 0x0dd5,
'Thai_saraue': 0x0dd6,
'Thai_sarauee': 0x0dd7,
'Thai_sarau': 0x0dd8,
'Thai_sarauu': 0x0dd9,
'Thai_phinthu': 0x0dda,
'Thai_maihanakat_maitho': 0x0dde,
'Thai_baht': 0x0ddf,
'Thai_sarae': 0x0de0,
'Thai_saraae': 0x0de1,
'Thai_sarao': 0x0de2,
'Thai_saraaimaimuan': 0x0de3,
'Thai_saraaimaimalai': 0x0de4,
'Thai_lakkhangyao': 0x0de5,
'Thai_maiyamok': 0x0de6,
'Thai_maitaikhu': 0x0de7,
'Thai_maiek': 0x0de8,
'Thai_maitho': 0x0de9,
'Thai_maitri': 0x0dea,
'Thai_maichattawa': 0x0deb,
'Thai_thanthakhat': 0x0dec,
'Thai_nikhahit': 0x0ded,
'Thai_leksun': 0x0df0,
'Thai_leknung': 0x0df1,
'Thai_leksong': 0x0df2,
'Thai_leksam': 0x0df3,
'Thai_leksi': 0x0df4,
'Thai_lekha': 0x0df5,
'Thai_lekhok': 0x0df6,
'Thai_lekchet': 0x0df7,
'Thai_lekpaet': 0x0df8,
'Thai_lekkao': 0x0df9,
'Hangul': 0xff31,
'Hangul_Start': 0xff32,
'Hangul_End': 0xff33,
'Hangul_Hanja': 0xff34,
'Hangul_Jamo': 0xff35,
'Hangul_Romaja': 0xff36,
'Hangul_Codeinput': 0xff37,
'Hangul_Jeonja': 0xff38,
'Hangul_Banja': 0xff39,
'Hangul_PreHanja': 0xff3a,
'Hangul_PostHanja': 0xff3b,
'Hangul_SingleCandidate': 0xff3c,
'Hangul_MultipleCandidate': 0xff3d,
'Hangul_PreviousCandidate': 0xff3e,
'Hangul_Special': 0xff3f,
'Hangul_switch': 0xff7e,
'Hangul_Kiyeog': 0x0ea1,
'Hangul_SsangKiyeog': 0x0ea2,
'Hangul_KiyeogSios': 0x0ea3,
'Hangul_Nieun': 0x0ea4,
'Hangul_NieunJieuj': 0x0ea5,
'Hangul_NieunHieuh': 0x0ea6,
'Hangul_Dikeud': 0x0ea7,
'Hangul_SsangDikeud': 0x0ea8,
'Hangul_Rieul': 0x0ea9,
'Hangul_RieulKiyeog': 0x0eaa,
'Hangul_RieulMieum': 0x0eab,
'Hangul_RieulPieub': 0x0eac,
'Hangul_RieulSios': 0x0ead,
'Hangul_RieulTieut': 0x0eae,
'Hangul_RieulPhieuf': 0x0eaf,
'Hangul_RieulHieuh': 0x0eb0,
'Hangul_Mieum': 0x0eb1,
'Hangul_Pieub': 0x0eb2,
'Hangul_SsangPieub': 0x0eb3,
'Hangul_PieubSios': 0x0eb4,
'Hangul_Sios': 0x0eb5,
'Hangul_SsangSios': 0x0eb6,
'Hangul_Ieung': 0x0eb7,
'Hangul_Jieuj': 0x0eb8,
'Hangul_SsangJieuj': 0x0eb9,
'Hangul_Cieuc': 0x0eba,
'Hangul_Khieuq': 0x0ebb,
'Hangul_Tieut': 0x0ebc,
'Hangul_Phieuf': 0x0ebd,
'Hangul_Hieuh': 0x0ebe,
'Hangul_A': 0x0ebf,
'Hangul_AE': 0x0ec0,
'Hangul_YA': 0x0ec1,
'Hangul_YAE': 0x0ec2,
'Hangul_EO': 0x0ec3,
'Hangul_E': 0x0ec4,
'Hangul_YEO': 0x0ec5,
'Hangul_YE': 0x0ec6,
'Hangul_O': 0x0ec7,
'Hangul_WA': 0x0ec8,
'Hangul_WAE': 0x0ec9,
'Hangul_OE': 0x0eca,
'Hangul_YO': 0x0ecb,
'Hangul_U': 0x0ecc,
'Hangul_WEO': 0x0ecd,
'Hangul_WE': 0x0ece,
'Hangul_WI': 0x0ecf,
'Hangul_YU': 0x0ed0,
'Hangul_EU': 0x0ed1,
'Hangul_YI': 0x0ed2,
'Hangul_I': 0x0ed3,
'Hangul_J_Kiyeog': 0x0ed4,
'Hangul_J_SsangKiyeog': 0x0ed5,
'Hangul_J_KiyeogSios': 0x0ed6,
'Hangul_J_Nieun': 0x0ed7,
'Hangul_J_NieunJieuj': 0x0ed8,
'Hangul_J_NieunHieuh': 0x0ed9,
'Hangul_J_Dikeud': 0x0eda,
'Hangul_J_Rieul': 0x0edb,
'Hangul_J_RieulKiyeog': 0x0edc,
'Hangul_J_RieulMieum': 0x0edd,
'Hangul_J_RieulPieub': 0x0ede,
'Hangul_J_RieulSios': 0x0edf,
'Hangul_J_RieulTieut': 0x0ee0,
'Hangul_J_RieulPhieuf': 0x0ee1,
'Hangul_J_RieulHieuh': 0x0ee2,
'Hangul_J_Mieum': 0x0ee3,
'Hangul_J_Pieub': 0x0ee4,
'Hangul_J_PieubSios': 0x0ee5,
'Hangul_J_Sios': 0x0ee6,
'Hangul_J_SsangSios': 0x0ee7,
'Hangul_J_Ieung': 0x0ee8,
'Hangul_J_Jieuj': 0x0ee9,
'Hangul_J_Cieuc': 0x0eea,
'Hangul_J_Khieuq': 0x0eeb,
'Hangul_J_Tieut': 0x0eec,
'Hangul_J_Phieuf': 0x0eed,
'Hangul_J_Hieuh': 0x0eee,
'Hangul_RieulYeorinHieuh': 0x0eef,
'Hangul_SunkyeongeumMieum': 0x0ef0,
'Hangul_SunkyeongeumPieub': 0x0ef1,
'Hangul_PanSios': 0x0ef2,
'Hangul_KkogjiDalrinIeung': 0x0ef3,
'Hangul_SunkyeongeumPhieuf': 0x0ef4,
'Hangul_YeorinHieuh': 0x0ef5,
'Hangul_AraeA': 0x0ef6,
'Hangul_AraeAE': 0x0ef7,
'Hangul_J_PanSios': 0x0ef8,
'Hangul_J_KkogjiDalrinIeung': 0x0ef9,
'Hangul_J_YeorinHieuh': 0x0efa,
'Korean_Won': 0x0eff,
'Armenian_ligature_ew': 0x1000587,
'Armenian_full_stop': 0x1000589,
'Armenian_verjaket': 0x1000589,
'Armenian_separation_mark': 0x100055d,
'Armenian_but': 0x100055d,
'Armenian_hyphen': 0x100058a,
'Armenian_yentamna': 0x100058a,
'Armenian_exclam': 0x100055c,
'Armenian_amanak': 0x100055c,
'Armenian_accent': 0x100055b,
'Armenian_shesht': 0x100055b,
'Armenian_question': 0x100055e,
'Armenian_paruyk': 0x100055e,
'Armenian_AYB': 0x1000531,
'Armenian_ayb': 0x1000561,
'Armenian_BEN': 0x1000532,
'Armenian_ben': 0x1000562,
'Armenian_GIM': 0x1000533,
'Armenian_gim': 0x1000563,
'Armenian_DA': 0x1000534,
'Armenian_da': 0x1000564,
'Armenian_YECH': 0x1000535,
'Armenian_yech': 0x1000565,
'Armenian_ZA': 0x1000536,
'Armenian_za': 0x1000566,
'Armenian_E': 0x1000537,
'Armenian_e': 0x1000567,
'Armenian_AT': 0x1000538,
'Armenian_at': 0x1000568,
'Armenian_TO': 0x1000539,
'Armenian_to': 0x1000569,
'Armenian_ZHE': 0x100053a,
'Armenian_zhe': 0x100056a,
'Armenian_INI': 0x100053b,
'Armenian_ini': 0x100056b,
'Armenian_LYUN': 0x100053c,
'Armenian_lyun': 0x100056c,
'Armenian_KHE': 0x100053d,
'Armenian_khe': 0x100056d,
'Armenian_TSA': 0x100053e,
'Armenian_tsa': 0x100056e,
'Armenian_KEN': 0x100053f,
'Armenian_ken': 0x100056f,
'Armenian_HO': 0x1000540,
'Armenian_ho': 0x1000570,
'Armenian_DZA': 0x1000541,
'Armenian_dza': 0x1000571,
'Armenian_GHAT': 0x1000542,
'Armenian_ghat': 0x1000572,
'Armenian_TCHE': 0x1000543,
'Armenian_tche': 0x1000573,
'Armenian_MEN': 0x1000544,
'Armenian_men': 0x1000574,
'Armenian_HI': 0x1000545,
'Armenian_hi': 0x1000575,
'Armenian_NU': 0x1000546,
'Armenian_nu': 0x1000576,
'Armenian_SHA': 0x1000547,
'Armenian_sha': 0x1000577,
'Armenian_VO': 0x1000548,
'Armenian_vo': 0x1000578,
'Armenian_CHA': 0x1000549,
'Armenian_cha': 0x1000579,
'Armenian_PE': 0x100054a,
'Armenian_pe': 0x100057a,
'Armenian_JE': 0x100054b,
'Armenian_je': 0x100057b,
'Armenian_RA': 0x100054c,
'Armenian_ra': 0x100057c,
'Armenian_SE': 0x100054d,
'Armenian_se': 0x100057d,
'Armenian_VEV': 0x100054e,
'Armenian_vev': 0x100057e,
'Armenian_TYUN': 0x100054f,
'Armenian_tyun': 0x100057f,
'Armenian_RE': 0x1000550,
'Armenian_re': 0x1000580,
'Armenian_TSO': 0x1000551,
'Armenian_tso': 0x1000581,
'Armenian_VYUN': 0x1000552,
'Armenian_vyun': 0x1000582,
'Armenian_PYUR': 0x1000553,
'Armenian_pyur': 0x1000583,
'Armenian_KE': 0x1000554,
'Armenian_ke': 0x1000584,
'Armenian_O': 0x1000555,
'Armenian_o': 0x1000585,
'Armenian_FE': 0x1000556,
'Armenian_fe': 0x1000586,
'Armenian_apostrophe': 0x100055a,
'Georgian_an': 0x10010d0,
'Georgian_ban': 0x10010d1,
'Georgian_gan': 0x10010d2,
'Georgian_don': 0x10010d3,
'Georgian_en': 0x10010d4,
'Georgian_vin': 0x10010d5,
'Georgian_zen': 0x10010d6,
'Georgian_tan': 0x10010d7,
'Georgian_in': 0x10010d8,
'Georgian_kan': 0x10010d9,
'Georgian_las': 0x10010da,
'Georgian_man': 0x10010db,
'Georgian_nar': 0x10010dc,
'Georgian_on': 0x10010dd,
'Georgian_par': 0x10010de,
'Georgian_zhar': 0x10010df,
'Georgian_rae': 0x10010e0,
'Georgian_san': 0x10010e1,
'Georgian_tar': 0x10010e2,
'Georgian_un': 0x10010e3,
'Georgian_phar': 0x10010e4,
'Georgian_khar': 0x10010e5,
'Georgian_ghan': 0x10010e6,
'Georgian_qar': 0x10010e7,
'Georgian_shin': 0x10010e8,
'Georgian_chin': 0x10010e9,
'Georgian_can': 0x10010ea,
'Georgian_jil': 0x10010eb,
'Georgian_cil': 0x10010ec,
'Georgian_char': 0x10010ed,
'Georgian_xan': 0x10010ee,
'Georgian_jhan': 0x10010ef,
'Georgian_hae': 0x10010f0,
'Georgian_he': 0x10010f1,
'Georgian_hie': 0x10010f2,
'Georgian_we': 0x10010f3,
'Georgian_har': 0x10010f4,
'Georgian_hoe': 0x10010f5,
'Georgian_fi': 0x10010f6,
'Xabovedot': 0x1001e8a,
'Ibreve': 0x100012c,
'Zstroke': 0x10001b5,
'Gcaron': 0x10001e6,
'Ocaron': 0x10001d1,
'Obarred': 0x100019f,
'xabovedot': 0x1001e8b,
'ibreve': 0x100012d,
'zstroke': 0x10001b6,
'gcaron': 0x10001e7,
'ocaron': 0x10001d2,
'obarred': 0x1000275,
'SCHWA': 0x100018f,
'schwa': 0x1000259,
'Lbelowdot': 0x1001e36,
'lbelowdot': 0x1001e37,
'Abelowdot': 0x1001ea0,
'abelowdot': 0x1001ea1,
'Ahook': 0x1001ea2,
'ahook': 0x1001ea3,
'Acircumflexacute': 0x1001ea4,
'acircumflexacute': 0x1001ea5,
'Acircumflexgrave': 0x1001ea6,
'acircumflexgrave': 0x1001ea7,
'Acircumflexhook': 0x1001ea8,
'acircumflexhook': 0x1001ea9,
'Acircumflextilde': 0x1001eaa,
'acircumflextilde': 0x1001eab,
'Acircumflexbelowdot': 0x1001eac,
'acircumflexbelowdot': 0x1001ead,
'Abreveacute': 0x1001eae,
'abreveacute': 0x1001eaf,
'Abrevegrave': 0x1001eb0,
'abrevegrave': 0x1001eb1,
'Abrevehook': 0x1001eb2,
'abrevehook': 0x1001eb3,
'Abrevetilde': 0x1001eb4,
'abrevetilde': 0x1001eb5,
'Abrevebelowdot': 0x1001eb6,
'abrevebelowdot': 0x1001eb7,
'Ebelowdot': 0x1001eb8,
'ebelowdot': 0x1001eb9,
'Ehook': 0x1001eba,
'ehook': 0x1001ebb,
'Etilde': 0x1001ebc,
'etilde': 0x1001ebd,
'Ecircumflexacute': 0x1001ebe,
'ecircumflexacute': 0x1001ebf,
'Ecircumflexgrave': 0x1001ec0,
'ecircumflexgrave': 0x1001ec1,
'Ecircumflexhook': 0x1001ec2,
'ecircumflexhook': 0x1001ec3,
'Ecircumflextilde': 0x1001ec4,
'ecircumflextilde': 0x1001ec5,
'Ecircumflexbelowdot': 0x1001ec6,
'ecircumflexbelowdot': 0x1001ec7,
'Ihook': 0x1001ec8,
'ihook': 0x1001ec9,
'Ibelowdot': 0x1001eca,
'ibelowdot': 0x1001ecb,
'Obelowdot': 0x1001ecc,
'obelowdot': 0x1001ecd,
'Ohook': 0x1001ece,
'ohook': 0x1001ecf,
'Ocircumflexacute': 0x1001ed0,
'ocircumflexacute': 0x1001ed1,
'Ocircumflexgrave': 0x1001ed2,
'ocircumflexgrave': 0x1001ed3,
'Ocircumflexhook': 0x1001ed4,
'ocircumflexhook': 0x1001ed5,
'Ocircumflextilde': 0x1001ed6,
'ocircumflextilde': 0x1001ed7,
'Ocircumflexbelowdot': 0x1001ed8,
'ocircumflexbelowdot': 0x1001ed9,
'Ohornacute': 0x1001eda,
'ohornacute': 0x1001edb,
'Ohorngrave': 0x1001edc,
'ohorngrave': 0x1001edd,
'Ohornhook': 0x1001ede,
'ohornhook': 0x1001edf,
'Ohorntilde': 0x1001ee0,
'ohorntilde': 0x1001ee1,
'Ohornbelowdot': 0x1001ee2,
'ohornbelowdot': 0x1001ee3,
'Ubelowdot': 0x1001ee4,
'ubelowdot': 0x1001ee5,
'Uhook': 0x1001ee6,
'uhook': 0x1001ee7,
'Uhornacute': 0x1001ee8,
'uhornacute': 0x1001ee9,
'Uhorngrave': 0x1001eea,
'uhorngrave': 0x1001eeb,
'Uhornhook': 0x1001eec,
'uhornhook': 0x1001eed,
'Uhorntilde': 0x1001eee,
'uhorntilde': 0x1001eef,
'Uhornbelowdot': 0x1001ef0,
'uhornbelowdot': 0x1001ef1,
'Ybelowdot': 0x1001ef4,
'ybelowdot': 0x1001ef5,
'Yhook': 0x1001ef6,
'yhook': 0x1001ef7,
'Ytilde': 0x1001ef8,
'ytilde': 0x1001ef9,
'Ohorn': 0x10001a0,
'ohorn': 0x10001a1,
'Uhorn': 0x10001af,
'uhorn': 0x10001b0,
'EcuSign': 0x10020a0,
'ColonSign': 0x10020a1,
'CruzeiroSign': 0x10020a2,
'FFrancSign': 0x10020a3,
'LiraSign': 0x10020a4,
'MillSign': 0x10020a5,
'NairaSign': 0x10020a6,
'PesetaSign': 0x10020a7,
'RupeeSign': 0x10020a8,
'WonSign': 0x10020a9,
'NewSheqelSign': 0x10020aa,
'DongSign': 0x10020ab,
'EuroSign': 0x20ac,
'zerosuperior': 0x1002070,
'foursuperior': 0x1002074,
'fivesuperior': 0x1002075,
'sixsuperior': 0x1002076,
'sevensuperior': 0x1002077,
'eightsuperior': 0x1002078,
'ninesuperior': 0x1002079,
'zerosubscript': 0x1002080,
'onesubscript': 0x1002081,
'twosubscript': 0x1002082,
'threesubscript': 0x1002083,
'foursubscript': 0x1002084,
'fivesubscript': 0x1002085,
'sixsubscript': 0x1002086,
'sevensubscript': 0x1002087,
'eightsubscript': 0x1002088,
'ninesubscript': 0x1002089,
'partdifferential': 0x1002202,
'emptyset': 0x1002205,
'elementof': 0x1002208,
'notelementof': 0x1002209,
'containsas': 0x100220B,
'squareroot': 0x100221A,
'cuberoot': 0x100221B,
'fourthroot': 0x100221C,
'dintegral': 0x100222C,
'tintegral': 0x100222D,
'because': 0x1002235,
'approxeq': 0x1002248,
'notapproxeq': 0x1002247,
'notidentical': 0x1002262,
'stricteq': 0x1002263,
'braille_dot_1': 0xfff1,
'braille_dot_2': 0xfff2,
'braille_dot_3': 0xfff3,
'braille_dot_4': 0xfff4,
'braille_dot_5': 0xfff5,
'braille_dot_6': 0xfff6,
'braille_dot_7': 0xfff7,
'braille_dot_8': 0xfff8,
'braille_dot_9': 0xfff9,
'braille_dot_10': 0xfffa,
'braille_blank': 0x1002800,
'braille_dots_1': 0x1002801,
'braille_dots_2': 0x1002802,
'braille_dots_12': 0x1002803,
'braille_dots_3': 0x1002804,
'braille_dots_13': 0x1002805,
'braille_dots_23': 0x1002806,
'braille_dots_123': 0x1002807,
'braille_dots_4': 0x1002808,
'braille_dots_14': 0x1002809,
'braille_dots_24': 0x100280a,
'braille_dots_124': 0x100280b,
'braille_dots_34': 0x100280c,
'braille_dots_134': 0x100280d,
'braille_dots_234': 0x100280e,
'braille_dots_1234': 0x100280f,
'braille_dots_5': 0x1002810,
'braille_dots_15': 0x1002811,
'braille_dots_25': 0x1002812,
'braille_dots_125': 0x1002813,
'braille_dots_35': 0x1002814,
'braille_dots_135': 0x1002815,
'braille_dots_235': 0x1002816,
'braille_dots_1235': 0x1002817,
'braille_dots_45': 0x1002818,
'braille_dots_145': 0x1002819,
'braille_dots_245': 0x100281a,
'braille_dots_1245': 0x100281b,
'braille_dots_345': 0x100281c,
'braille_dots_1345': 0x100281d,
'braille_dots_2345': 0x100281e,
'braille_dots_12345': 0x100281f,
'braille_dots_6': 0x1002820,
'braille_dots_16': 0x1002821,
'braille_dots_26': 0x1002822,
'braille_dots_126': 0x1002823,
'braille_dots_36': 0x1002824,
'braille_dots_136': 0x1002825,
'braille_dots_236': 0x1002826,
'braille_dots_1236': 0x1002827,
'braille_dots_46': 0x1002828,
'braille_dots_146': 0x1002829,
'braille_dots_246': 0x100282a,
'braille_dots_1246': 0x100282b,
'braille_dots_346': 0x100282c,
'braille_dots_1346': 0x100282d,
'braille_dots_2346': 0x100282e,
'braille_dots_12346': 0x100282f,
'braille_dots_56': 0x1002830,
'braille_dots_156': 0x1002831,
'braille_dots_256': 0x1002832,
'braille_dots_1256': 0x1002833,
'braille_dots_356': 0x1002834,
'braille_dots_1356': 0x1002835,
'braille_dots_2356': 0x1002836,
'braille_dots_12356': 0x1002837,
'braille_dots_456': 0x1002838,
'braille_dots_1456': 0x1002839,
'braille_dots_2456': 0x100283a,
'braille_dots_12456': 0x100283b,
'braille_dots_3456': 0x100283c,
'braille_dots_13456': 0x100283d,
'braille_dots_23456': 0x100283e,
'braille_dots_123456': 0x100283f,
'braille_dots_7': 0x1002840,
'braille_dots_17': 0x1002841,
'braille_dots_27': 0x1002842,
'braille_dots_127': 0x1002843,
'braille_dots_37': 0x1002844,
'braille_dots_137': 0x1002845,
'braille_dots_237': 0x1002846,
'braille_dots_1237': 0x1002847,
'braille_dots_47': 0x1002848,
'braille_dots_147': 0x1002849,
'braille_dots_247': 0x100284a,
'braille_dots_1247': 0x100284b,
'braille_dots_347': 0x100284c,
'braille_dots_1347': 0x100284d,
'braille_dots_2347': 0x100284e,
'braille_dots_12347': 0x100284f,
'braille_dots_57': 0x1002850,
'braille_dots_157': 0x1002851,
'braille_dots_257': 0x1002852,
'braille_dots_1257': 0x1002853,
'braille_dots_357': 0x1002854,
'braille_dots_1357': 0x1002855,
'braille_dots_2357': 0x1002856,
'braille_dots_12357': 0x1002857,
'braille_dots_457': 0x1002858,
'braille_dots_1457': 0x1002859,
'braille_dots_2457': 0x100285a,
'braille_dots_12457': 0x100285b,
'braille_dots_3457': 0x100285c,
'braille_dots_13457': 0x100285d,
'braille_dots_23457': 0x100285e,
'braille_dots_123457': 0x100285f,
'braille_dots_67': 0x1002860,
'braille_dots_167': 0x1002861,
'braille_dots_267': 0x1002862,
'braille_dots_1267': 0x1002863,
'braille_dots_367': 0x1002864,
'braille_dots_1367': 0x1002865,
'braille_dots_2367': 0x1002866,
'braille_dots_12367': 0x1002867,
'braille_dots_467': 0x1002868,
'braille_dots_1467': 0x1002869,
'braille_dots_2467': 0x100286a,
'braille_dots_12467': 0x100286b,
'braille_dots_3467': 0x100286c,
'braille_dots_13467': 0x100286d,
'braille_dots_23467': 0x100286e,
'braille_dots_123467': 0x100286f,
'braille_dots_567': 0x1002870,
'braille_dots_1567': 0x1002871,
'braille_dots_2567': 0x1002872,
'braille_dots_12567': 0x1002873,
'braille_dots_3567': 0x1002874,
'braille_dots_13567': 0x1002875,
'braille_dots_23567': 0x1002876,
'braille_dots_123567': 0x1002877,
'braille_dots_4567': 0x1002878,
'braille_dots_14567': 0x1002879,
'braille_dots_24567': 0x100287a,
'braille_dots_124567': 0x100287b,
'braille_dots_34567': 0x100287c,
'braille_dots_134567': 0x100287d,
'braille_dots_234567': 0x100287e,
'braille_dots_1234567': 0x100287f,
'braille_dots_8': 0x1002880,
'braille_dots_18': 0x1002881,
'braille_dots_28': 0x1002882,
'braille_dots_128': 0x1002883,
'braille_dots_38': 0x1002884,
'braille_dots_138': 0x1002885,
'braille_dots_238': 0x1002886,
'braille_dots_1238': 0x1002887,
'braille_dots_48': 0x1002888,
'braille_dots_148': 0x1002889,
'braille_dots_248': 0x100288a,
'braille_dots_1248': 0x100288b,
'braille_dots_348': 0x100288c,
'braille_dots_1348': 0x100288d,
'braille_dots_2348': 0x100288e,
'braille_dots_12348': 0x100288f,
'braille_dots_58': 0x1002890,
'braille_dots_158': 0x1002891,
'braille_dots_258': 0x1002892,
'braille_dots_1258': 0x1002893,
'braille_dots_358': 0x1002894,
'braille_dots_1358': 0x1002895,
'braille_dots_2358': 0x1002896,
'braille_dots_12358': 0x1002897,
'braille_dots_458': 0x1002898,
'braille_dots_1458': 0x1002899,
'braille_dots_2458': 0x100289a,
'braille_dots_12458': 0x100289b,
'braille_dots_3458': 0x100289c,
'braille_dots_13458': 0x100289d,
'braille_dots_23458': 0x100289e,
'braille_dots_123458': 0x100289f,
'braille_dots_68': 0x10028a0,
'braille_dots_168': 0x10028a1,
'braille_dots_268': 0x10028a2,
'braille_dots_1268': 0x10028a3,
'braille_dots_368': 0x10028a4,
'braille_dots_1368': 0x10028a5,
'braille_dots_2368': 0x10028a6,
'braille_dots_12368': 0x10028a7,
'braille_dots_468': 0x10028a8,
'braille_dots_1468': 0x10028a9,
'braille_dots_2468': 0x10028aa,
'braille_dots_12468': 0x10028ab,
'braille_dots_3468': 0x10028ac,
'braille_dots_13468': 0x10028ad,
'braille_dots_23468': 0x10028ae,
'braille_dots_123468': 0x10028af,
'braille_dots_568': 0x10028b0,
'braille_dots_1568': 0x10028b1,
'braille_dots_2568': 0x10028b2,
'braille_dots_12568': 0x10028b3,
'braille_dots_3568': 0x10028b4,
'braille_dots_13568': 0x10028b5,
'braille_dots_23568': 0x10028b6,
'braille_dots_123568': 0x10028b7,
'braille_dots_4568': 0x10028b8,
'braille_dots_14568': 0x10028b9,
'braille_dots_24568': 0x10028ba,
'braille_dots_124568': 0x10028bb,
'braille_dots_34568': 0x10028bc,
'braille_dots_134568': 0x10028bd,
'braille_dots_234568': 0x10028be,
'braille_dots_1234568': 0x10028bf,
'braille_dots_78': 0x10028c0,
'braille_dots_178': 0x10028c1,
'braille_dots_278': 0x10028c2,
'braille_dots_1278': 0x10028c3,
'braille_dots_378': 0x10028c4,
'braille_dots_1378': 0x10028c5,
'braille_dots_2378': 0x10028c6,
'braille_dots_12378': 0x10028c7,
'braille_dots_478': 0x10028c8,
'braille_dots_1478': 0x10028c9,
'braille_dots_2478': 0x10028ca,
'braille_dots_12478': 0x10028cb,
'braille_dots_3478': 0x10028cc,
'braille_dots_13478': 0x10028cd,
'braille_dots_23478': 0x10028ce,
'braille_dots_123478': 0x10028cf,
'braille_dots_578': 0x10028d0,
'braille_dots_1578': 0x10028d1,
'braille_dots_2578': 0x10028d2,
'braille_dots_12578': 0x10028d3,
'braille_dots_3578': 0x10028d4,
'braille_dots_13578': 0x10028d5,
'braille_dots_23578': 0x10028d6,
'braille_dots_123578': 0x10028d7,
'braille_dots_4578': 0x10028d8,
'braille_dots_14578': 0x10028d9,
'braille_dots_24578': 0x10028da,
'braille_dots_124578': 0x10028db,
'braille_dots_34578': 0x10028dc,
'braille_dots_134578': 0x10028dd,
'braille_dots_234578': 0x10028de,
'braille_dots_1234578': 0x10028df,
'braille_dots_678': 0x10028e0,
'braille_dots_1678': 0x10028e1,
'braille_dots_2678': 0x10028e2,
'braille_dots_12678': 0x10028e3,
'braille_dots_3678': 0x10028e4,
'braille_dots_13678': 0x10028e5,
'braille_dots_23678': 0x10028e6,
'braille_dots_123678': 0x10028e7,
'braille_dots_4678': 0x10028e8,
'braille_dots_14678': 0x10028e9,
'braille_dots_24678': 0x10028ea,
'braille_dots_124678': 0x10028eb,
'braille_dots_34678': 0x10028ec,
'braille_dots_134678': 0x10028ed,
'braille_dots_234678': 0x10028ee,
'braille_dots_1234678': 0x10028ef,
'braille_dots_5678': 0x10028f0,
'braille_dots_15678': 0x10028f1,
'braille_dots_25678': 0x10028f2,
'braille_dots_125678': 0x10028f3,
'braille_dots_35678': 0x10028f4,
'braille_dots_135678': 0x10028f5,
'braille_dots_235678': 0x10028f6,
'braille_dots_1235678': 0x10028f7,
'braille_dots_45678': 0x10028f8,
'braille_dots_145678': 0x10028f9,
'braille_dots_245678': 0x10028fa,
'braille_dots_1245678': 0x10028fb,
'braille_dots_345678': 0x10028fc,
'braille_dots_1345678': 0x10028fd,
'braille_dots_2345678': 0x10028fe,
'braille_dots_12345678': 0x10028ff,
}
| Python |
# X11 core-protocol event codes keyed by xpyb event class name.
# The codes are consecutive, running from 2 (KeyPress) to 34 (MappingNotify).
_EVENT_NAMES = (
    'KeyPressEvent',
    'KeyReleaseEvent',
    'ButtonPressEvent',
    'ButtonReleaseEvent',
    'MotionNotifyEvent',
    'EnterNotifyEvent',
    'LeaveNotifyEvent',
    'FocusInEvent',
    'FocusOutEvent',
    'KeymapNotifyEvent',
    'ExposeEvent',
    'GraphicsExposureEvent',
    'NoExposureEvent',
    'VisibilityNotifyEvent',
    'CreateNotifyEvent',
    'DestroyNotifyEvent',
    'UnmapNotifyEvent',
    'MapNotifyEvent',
    'MapRequestEvent',
    'ReparentNotifyEvent',
    'ConfigureNotifyEvent',
    'ConfigureRequestEvent',
    'GravityNotifyEvent',
    'ResizeRequestEvent',
    'CirculateNotifyEvent',
    'CirculateRequestEvent',
    'PropertyNotifyEvent',
    'SelectionClearEvent',
    'SelectionRequestEvent',
    'SelectionNotifyEvent',
    'ColormapNotifyEvent',
    'ClientMessageEvent',
    'MappingNotifyEvent',
)
events = dict(zip(_EVENT_NAMES, range(2, 2 + len(_EVENT_NAMES))))
| Python |
# -*- coding: utf-8 -*-
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the GNU General Public License, Version 3.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.gnu.org/licenses/gpl.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from google.appengine.api import namespace_manager
def namespace_manager_default_namespace_for_request():
    """Return the default namespace for the current request.

    Uses the Google Apps domain of the request (rather than, say,
    SERVER_NAME) so data is partitioned per hosted domain.
    """
    return namespace_manager.google_apps_namespace()
| Python |
#!/usr/bin/python
"""
HeaderID Extension for Python-Markdown
======================================
Adds ability to set HTML IDs for headers.
Basic usage:
>>> import markdown
>>> text = "# Some Header # {#some_id}"
>>> md = markdown.markdown(text, ['headerid'])
>>> md
u'<h1 id="some_id">Some Header</h1>'
All header IDs are unique:
>>> text = '''
... #Header
... #Another Header {#header}
... #Third Header {#header}'''
>>> md = markdown.markdown(text, ['headerid'])
>>> md
u'<h1 id="header">Header</h1>\\n<h1 id="header_1">Another Header</h1>\\n<h1 id="header_2">Third Header</h1>'
To fit within a html template's hierarchy, set the header base level:
>>> text = '''
... #Some Header
... ## Next Level'''
>>> md = markdown.markdown(text, ['headerid(level=3)'])
>>> md
u'<h3 id="some_header">Some Header</h3>\\n<h4 id="next_level">Next Level</h4>'
Turn off auto generated IDs:
>>> text = '''
... # Some Header
... # Header with ID # { #foo }'''
>>> md = markdown.markdown(text, ['headerid(forceid=False)'])
>>> md
u'<h1>Some Header</h1>\\n<h1 id="foo">Header with ID</h1>'
Use with MetaData extension:
>>> text = '''header_level: 2
... header_forceid: Off
...
... # A Header'''
>>> md = markdown.markdown(text, ['headerid', 'meta'])
>>> md
u'<h2>A Header</h2>'
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://www.freewisdom.org/project/python-markdown/HeaderId>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
"""
import markdown
from markdown import etree
import re
from string import ascii_lowercase, digits, punctuation
# Characters permitted in an auto-generated header ID.
ID_CHARS = ascii_lowercase + digits + '-_'
# Matches an ID that already ends in a numeric suffix, e.g. "header_1".
IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
class HeaderIdProcessor(markdown.blockprocessors.BlockProcessor):
    """ Replacement BlockProcessor for Header IDs.

    Parses ``# Header {#id}`` style atx headers, honouring the ``level``
    and ``forceid`` configs (optionally overridden by Meta-Data), and
    guarantees every emitted id is unique within the document.
    """

    # Detect a header at start of any line in block
    RE = re.compile(r"""(^|\n)
                        (?P<level>\#{1,6})  # group('level') = string of hashes
                        (?P<header>.*?)     # group('header') = Header text
                        \#*                 # optional closing hashes
                        (?:[ \t]*\{[ \t]*\#(?P<id>[-_:a-zA-Z0-9]+)[ \t]*\})?
                        (\n|$)              #  ^^ group('id') = id attribute
                     """,
                    re.VERBOSE)

    # IDs already used in this document; reset between conversions by
    # HeaderIdExtension.reset().
    IDs = []

    def test(self, parent, block):
        """ Claim any block that contains a header line. """
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        """ Split the block around the header and emit an <hN> element. """
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            before = block[:m.start()]  # All lines before header
            after = block[m.end():]     # All lines after header
            if before:
                # The header was not the first line of the block; the lines
                # before it must be parsed first, so recursively parse them
                # as their own block.
                self.parser.parseBlocks(parent, [before])
            # Create header using named groups from RE
            start_level, force_id = self._get_meta()
            level = len(m.group('level')) + start_level
            if level > 6:
                level = 6  # HTML has no heading deeper than <h6>
            h = markdown.etree.SubElement(parent, 'h%d' % level)
            h.text = m.group('header').strip()
            if m.group('id'):
                # Explicit {#id} attribute wins.
                h.set('id', self._unique_id(m.group('id')))
            elif force_id:
                h.set('id', self._create_id(m.group('header').strip()))
            if after:
                # Insert remaining lines as first block for future parsing.
                blocks.insert(0, after)
        else:
            # Should be unreachable: test() matched the same RE.
            # Bug fix: the original called the undefined name
            # ``message(CRITICAL, ...)``, which raised NameError; log instead.
            import logging
            logging.getLogger('MARKDOWN').critical(
                "We've got a problem header!")

    def _get_meta(self):
        """ Return meta data supported by this ext as a tuple.

        Returns (base_level - 1, force_id), with document Meta-Data keys
        ``header_level`` / ``header_forceid`` overriding the configs.
        """
        level = int(self.config['level'][0]) - 1
        force = self._str2bool(self.config['forceid'][0])
        if hasattr(self.md, 'Meta'):
            # ``in`` instead of the deprecated dict.has_key().
            if 'header_level' in self.md.Meta:
                level = int(self.md.Meta['header_level'][0]) - 1
            if 'header_forceid' in self.md.Meta:
                force = self._str2bool(self.md.Meta['header_forceid'][0])
        return level, force

    def _str2bool(self, s, default=False):
        """ Convert a string to a boolean value. """
        s = str(s).lower()
        if s in ['0', 'f', 'false', 'off', 'no', 'n']:
            return False
        elif s in ['1', 't', 'true', 'on', 'yes', 'y']:
            return True
        return default

    def _unique_id(self, id):
        """ Ensure ID is unique. Append '_1', '_2'... if not """
        while id in self.IDs:
            m = IDCOUNT_RE.match(id)
            if m:
                # Bump an existing numeric suffix.
                id = '%s_%d' % (m.group(1), int(m.group(2)) + 1)
            else:
                id = '%s_%d' % (id, 1)
        self.IDs.append(id)
        return id

    def _create_id(self, header):
        """ Return ID from Header text.

        Lowercases, replaces spaces with '_', keeps ID_CHARS, drops
        punctuation and maps any other character to '+'.
        """
        h = ''
        for c in header.lower().replace(' ', '_'):
            if c in ID_CHARS:
                h += c
            elif c not in punctuation:
                h += '+'
        return self._unique_id(h)
class HeaderIdExtension (markdown.Extension):
    """ Markdown extension wiring HeaderIdProcessor into the parser. """

    def __init__(self, configs):
        # Default settings, overridable through extension configs.
        self.config = {
            'level' : ['1', 'Base level for headers.'],
            'forceid' : ['True', 'Force all headers to have an id.']
        }
        for key, value in configs:
            self.setConfig(key, value)

    def extendMarkdown(self, md, md_globals):
        md.registerExtension(self)
        processor = HeaderIdProcessor(md.parser)
        processor.md = md
        processor.config = self.config
        self.processor = processor
        # Replace the stock 'hashheader' processor in place.
        md.parser.blockprocessors['hashheader'] = processor

    def reset(self):
        # Forget IDs generated during a previous conversion.
        self.processor.IDs = []
def makeExtension(configs=None):
    """ Factory entry point expected by the Markdown extension loader. """
    extension = HeaderIdExtension(configs=configs)
    return extension
if __name__ == "__main__":
    # Run the doctests in the module docstring when executed directly.
    import doctest
    doctest.testmod()
| Python |
#!/usr/bin/env Python
"""
Definition List Extension for Python-Markdown
=============================================
Added parsing of Definition Lists to Python-Markdown.
A simple example:
Apple
: Pomaceous fruit of plants of the genus Malus in
the family Rosaceae.
: An american computer company.
Orange
: The fruit of an evergreen tree of the genus Citrus.
Copyright 2008 - [Waylan Limberg](http://achinghead.com)
"""
import markdown, re
from markdown import etree
class DefListProcessor(markdown.blockprocessors.BlockProcessor):
    """ Process Definition Lists.

    Turns "term / : definition" blocks into <dl><dt>..<dd>.. structures,
    continuing an existing <dl> sibling when one is present.
    """

    # A definition line: up to 3 leading spaces, a colon, 1-3 spaces, text.
    RE = re.compile(r'(^|\n)[ ]{0,3}:[ ]{1,3}(.*?)(\n|$)')

    def test(self, parent, block):
        # Claim any block containing a definition line.
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        # Lines before the first definition line are the terms being defined.
        terms = [l.strip() for l in block[:m.start()].split('\n') if l.strip()]
        # Remove one level of indent from the definition body; ``theRest``
        # is trailing unindented text that is not part of this definition.
        d, theRest = self.detab(block[m.end():])
        if d:
            d = '%s\n%s' % (m.group(2), d)
        else:
            d = m.group(2)
        sibling = self.lastChild(parent)
        # NOTE(review): ``sibling`` may be None when the list opens the
        # document, which would make ``sibling.tag`` raise — confirm callers
        # always provide a preceding element.
        if not terms and sibling.tag == 'p':
            # The previous paragraph contains the terms
            state = 'looselist'
            terms = sibling.text.split('\n')
            parent.remove(sibling)
            # Aquire new sibling
            sibling = self.lastChild(parent)
        else:
            state = 'list'
        if sibling and sibling.tag == 'dl':
            # This is another item on an existing list
            dl = sibling
            if len(dl) and dl[-1].tag == 'dd' and len(dl[-1]):
                state = 'looselist'
        else:
            # This is a new list
            dl = etree.SubElement(parent, 'dl')
        # Add terms
        for term in terms:
            dt = etree.SubElement(dl, 'dt')
            dt.text = term
        # Add definition; parser state controls loose/tight list rendering.
        self.parser.state.set(state)
        dd = etree.SubElement(dl, 'dd')
        self.parser.parseBlocks(dd, [d])
        self.parser.state.reset()
        if theRest:
            # Hand the remainder back for normal block parsing.
            blocks.insert(0, theRest)
class DefListIndentProcessor(markdown.blockprocessors.ListIndentProcessor):
    """ Process indented children of definition list items. """

    # Indented content hangs off <dd> items inside <dl> lists, instead of
    # the <li>/<ul>/<ol> handled by the parent class.
    ITEM_TYPES = ['dd']
    LIST_TYPES = ['dl']

    def create_item(self, parent, block):
        """ Create a new dd and parse the block with it as the parent.

        Bug fix: the original signature omitted ``self``, so ``parent`` was
        bound to the instance and ``self.parser`` raised NameError whenever
        this method was invoked.
        """
        dd = markdown.etree.SubElement(parent, 'dd')
        self.parser.parseBlocks(dd, [block])
class DefListExtension(markdown.Extension):
    """ Add definition lists to Markdown. """

    def extendMarkdown(self, md, md_globals):
        """ Register both block processors with the BlockParser. """
        parser = md.parser
        # Handle indented children of <dd> items, just after 'indent'.
        parser.blockprocessors.add(
            'defindent', DefListIndentProcessor(parser), '>indent')
        # Recognise definition lists themselves, just after 'ulist'.
        parser.blockprocessors.add(
            'deflist', DefListProcessor(parser), '>ulist')
def makeExtension(configs={}):
    """ Factory entry point expected by the Markdown extension loader. """
    extension = DefListExtension(configs=configs)
    return extension
| Python |
#!usr/bin/python
"""
Meta Data Extension for Python-Markdown
=======================================
This extension adds Meta Data handling to markdown.
Basic Usage:
>>> import markdown
>>> text = '''Title: A Test Doc.
... Author: Waylan Limberg
... John Doe
... Blank_Data:
...
... The body. This is paragraph one.
... '''
>>> md = markdown.Markdown(['meta'])
>>> md.convert(text)
u'<p>The body. This is paragraph one.</p>'
>>> md.Meta
{u'blank_data': [u''], u'author': [u'Waylan Limberg', u'John Doe'], u'title': [u'A Test Doc.']}
Make sure text without Meta Data still works (markdown < 1.6b returns a <p>).
>>> text = ' Some Code - not extra lines of meta data.'
>>> md = markdown.Markdown(['meta'])
>>> md.convert(text)
u'<pre><code>Some Code - not extra lines of meta data.\\n</code></pre>'
>>> md.Meta
{}
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
Project website: <http://www.freewisdom.org/project/python-markdown/Meta-Data>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
"""
import markdown, re
# Global Vars
# First line of a meta-data entry: "Key: value" with up to 3 leading spaces.
META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
# Continuation line: 4+ spaces of indent followed by an additional value.
META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
class MetaExtension (markdown.Extension):
    """ Meta-Data extension for Python-Markdown. """

    def extendMarkdown(self, md, md_globals):
        """ Register MetaPreprocessor at the very start of the chain. """
        preprocessor = MetaPreprocessor(md)
        md.preprocessors.add("meta", preprocessor, "_begin")
class MetaPreprocessor(markdown.preprocessors.Preprocessor):
    """ Get Meta-Data.

    Consumes leading "Key: value" lines (plus indented continuations) from
    the document, stores them as Markdown.Meta, and returns the remaining
    lines for normal processing.
    """

    def run(self, lines):
        """ Parse Meta-Data and store in Markdown.Meta. """
        meta = {}
        key = None
        # Bug fix: the original looped ``while 1`` and popped
        # unconditionally, raising IndexError on a document consisting of
        # nothing but meta-data (no trailing blank line).
        while lines:
            line = lines.pop(0)
            if line.strip() == '':
                break  # blank line - done
            m1 = META_RE.match(line)
            if m1:
                key = m1.group('key').lower().strip()
                meta[key] = [m1.group('value').strip()]
            else:
                m2 = META_MORE_RE.match(line)
                if m2 and key:
                    # Add another line to an existing key
                    meta[key].append(m2.group('value').strip())
                else:
                    # Not meta-data: push the line back and stop.
                    lines.insert(0, line)
                    break
        self.markdown.Meta = meta
        return lines
def makeExtension(configs={}):
    """ Factory entry point expected by the Markdown extension loader. """
    extension = MetaExtension(configs=configs)
    return extension
if __name__ == "__main__":
    # Run the doctests embedded in the module docstring.
    import doctest
    doctest.testmod()
| Python |
"""
========================= IMAGE LINKS =================================
Turns paragraphs like
<~~~~~~~~~~~~~~~~~~~~~~~~
dir/subdir
dir/subdir
dir/subdir
~~~~~~~~~~~~~~
dir/subdir
dir/subdir
dir/subdir
~~~~~~~~~~~~~~~~~~~>
Into mini-photo galleries.
"""
import re, markdown
import url_manager
# Thumbnail that links to the full-size photo: (photo_url, thumb_url, title).
IMAGE_LINK = """<a href="%s"><img src="%s" title="%s"/></a>"""
# Link to the slideshow view: (slideshow_url,).
SLIDESHOW_LINK = """<a href="%s" target="_blank">[slideshow]</a>"""
# Link to an album page: (album_url, link_text).
ALBUM_LINK = """ <a href="%s">[%s]</a>"""
class ImageLinksExtension(markdown.Extension):
    """ Markdown extension expanding image-link blocks into mini galleries. """

    def extendMarkdown(self, md, md_globals):
        preprocessor = ImageLinkPreprocessor(md)
        md.preprocessors.add("imagelink", preprocessor, "_begin")
class ImageLinkPreprocessor(markdown.preprocessors.Preprocessor):
    # State machine over the input lines: "<~~~~" opens an image block,
    # "~~~~~~>" (or a blank line) closes it and emits the gallery HTML,
    # an interior "~~~~~" line starts a new row of thumbnails.

    def run(self, lines):
        # NOTE(review): the blog entry path is hard-coded here — presumably
        # a leftover from testing; confirm against url_manager usage.
        url = url_manager.BlogEntryUrl(url_manager.BlogUrl("all"),
                                       "2006/08/29/the_rest_of_our")
        all_images = []
        blocks = []  # NOTE(review): never used after this point
        in_image_block = False
        new_lines = []
        for line in lines:
            if line.startswith("<~~~~~~~"):
                # Opening marker: reset per-block state.
                albums = []
                rows = []
                in_image_block = True
            if not in_image_block:
                new_lines.append(line)
            else:
                line = line.strip()
                if line.endswith("~~~~~~>") or not line:
                    # Closing marker (or blank line): emit the gallery block.
                    in_image_block = False
                    new_block = "<div><br/><center><span class='image-links'>\n"
                    album_url_hash = {}
                    for row in rows:
                        for photo_url, title in row:
                            new_block += "  "
                            new_block += IMAGE_LINK % (photo_url,
                                                       photo_url.get_thumbnail(),
                                                       title)
                            # Track distinct albums referenced by this block.
                            album_url_hash[str(photo_url.get_album())] = 1
                        new_block += "<br/>"
                    new_block += "</span>"
                    new_block += SLIDESHOW_LINK % url.get_slideshow()
                    album_urls = album_url_hash.keys()
                    album_urls.sort()
                    if len(album_urls) == 1:
                        new_block += ALBUM_LINK % (album_urls[0], "complete album")
                    else :
                        for i in range(len(album_urls)) :
                            new_block += ALBUM_LINK % (album_urls[i],
                                                       "album %d" % (i + 1) )
                    new_lines.append(new_block + "</center><br/></div>")
                elif line[1:6] == "~~~~~" :
                    rows.append([])  # start a new row
                else :
                    # A photo line: "album/photo [optional title words...]".
                    parts = line.split()
                    line = parts[0]
                    title = " ".join(parts[1:])
                    album, photo = line.split("/")
                    photo_url = url.get_photo(album, photo,
                                              len(all_images)+1)
                    all_images.append(photo_url)
                    rows[-1].append((photo_url, title))
                    if not album in albums :
                        albums.append(album)
        return new_lines
def makeExtension(configs):
    """ Factory entry point expected by the Markdown extension loader. """
    extension = ImageLinksExtension(configs)
    return extension
| Python |
import markdown
from markdown import etree
# Fallback feed metadata, overridable via the URL/CREATOR/TITLE configs.
DEFAULT_URL = "http://www.freewisdom.org/projects/python-markdown/"
DEFAULT_CREATOR = "Yuri Takhteyev"
DEFAULT_TITLE = "Markdown in Python"
# Identifies the tool that produced the feed.
GENERATOR = "http://www.freewisdom.org/projects/python-markdown/markdown2rss"
# Month names as they appear in post headings, mapped to two-digit month
# numbers ("Jan" -> "01", ..., "December" -> "12").
_MONTH_NAMES = ("Jan", "Feb", "March", "April", "May", "June", "July",
                "August", "September", "October", "November", "December")
month_map = dict(zip(_MONTH_NAMES, ["%02d" % m for m in range(1, 13)]))

def get_time(heading):
    """Build an RDF-style timestamp from a heading like 'Month DD, YYYY - title'."""
    date_part = heading.split("-")[0]
    date_part = date_part.strip().replace(",", " ").replace(".", " ")
    month, day, year = date_part.split()
    return rdftime(" ".join((month_map[month], day, year, "12:00:00 AM")))

def rdftime(time):
    """Normalise ':'/'/'-separated date-time text into ISO-8601 with -08:00 offset."""
    fields = time.replace(":", " ").replace("/", " ").split()
    return "%s-%s-%sT%s:%s:%s-08:00" % tuple(fields[:6])
def get_date(text):
    # Stub: date extraction is not implemented; always returns the literal
    # string "date" regardless of input.
    return "date"
class RssExtension (markdown.Extension):
    """ Serialise a Markdown document as an RSS 2.0 feed. """

    def extendMarkdown(self, md, md_globals):
        self.config = {'URL': [DEFAULT_URL, "Main URL"],
                       'CREATOR': [DEFAULT_CREATOR, "Feed creator's name"],
                       'TITLE': [DEFAULT_TITLE, "Feed title"]}
        md.xml_mode = True
        # Tree-processor that replaces the document tree with the <rss> tree.
        tree_processor = RssTreeProcessor(md)
        tree_processor.ext = self
        md.treeprocessors['rss'] = tree_processor
        # Emit a complete XML document instead of an HTML fragment.
        md.stripTopLevelTags = 0
        md.docType = '<?xml version="1.0" encoding="utf-8"?>\n'
class RssTreeProcessor(markdown.treeprocessors.Treeprocessor):
    """ Convert the parsed document tree into an RSS 2.0 <rss> tree.

    Each h1-h5 heading starts a new feed <item>; following <p> elements
    become that item's <description>.
    """

    def run (self, root):
        rss = etree.Element("rss")
        rss.set("version", "2.0")
        channel = etree.SubElement(rss, "channel")
        # Feed-level metadata from the extension configs.
        for tag, text in (("title", self.ext.getConfig("TITLE")),
                          ("link", self.ext.getConfig("URL")),
                          ("description", None)):
            element = etree.SubElement(channel, tag)
            element.text = text
        for child in root:
            if child.tag in ["h1", "h2", "h3", "h4", "h5"]:
                heading = child.text.strip()
                item = etree.SubElement(channel, "item")
                link = etree.SubElement(item, "link")
                link.text = self.ext.getConfig("URL")
                title = etree.SubElement(item, "title")
                title.text = heading
                # The guid is the heading reduced to its alphanumerics.
                guid = ''.join([x for x in heading if x.isalnum()])
                guidElem = etree.SubElement(item, "guid")
                guidElem.text = guid
                guidElem.set("isPermaLink", "false")
            elif child.tag in ["p"]:
                # ``item`` is only bound once a heading has been seen; a <p>
                # before any heading triggers UnboundLocalError, which is
                # deliberately caught to skip such paragraphs.
                try:
                    description = etree.SubElement(item, "description")
                except UnboundLocalError:
                    # Item not defined - moving on
                    pass
                else:
                    if len(child):
                        # Paragraph has child elements: serialise them all.
                        content = "\n".join([etree.tostring(node)
                                             for node in child])
                    else:
                        content = child.text
                    # Stash as CDATA so later serialisation leaves it intact.
                    pholder = self.markdown.htmlStash.store(
                        "<![CDATA[ %s]]>" % content)
                    description.text = pholder
        return rss
def makeExtension(configs):
    """ Factory entry point expected by the Markdown extension loader. """
    extension = RssExtension(configs)
    return extension
| Python |
#!/usr/bin/env python
"""
HTML Tidy Extension for Python-Markdown
=======================================
Runs [HTML Tidy][] on the output of Python-Markdown using the [uTidylib][]
Python wrapper. Both libtidy and uTidylib must be installed on your system.
Note than any Tidy [options][] can be passed in as extension configs. So,
for example, to output HTML rather than XHTML, set ``output_xhtml=0``. To
indent the output, set ``indent=auto`` and to have Tidy wrap the output in
``<html>`` and ``<body>`` tags, set ``show_body_only=0``.
[HTML Tidy]: http://tidy.sourceforge.net/
[uTidylib]: http://utidylib.berlios.de/
[options]: http://tidy.sourceforge.net/docs/quickref.html
Copyright (c)2008 [Waylan Limberg](http://achinghead.com)
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
Dependencies:
* [Python2.3+](http://python.org)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
* [HTML Tidy](http://utidylib.berlios.de/)
* [uTidylib](http://utidylib.berlios.de/)
"""
import markdown
import tidy
class TidyExtension(markdown.Extension):
    """ Post-process Markdown output through HTML Tidy. """

    def __init__(self, configs):
        # Defaults chosen to match typical markdown behaviour; any Tidy
        # option can be overridden through extension configs.
        self.config = dict(output_xhtml=1, show_body_only=1)
        for option in configs:
            self.config[option[0]] = option[1]

    def extendMarkdown(self, md, md_globals):
        # Stash the options on the markdown instance, then register the
        # postprocessor that actually invokes Tidy.
        md.tidy_options = self.config
        md.postprocessors['tidy'] = TidyProcessor(md)
class TidyProcessor(markdown.postprocessors.Postprocessor):
    """ Postprocessor that runs the serialised HTML through uTidylib. """

    def run(self, text):
        # Pass text to Tidy. As Tidy does not accept unicode we need to encode
        # it and decode its return value.
        # NOTE(review): ``unicode`` is Python 2 only — this module cannot run
        # unchanged on Python 3.
        return unicode(tidy.parseString(text.encode('utf-8'),
                                        **self.markdown.tidy_options))
def makeExtension(configs=None):
    """ Factory entry point expected by the Markdown extension loader. """
    extension = TidyExtension(configs=configs)
    return extension
| Python |
"""
========================= FOOTNOTES =================================
This section adds footnote handling to markdown. It can be used as
an example for extending python-markdown with relatively complex
functionality. While in this case the extension is included inside
the module itself, it could just as easily be added from outside the
module. Not that all markdown classes above are ignorant about
footnotes. All footnote functionality is provided separately and
then added to the markdown instance at the run time.
Footnote functionality is attached by calling extendMarkdown()
method of FootnoteExtension. The method also registers the
extension to allow it's state to be reset by a call to reset()
method.
Example:
Footnotes[^1] have a label[^label] and a definition[^!DEF].
[^1]: This is a footnote
[^label]: A footnote on "label"
[^!DEF]: The footnote for definition
"""
import re, markdown
from markdown import etree
# Unique placeholder strings inserted into the tree and swapped for the real
# back-link text / non-breaking space by the postprocessing stage.
FN_BACKLINK_TEXT = "zz1337820767766393qq"
NBSP_PLACEHOLDER = "qq3936677670287331zz"
# Footnote definition line: optional leading spaces, "[^id]:", then the text.
DEF_RE = re.compile(r'(\ ?\ ?\ ?)\[\^([^\]]*)\]:\s*(.*)')
# One level of indentation (tab or spaces) followed by the rest of the line.
TABBED_RE = re.compile(r'((\t)|( ))(.*)')
class FootnoteExtension(markdown.Extension):
    """ Footnote Extension.

    Registers the preprocessor, inline pattern, tree-processor and
    postprocessor that together implement footnote support, and acts as the
    shared store for collected footnote definitions.
    """

    def __init__ (self, configs):
        """ Setup configs. """
        self.config = {'PLACE_MARKER':
                       ["///Footnotes Go Here///",
                        "The text string that marks where the footnotes go"],
                       'UNIQUE_IDS':
                       [False,
                        "Avoid name collisions across "
                        "multiple calls to reset()."]}

        for key, value in configs:
            self.config[key][0] = value

        # In multiple invocations, emit links that don't get tangled.
        self.unique_prefix = 0

        self.reset()

    def extendMarkdown(self, md, md_globals):
        """ Add pieces to Markdown. """
        md.registerExtension(self)
        self.parser = md.parser
        # Insert a preprocessor before ReferencePreprocessor
        md.preprocessors.add("footnote", FootnotePreprocessor(self),
                             "<reference")
        # Insert an inline pattern before ImageReferencePattern
        FOOTNOTE_RE = r'\[\^([^\]]*)\]' # blah blah [^1] blah
        md.inlinePatterns.add("footnote", FootnotePattern(FOOTNOTE_RE, self),
                              "<reference")
        # Insert a tree-processor that would actually add the footnote div
        # This must be before the inline treeprocessor so inline patterns
        # run on the contents of the div.
        md.treeprocessors.add("footnote", FootnoteTreeprocessor(self),
                              "<inline")
        # Insert a postprocessor after the amp_substitute postprocessor
        md.postprocessors.add("footnote", FootnotePostprocessor(self),
                              ">amp_substitute")

    def reset(self):
        """ Clear the footnotes on reset, and prepare for a distinct document. """
        self.footnotes = markdown.odict.OrderedDict()
        self.unique_prefix += 1

    def findFootnotesPlaceholder(self, root):
        """ Return ElementTree Element that contains Footnote placeholder.

        Returns (element, True) when the marker is in an element's text,
        ((child, parent), False) when it is in a tail, or None if not found.
        """
        def finder(element):
            for child in element:
                if child.text:
                    if child.text.find(self.getConfig("PLACE_MARKER")) > -1:
                        return child, True
                if child.tail:
                    if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
                        return (child, element), False
                # NOTE(review): the result of this recursive call is
                # discarded, so a marker nested more than one level deep is
                # never reported — confirm whether that is intended.
                finder(child)
            return None

        res = finder(root)
        return res

    def setFootnote(self, id, text):
        """ Store a footnote for later retrieval. """
        self.footnotes[id] = text

    def makeFootnoteId(self, id):
        """ Return footnote link id. """
        if self.getConfig("UNIQUE_IDS"):
            return 'fn:%d-%s' % (self.unique_prefix, id)
        else:
            return 'fn:%s' % id

    def makeFootnoteRefId(self, id):
        """ Return footnote back-link id. """
        if self.getConfig("UNIQUE_IDS"):
            return 'fnref:%d-%s' % (self.unique_prefix, id)
        else:
            return 'fnref:%s' % id

    def makeFootnotesDiv(self, root):
        """ Return div of footnotes as et Element. """

        if not self.footnotes.keys():
            return None

        div = etree.Element("div")
        div.set('class', 'footnote')
        hr = etree.SubElement(div, "hr")
        ol = etree.SubElement(div, "ol")

        for id in self.footnotes.keys():
            li = etree.SubElement(ol, "li")
            li.set("id", self.makeFootnoteId(id))
            # Parse the stored footnote text into the <li>.
            self.parser.parseChunk(li, self.footnotes[id])
            # Back-link from the note to its reference in the body.
            backlink = etree.Element("a")
            backlink.set("href", "#" + self.makeFootnoteRefId(id))
            backlink.set("rev", "footnote")
            backlink.set("title", "Jump back to footnote %d in the text" % \
                         (self.footnotes.index(id)+1))
            backlink.text = FN_BACKLINK_TEXT

            if li.getchildren():
                node = li[-1]
                if node.tag == "p":
                    # Append the back-link to the last paragraph.
                    node.text = node.text + NBSP_PLACEHOLDER
                    node.append(backlink)
                else:
                    # Otherwise give the back-link its own paragraph.
                    p = etree.SubElement(li, "p")
                    p.append(backlink)
        return div
class FootnotePreprocessor(markdown.preprocessors.Preprocessor):
    """ Find all footnote references and store for later use. """

    def __init__ (self, footnotes):
        # The owning FootnoteExtension, used as the footnote store.
        self.footnotes = footnotes

    def run(self, lines):
        # Strip footnote definitions out of the document, then re-join and
        # re-split so the result is a flat list of lines again.
        lines = self._handleFootnoteDefinitions(lines)
        text = "\n".join(lines)
        return text.split("\n")

    def _handleFootnoteDefinitions(self, lines):
        """
        Recursively find all footnote definitions in lines.

        Keywords:

        * lines: A list of lines of text

        Return: A list of lines with footnote definitions removed.
        """
        i, id, footnote = self._findFootnoteDefinition(lines)

        if id :
            # Keep everything before the definition; store the definition
            # (plus its indented continuation lines) on the extension.
            plain = lines[:i]
            detabbed, theRest = self.detectTabbed(lines[i+1:])
            self.footnotes.setFootnote(id,
                                       footnote + "\n"
                                       + "\n".join(detabbed))
            more_plain = self._handleFootnoteDefinitions(theRest)
            return plain + [""] + more_plain
        else :
            return lines

    def _findFootnoteDefinition(self, lines):
        """
        Find the parts of a footnote definition.

        Keywords:

        * lines: A list of lines of text.

        Return: A three item tuple containing the index of the first line of a
        footnote definition, the id of the definition and the body of the
        definition.
        """
        counter = 0
        for line in lines:
            m = DEF_RE.match(line)
            if m:
                return counter, m.group(2), m.group(3)
            counter += 1
        # No definition found: id and body are None.
        return counter, None, None

    def detectTabbed(self, lines):
        """ Find indented text and remove indent before further proccesing.

        Keyword arguments:

        * lines: an array of strings

        Returns: a list of post processed items and the unused
        remainder of the original list
        """
        items = []
        item = -1  # NOTE(review): never used below
        i = 0 # to keep track of where we are

        def detab(line):
            # Strip one level of indent; returns None (implicitly) when the
            # line is not indented.
            match = TABBED_RE.match(line)
            if match:
                return match.group(4)

        for line in lines:
            if line.strip(): # Non-blank line
                line = detab(line)
                if line:
                    items.append(line)
                    i += 1
                    continue
                else:
                    # Unindented non-blank line ends the footnote body.
                    return items, lines[i:]

            else: # Blank line: _maybe_ we are done.
                i += 1 # advance
                # Find the next non-blank line
                for j in range(i, len(lines)):
                    if lines[j].strip():
                        next_line = lines[j]; break
                else:
                    break # There is no more text; we are done.

                # Check if the next non-blank line is tabbed
                if detab(next_line): # Yes, more work to do.
                    items.append("")
                    continue
                else:
                    break # No, we are done.
        else:
            # Loop exhausted without breaking: consumed every line.
            i += 1

        return items, lines[i:]
class FootnotePattern(markdown.inlinepatterns.Pattern):
    """ InlinePattern for footnote markers in a document's body text. """
    def __init__(self, pattern, footnotes):
        markdown.inlinepatterns.Pattern.__init__(self, pattern)
        self.footnotes = footnotes
    def handleMatch(self, m):
        # Render a marker as <sup id="fnref:..."><a href="#fn:...">n</a></sup>
        # where n is the 1-based position of the footnote definition.
        ref = m.group(2)
        sup = etree.Element("sup")
        sup.set('id', self.footnotes.makeFootnoteRefId(ref))
        a = etree.SubElement(sup, "a")
        a.set('rel', 'footnote')
        a.set('href', '#' + self.footnotes.makeFootnoteId(ref))
        a.text = str(self.footnotes.footnotes.index(ref) + 1)
        return sup
class FootnoteTreeprocessor(markdown.treeprocessors.Treeprocessor):
    """ Build and append footnote div to end of document. """
    def __init__ (self, footnotes):
        # The owning FootnoteExtension, used to build the div and locate
        # the optional placeholder marker.
        self.footnotes = footnotes
    def run(self, root):
        """ Insert the rendered footnotes <div> into the document tree.

        If a placeholder marker exists in the document the div replaces
        it in place; otherwise the div is appended to the root.
        """
        footnotesDiv = self.footnotes.makeFootnotesDiv(root)
        if footnotesDiv:
            result = self.footnotes.findFootnotesPlaceholder(root)
            if result:
                node, isText = result
                if isText:
                    # The placeholder was the element's text: drop the text
                    # and make the div the element's first child.
                    # (Bug fix: the old code inserted into the *copy*
                    # returned by getchildren(), a silent no-op.)
                    node.text = None
                    node.insert(0, footnotesDiv)
                else:
                    # The placeholder was a child's tail: insert the div
                    # right after that child.
                    # (Bug fix: lists have .index, not .find, and the old
                    # code then referenced an undefined ``fnPlaceholder``.)
                    child, element = node
                    ind = element.getchildren().index(child)
                    element.insert(ind + 1, footnotesDiv)
                    child.tail = None
            else:
                # No placeholder: footnotes go at the end of the document.
                root.append(footnotesDiv)
class FootnotePostprocessor(markdown.postprocessors.Postprocessor):
    """ Replace placeholders with html entities. """
    def run(self, text):
        """ Swap the backlink and nbsp placeholders for numeric entities.

        Bug fix: the entity references had been decoded into literal
        characters at some point; the serialized HTML should carry the
        numeric entities (&#8617; is the return arrow, &#160; is nbsp).
        """
        text = text.replace(FN_BACKLINK_TEXT, "&#8617;")
        return text.replace(NBSP_PLACEHOLDER, "&#160;")
def makeExtension(configs=None):
    """ Return an instance of the FootnoteExtension.

    *configs* is a sequence of (key, value) pairs.  ``None`` replaces the
    old mutable ``[]`` default so a single list is never shared between
    calls; behavior for all callers is unchanged.
    """
    if configs is None:
        configs = []
    return FootnoteExtension(configs=configs)
| Python |
#!/usr/bin/env python
'''
WikiLinks Extension for Python-Markdown
======================================
Converts [[WikiLinks]] to relative links. Requires Python-Markdown 2.0+
Basic usage:
>>> import markdown
>>> text = "Some text with a [[WikiLink]]."
>>> html = markdown.markdown(text, ['wikilinks'])
>>> html
u'<p>Some text with a <a class="wikilink" href="/WikiLink/">WikiLink</a>.</p>'
Whitespace behavior:
>>> markdown.markdown('[[ foo bar_baz ]]', ['wikilinks'])
u'<p><a class="wikilink" href="/foo_bar_baz/">foo bar_baz</a></p>'
>>> markdown.markdown('foo [[ ]] bar', ['wikilinks'])
u'<p>foo bar</p>'
To define custom settings the simple way:
>>> markdown.markdown(text,
... ['wikilinks(base_url=/wiki/,end_url=.html,html_class=foo)']
... )
u'<p>Some text with a <a class="foo" href="/wiki/WikiLink.html">WikiLink</a>.</p>'
Custom settings the complex way:
>>> md = markdown.Markdown(
... extensions = ['wikilinks'],
... extension_configs = {'wikilinks': [
... ('base_url', 'http://example.com/'),
... ('end_url', '.html'),
... ('html_class', '') ]},
... safe_mode = True)
>>> md.convert(text)
u'<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>'
Use MetaData with mdx_meta.py (Note the blank html_class in MetaData):
>>> text = """wiki_base_url: http://example.com/
... wiki_end_url: .html
... wiki_html_class:
...
... Some text with a [[WikiLink]]."""
>>> md = markdown.Markdown(extensions=['meta', 'wikilinks'])
>>> md.convert(text)
u'<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>'
MetaData should not carry over to next document:
>>> md.convert("No [[MetaData]] here.")
u'<p>No <a class="wikilink" href="/MetaData/">MetaData</a> here.</p>'
Define a custom URL builder:
>>> def my_url_builder(label, base, end):
... return '/bar/'
>>> md = markdown.Markdown(extensions=['wikilinks'],
... extension_configs={'wikilinks' : [('build_url', my_url_builder)]})
>>> md.convert('[[foo]]')
u'<p><a class="wikilink" href="/bar/">foo</a></p>'
From the command line:
python markdown.py -x wikilinks(base_url=http://example.com/,end_url=.html,html_class=foo) src.txt
By [Waylan Limberg](http://achinghead.com/).
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
'''
import markdown
import re
def build_url(label, base, end):
    """ Build a url from the label, a base, and an end.

    Runs of spaces (optionally touching an underscore) collapse to a
    single underscore, giving slug-like labels.
    """
    slug = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
    return base + slug + end
class WikiLinkExtension(markdown.Extension):
    """ Wire the [[WikiLink]] inline pattern into Markdown. """
    def __init__(self, configs):
        # Defaults; each entry is [value, description].
        self.config = {
            'base_url' : ['/', 'String to append to beginning or URL.'],
            'end_url' : ['/', 'String to append to end of URL.'],
            'html_class' : ['wikilink', 'CSS hook. Leave blank for none.'],
            'build_url' : [build_url, 'Callable formats URL from label.'],
        }
        # Apply any user-supplied overrides.
        for key, value in configs:
            self.setConfig(key, value)
    def extendMarkdown(self, md, md_globals):
        self.md = md
        # Append to the end of the inline patterns, before "not_strong".
        pattern = WikiLinks(r'\[\[([A-Za-z0-9_ -]+)\]\]', self.config)
        pattern.md = md
        md.inlinePatterns.add('wikilink', pattern, "<not_strong")
class WikiLinks(markdown.inlinepatterns.Pattern):
    """ Inline pattern turning [[label]] matches into <a> elements. """
    def __init__(self, pattern, config):
        markdown.inlinepatterns.Pattern.__init__(self, pattern)
        self.config = config
    def handleMatch(self, m):
        label = m.group(2).strip()
        if not label:
            # [[ ]] containing only whitespace renders as nothing.
            return ''
        base_url, end_url, html_class = self._getMeta()
        url = self.config['build_url'][0](label, base_url, end_url)
        a = markdown.etree.Element('a')
        a.set('href', url)
        a.text = label
        if html_class:
            a.set('class', html_class)
        return a
    def _getMeta(self):
        """ Return meta data or config data. """
        # Start from config, then let per-document MetaData override.
        base_url = self.config['base_url'][0]
        end_url = self.config['end_url'][0]
        html_class = self.config['html_class'][0]
        meta = getattr(self.md, 'Meta', None)
        if meta is not None:
            if 'wiki_base_url' in meta:
                base_url = meta['wiki_base_url'][0]
            if 'wiki_end_url' in meta:
                end_url = meta['wiki_end_url'][0]
            if 'wiki_html_class' in meta:
                html_class = meta['wiki_html_class'][0]
        return base_url, end_url, html_class
def makeExtension(configs=None) :
    # Entry point used by Markdown's extension loader.
    return WikiLinkExtension(configs=configs)
if __name__ == "__main__":
    # Run the doctests embedded in the module docstring above.
    import doctest
    doctest.testmod()
| Python |
#!/usr/bin/python
"""
CodeHilite Extension for Python-Markdown
========================================
Adds code/syntax highlighting to standard Python-Markdown code blocks.
Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://www.freewisdom.org/project/python-markdown/CodeHilite>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.3+](http://python.org/)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
* [Pygments](http://pygments.org/)
"""
import markdown
# --------------- CONSTANTS YOU MIGHT WANT TO MODIFY -----------------
try:
    # Reuse the host Markdown's tab width when it exposes one.
    TAB_LENGTH = markdown.TAB_LENGTH
except AttributeError:
    TAB_LENGTH = 4
# ------------------ The Main CodeHilite Class ----------------------
class CodeHilite:
    """
    Determine language of source code, and pass it into the pygments hilighter.
    Basic Usage:
        >>> code = CodeHilite(src = 'some text')
        >>> html = code.hilite()
    * src: Source string or any object with a .readline attribute.
    * linenos: (Boolen) Turn line numbering 'on' or 'off' (off by default).
    * css_class: Set class name of wrapper div ('codehilite' by default).
    Low Level Usage:
        >>> code = CodeHilite()
        >>> code.src = 'some text' # String or anything with a .readline attr.
        >>> code.linenos = True # True or False; Turns line numbering on or of.
        >>> html = code.hilite()
    """
    def __init__(self, src=None, linenos=False, css_class="codehilite"):
        self.src = src
        self.lang = None
        self.linenos = linenos
        self.css_class = css_class
    def hilite(self):
        """
        Pass code to the [Pygments](http://pygments.pocoo.org/) highliter with
        optional line numbers. The output should then be styled with css to
        your liking. No styles are applied by default - only styling hooks
        (i.e.: <span class="k">).
        returns : A string of html.
        """
        self.src = self.src.strip('\n')
        self._getLang()
        try:
            from pygments import highlight
            from pygments.lexers import get_lexer_by_name, guess_lexer, \
                                        TextLexer
            from pygments.formatters import HtmlFormatter
        except ImportError:
            # Pygments is unavailable: just escape and pass through.
            txt = self._escape(self.src)
            if self.linenos:
                txt = self._number(txt)
            else :
                txt = '<div class="%s"><pre>%s</pre></div>\n'% \
                      (self.css_class, txt)
            return txt
        else:
            try:
                lexer = get_lexer_by_name(self.lang)
            except ValueError:
                try:
                    lexer = guess_lexer(self.src)
                except ValueError:
                    lexer = TextLexer()
            formatter = HtmlFormatter(linenos=self.linenos,
                                      cssclass=self.css_class)
            return highlight(self.src, lexer, formatter)
    def _escape(self, txt):
        """ Basic html escaping.

        Bug fix: the entity replacement strings had been entity-decoded
        into the characters themselves, making every replace a no-op.
        '&' must be escaped first so the others are not double-escaped.
        """
        txt = txt.replace('&', '&amp;')
        txt = txt.replace('<', '&lt;')
        txt = txt.replace('>', '&gt;')
        txt = txt.replace('"', '&quot;')
        return txt
    def _number(self, txt):
        """ Use <ol> for line numbering """
        # Fix Whitespace.  NOTE(review): like _escape, the replacement
        # strings here had collapsed to plain spaces (no-ops); restored to
        # the non-breaking-space entities required to preserve indentation
        # inside the generated <li> items.
        txt = txt.replace('\t', ' '*TAB_LENGTH)
        txt = txt.replace(" "*4, "&nbsp; &nbsp; ")
        txt = txt.replace(" "*3, "&nbsp; &nbsp;")
        txt = txt.replace(" "*2, "&nbsp; ")
        # Add line numbers
        lines = txt.splitlines()
        txt = '<div class="codehilite"><pre><ol>\n'
        for line in lines:
            txt += '\t<li>%s</li>\n'% line
        txt += '</ol></pre></div>\n'
        return txt
    def _getLang(self):
        """
        Determines language of a code block from shebang lines and whether said
        line should be removed or left in place. If the shebang line contains a
        path (even a single /) then it is assumed to be a real shebang line and
        left alone. However, if no path is given (e.g.: #!python or :::python)
        then it is assumed to be a mock shebang for language identification of
        a code fragment and removed from the code block prior to processing for
        code highlighting. When a mock shebang (e.g.: #!python) is found, line
        numbering is turned on. When colons are found in place of a shebang
        (e.g.: :::python), line numbering is left in the current state - off
        by default.
        """
        import re
        #split text into lines
        lines = self.src.split("\n")
        #pull first line to examine
        fl = lines.pop(0)
        c = re.compile(r'''
            (?:(?:::+)|(?P<shebang>[#]!)) # Shebang or 2 or more colons.
            (?P<path>(?:/\w+)*[/ ])?      # Zero or 1 path
            (?P<lang>[\w+-]*)             # The language
            ''', re.VERBOSE)
        # search first line for shebang
        m = c.search(fl)
        if m:
            # we have a match
            try:
                self.lang = m.group('lang').lower()
            except IndexError:
                self.lang = None
            if m.group('path'):
                # path exists - restore first line
                lines.insert(0, fl)
            if m.group('shebang'):
                # shebang exists - use line numbers
                self.linenos = True
        else:
            # No match: not a shebang line, keep it in the source.
            lines.insert(0, fl)
        self.src = "\n".join(lines).strip("\n")
# ------------------ The Markdown Extension -------------------------------
class HiliteTreeprocessor(markdown.treeprocessors.Treeprocessor):
    """ Hilight source code in code blocks. """
    def run(self, root):
        """ Find <pre><code> blocks and stash their hilited html. """
        for block in root.getiterator('pre'):
            kids = block.getchildren()
            if len(kids) != 1 or kids[0].tag != 'code':
                continue
            code = CodeHilite(kids[0].text,
                              linenos=self.config['force_linenos'][0],
                              css_class=self.config['css_class'][0])
            placeholder = self.markdown.htmlStash.store(code.hilite(),
                                                        safe=True)
            # Empty the element and turn it into a <p> holding only the
            # placeholder; the raw-html step removes the <p> when it
            # re-inserts the stashed html.
            block.clear()
            block.tag = 'p'
            block.text = placeholder
class CodeHiliteExtension(markdown.Extension):
    """ Add source code hilighting to markdown codeblocks. """
    def __init__(self, configs):
        # Defaults; each entry is [value, description].
        self.config = {
            'force_linenos' : [False, "Force line numbers - Default: False"],
            'css_class' : ["codehilite",
                           "Set class name for wrapper <div> - Default: codehilite"],
        }
        # Apply any user-supplied overrides.
        for key, value in configs:
            self.setConfig(key, value)
    def extendMarkdown(self, md, md_globals):
        """ Add HilitePostprocessor to Markdown instance. """
        treeprocessor = HiliteTreeprocessor(md)
        treeprocessor.config = self.config
        md.treeprocessors.add("hilite", treeprocessor, "_begin")
def makeExtension(configs=None):
    """ Return a CodeHiliteExtension.

    ``None`` replaces the old mutable ``{}`` default argument so a single
    dict is never shared between calls; behavior is otherwise unchanged.
    """
    if configs is None:
        configs = {}
    return CodeHiliteExtension(configs=configs)
| Python |
"""
Table of Contents Extension for Python-Markdown
* * *
(c) 2008 [Jack Miller](http://codezen.org)
Dependencies:
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
"""
import markdown
from markdown import etree
import re
class TocTreeprocessor(markdown.treeprocessors.Treeprocessor):
    # Iterator wrapper to get parent and child all at once
    def iterparent(self, root):
        # Yields (parent, child) for every element in document order.
        for parent in root.getiterator():
            for child in parent:
                yield parent, child
    def run(self, doc):
        # The generated table of contents lives in this <div class="toc">.
        div = etree.Element("div")
        div.attrib["class"] = "toc"
        last_li = None
        # Add title to the div
        if self.config["title"][0]:
            header = etree.SubElement(div, "span")
            header.attrib["class"] = "toctitle"
            header.text = self.config["title"][0]
        # level tracks the current nesting depth; list_stack holds the
        # <ul> elements open at each depth (div is the stack bottom).
        level = 0
        list_stack=[div]
        header_rgx = re.compile("[Hh][123456]")
        # Get a list of id attributes
        used_ids = []
        for c in doc.getiterator():
            if "id" in c.attrib:
                used_ids.append(c.attrib["id"])
        for (p, c) in self.iterparent(doc):
            if not c.text:
                continue
            # To keep the output from screwing up the
            # validation by putting a <div> inside of a <p>
            # we actually replace the <p> in its entirety.
            # We do not allow the marker inside a header as that
            # would causes an enless loop of placing a new TOC
            # inside previously generated TOC.
            if c.text.find(self.config["marker"][0]) > -1 and not header_rgx.match(c.tag):
                for i in range(len(p)):
                    if p[i] == c:
                        p[i] = div
                        break
            if header_rgx.match(c.tag):
                # Depth is the digit of the hN tag.
                tag_level = int(c.tag[-1])
                # Shallower header: pop back up the list stack.
                while tag_level < level:
                    list_stack.pop()
                    level -= 1
                # Deeper header: open a nested <ul>.
                if tag_level > level:
                    newlist = etree.Element("ul")
                    if last_li:
                        last_li.append(newlist)
                    else:
                        list_stack[-1].append(newlist)
                    list_stack.append(newlist)
                    level += 1
                # Do not override pre-existing ids
                if not "id" in c.attrib:
                    id = self.config["slugify"][0](c.text)
                    # Disambiguate duplicates with an _N suffix.
                    if id in used_ids:
                        ctr = 1
                        while "%s_%d" % (id, ctr) in used_ids:
                            ctr += 1
                        id = "%s_%d" % (id, ctr)
                    used_ids.append(id)
                    c.attrib["id"] = id
                else:
                    id = c.attrib["id"]
                # List item link, to be inserted into the toc div
                last_li = etree.Element("li")
                link = etree.SubElement(last_li, "a")
                link.text = c.text
                link.attrib["href"] = '#' + id
                if int(self.config["anchorlink"][0]):
                    # Make the header itself a self-link.
                    anchor = etree.SubElement(c, "a")
                    anchor.text = c.text
                    anchor.attrib["href"] = "#" + id
                    anchor.attrib["class"] = "toclink"
                    c.text = ""
                list_stack[-1].append(last_li)
class TocExtension(markdown.Extension):
    def __init__(self, configs):
        # Defaults; each entry is [default value, description].
        self.config = { "marker" : ["[TOC]",
                            "Text to find and replace with Table of Contents -"
                            "Defaults to \"[TOC]\""],
                        "slugify" : [self.slugify,
                            "Function to generate anchors based on header text-"
                            "Defaults to a built in slugify function."],
                        "title" : [None,
                            "Title to insert into TOC <div> - "
                            "Defaults to None"],
                        "anchorlink" : [0,
                            "1 if header should be a self link"
                            "Defaults to 0"]}
        # Apply any user-supplied overrides.
        for key, value in configs:
            self.setConfig(key, value)
    # This is exactly the same as Django's slugify
    def slugify(self, value):
        """ Slugify a string, to make it URL friendly. """
        import unicodedata
        # Strip accents to ascii, drop everything that is not a word
        # character, whitespace or hyphen, then join runs with '-'.
        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
        value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
        return re.sub('[-\s]+','-',value)
    def extendMarkdown(self, md, md_globals):
        # Run first so headers are still plain text when scanned.
        tocext = TocTreeprocessor(md)
        tocext.config = self.config
        md.treeprocessors.add("toc", tocext, "_begin")
def makeExtension(configs=None):
    """ Return the TOC extension.

    ``None`` replaces the old mutable ``{}`` default argument so a single
    dict is never shared between calls; behavior is otherwise unchanged.
    """
    if configs is None:
        configs = {}
    return TocExtension(configs=configs)
| Python |
#!/usr/bin/env python
"""
Fenced Code Extension for Python Markdown
=========================================
This extension adds Fenced Code Blocks to Python-Markdown.
>>> import markdown
>>> text = '''
... A paragraph before a fenced code block:
...
... ~~~
... Fenced code block
... ~~~
... '''
>>> html = markdown.markdown(text, extensions=['fenced_code'])
>>> html
u'<p>A paragraph before a fenced code block:</p>\\n<pre><code>Fenced code block\\n</code></pre>'
Works with safe_mode also (we check this because we are using the HtmlStash):
>>> markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace')
u'<p>A paragraph before a fenced code block:</p>\\n<pre><code>Fenced code block\\n</code></pre>'
Include tilde's in a code block and wrap with blank lines:
>>> text = '''
... ~~~~~~~~
...
... ~~~~
...
... ~~~~~~~~'''
>>> markdown.markdown(text, extensions=['fenced_code'])
u'<pre><code>\\n~~~~\\n\\n</code></pre>'
Multiple blocks and language tags:
>>> text = '''
... ~~~~{.python}
... block one
... ~~~~
...
... ~~~~.html
... <p>block two</p>
... ~~~~'''
>>> markdown.markdown(text, extensions=['fenced_code'])
u'<pre><code class="python">block one\\n</code></pre>\\n\\n<pre><code class="html"><p>block two</p>\\n</code></pre>'
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://www.freewisdom.org/project/python-markdown/Fenced__Code__Blocks>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
"""
import markdown, re
# Global vars
# Matches a whole fenced block in one shot: the opening fence (3+ tildes),
# an optional language tag written as {.lang} or .lang, the code body, and
# a closing fence of the same length (backreference to the 'fence' group).
FENCED_BLOCK_RE = re.compile( \
    r'(?P<fence>^~{3,})[ ]*(\{?\.(?P<lang>[a-zA-Z0-9_-]*)\}?)?[ ]*\n(?P<code>.*?)(?P=fence)[ ]*$',
    re.MULTILINE|re.DOTALL
    )
# Templates for the stored html: the wrapper and the optional class attr.
CODE_WRAP = '<pre><code%s>%s</code></pre>'
LANG_TAG = ' class="%s"'
class FencedCodeExtension(markdown.Extension):
    def extendMarkdown(self, md, md_globals):
        """ Add FencedBlockPreprocessor to the Markdown instance. """
        preprocessor = FencedBlockPreprocessor(md)
        # Must run before any other preprocessor touches the raw lines.
        md.preprocessors.add('fenced_code_block', preprocessor, "_begin")
class FencedBlockPreprocessor(markdown.preprocessors.Preprocessor):
    def run(self, lines):
        """ Match and store Fenced Code Blocks in the HtmlStash.

        Each matched block is replaced in the text by a stash placeholder
        so later processing cannot touch the code.
        """
        text = "\n".join(lines)
        while 1:
            m = FENCED_BLOCK_RE.search(text)
            if m:
                lang = ''
                if m.group('lang'):
                    lang = LANG_TAG % m.group('lang')
                code = CODE_WRAP % (lang, self._escape(m.group('code')))
                placeholder = self.markdown.htmlStash.store(code, safe=True)
                text = '%s\n%s\n%s'% (text[:m.start()], placeholder, text[m.end():])
            else:
                break
        return text.split("\n")
    def _escape(self, txt):
        """ Basic html escaping.

        Bug fix: the entity replacement strings had been entity-decoded
        into the characters themselves, making every replace a no-op.
        '&' is escaped first to avoid double-escaping the others.
        """
        txt = txt.replace('&', '&amp;')
        txt = txt.replace('<', '&lt;')
        txt = txt.replace('>', '&gt;')
        txt = txt.replace('"', '&quot;')
        return txt
def makeExtension(configs=None):
    # NOTE: *configs* is accepted for extension-loader compatibility but
    # ignored -- this extension has no configurable settings.
    return FencedCodeExtension()
if __name__ == "__main__":
    # Run the doctests embedded in the module docstring above.
    import doctest
    doctest.testmod()
| Python |
'''
Abbreviation Extension for Python-Markdown
==========================================
This extension adds abbreviation handling to Python-Markdown.
Simple Usage:
>>> import markdown
>>> text = """
... Some text with an ABBR and a REF. Ignore REFERENCE and ref.
...
... *[ABBR]: Abbreviation
... *[REF]: Abbreviation Reference
... """
>>> markdown.markdown(text, ['abbr'])
u'<p>Some text with an <abbr title="Abbreviation">ABBR</abbr> and a <abbr title="Abbreviation Reference">REF</abbr>. Ignore REFERENCE and ref.</p>'
Copyright 2007-2008
* [Waylan Limberg](http://achinghead.com/)
* [Seemant Kulleen](http://www.kulleen.org/)
'''
import markdown, re
from markdown import etree
# Global Vars
# Matches an abbreviation definition line like ``*[ABBR]: Expanded title``.
ABBR_REF_RE = re.compile(r'[*]\[(?P<abbr>[^\]]*)\][ ]?:\s*(?P<title>.*)')
class AbbrExtension(markdown.Extension):
    """ Abbreviation Extension for Python-Markdown. """
    def extendMarkdown(self, md, md_globals):
        """ Insert AbbrPreprocessor before ReferencePreprocessor. """
        preprocessor = AbbrPreprocessor(md)
        md.preprocessors.add('abbr', preprocessor, '<reference')
class AbbrPreprocessor(markdown.preprocessors.Preprocessor):
    """ Abbreviation Preprocessor - parse text for abbr references. """
    def run(self, lines):
        '''
        Find and remove all Abbreviation references from the text.
        Each reference is set as a new AbbrPattern in the markdown instance.
        '''
        kept = []
        for line in lines:
            m = ABBR_REF_RE.match(line)
            if not m:
                kept.append(line)
                continue
            # Definition line: register an inline pattern and drop it.
            abbr = m.group('abbr').strip()
            title = m.group('title').strip()
            self.markdown.inlinePatterns['abbr-%s'%abbr] = \
                AbbrPattern(self._generate_pattern(abbr), title)
        return kept
    def _generate_pattern(self, text):
        '''
        Given a string, returns an regex pattern to match that string.
        'HTML' -> r'(?P<abbr>\b[H][T][M][L]\b)'
        Note: we force each char as a literal match (in brackets) as we don't
        know what they will be beforehand.
        '''
        bracketed = [r'[%s]' % ch for ch in text]
        return r'(?P<abbr>\b%s\b)' % (r''.join(bracketed))
class AbbrPattern(markdown.inlinepatterns.Pattern):
    """ Abbreviation inline pattern. """
    def __init__(self, pattern, title):
        markdown.inlinepatterns.Pattern.__init__(self, pattern)
        self.title = title
    def handleMatch(self, m):
        # Wrap the matched text in <abbr title="...">.
        elem = etree.Element('abbr')
        elem.set('title', self.title)
        elem.text = m.group('abbr')
        return elem
def makeExtension(configs=None):
    # Entry point used by Markdown's extension loader.
    return AbbrExtension(configs=configs)
if __name__ == "__main__":
    # Run the doctests embedded in the module docstring above.
    import doctest
    doctest.testmod()
| Python |
#!/usr/bin/env python
"""
Python-Markdown Extra Extension
===============================
A compilation of various Python-Markdown extensions that imitates
[PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/).
Note that each of the individual extensions still need to be available
on your PYTHONPATH. This extension simply wraps them all up as a
convenience so that only one extension needs to be listed when
initiating Markdown. See the documentation for each individual
extension for specifics about that extension.
In the event that one or more of the supported extensions are not
available for import, Markdown will issue a warning and simply continue
without that extension.
There may be additional extensions that are distributed with
Python-Markdown that are not included here in Extra. Those extensions
are not part of PHP Markdown Extra, and therefore, not part of
Python-Markdown Extra. If you really would like Extra to include
additional extensions, we suggest creating your own clone of Extra
under a differant name. You could also edit the `extensions` global
variable defined below, but be aware that such changes may be lost
when you upgrade to any future version of Python-Markdown.
"""
import markdown
# The individual extensions bundled as "extra".  Each name must still be
# importable as a Markdown extension from PYTHONPATH; missing ones are
# reported by Markdown's loader at registration time.
extensions = ['fenced_code',
              'footnotes',
              'headerid',
              'def_list',
              'tables',
              'abbr',
              ]
class ExtraExtension(markdown.Extension):
    """ Add various extensions to Markdown class."""
    def extendMarkdown(self, md, md_globals):
        """ Register extension instances. """
        # Delegate to Markdown's own loader so each bundled extension is
        # located and initialized exactly as if listed individually.
        md.registerExtensions(extensions, self.config)
def makeExtension(configs=None):
    """ Return the Extra wrapper extension.

    ``None`` replaces the old mutable ``{}`` default argument; the mapping
    is still copied with ``dict()`` before being handed to the extension.
    """
    if configs is None:
        configs = {}
    return ExtraExtension(configs=dict(configs))
| Python |
#!/usr/bin/env python
"""
Tables Extension for Python-Markdown
====================================
Added parsing of tables to Python-Markdown.
A simple example:
First Header | Second Header
------------- | -------------
Content Cell | Content Cell
Content Cell | Content Cell
Copyright 2009 - [Waylan Limberg](http://achinghead.com)
"""
import markdown
from markdown import etree
class TableProcessor(markdown.blockprocessors.BlockProcessor):
    """ Process Tables. """
    def test(self, parent, block):
        """ Cheap shape check: a header row with pipes, a separator row
        with pipes and dashes, and at least one data row. """
        rows = block.split('\n')
        return (len(rows) > 2 and '|' in rows[0] and '|' in rows[1]
                and '-' in rows[1] and rows[1][0] in ('|', ':', '-'))
    def run(self, parent, blocks):
        """ Parse a table block and build table. """
        table_lines = blocks.pop(0).split('\n')
        header, separator = table_lines[0], table_lines[1]
        body = table_lines[2:]
        # Bordered style starts every row with a leading pipe.
        border = header.startswith('|')
        # Column alignment is encoded by colons in the separator row.
        align = [self._alignment(cell)
                 for cell in self._split_row(separator, border)]
        # Build table
        table = etree.SubElement(parent, 'table')
        thead = etree.SubElement(table, 'thead')
        self._build_row(header, thead, align, border)
        tbody = etree.SubElement(table, 'tbody')
        for line in body:
            self._build_row(line, tbody, align, border)
    def _alignment(self, cell):
        """ Map a separator cell like ':---:' to its align attribute. """
        left = cell.startswith(':')
        right = cell.endswith(':')
        if left and right:
            return 'center'
        if left:
            return 'left'
        if right:
            return 'right'
        return None
    def _build_row(self, row, parent, align, border):
        """ Given a row of text, build table cells. """
        tr = etree.SubElement(parent, 'tr')
        if parent.tag == 'thead':
            cell_tag = 'th'
        else:
            cell_tag = 'td'
        cells = self._split_row(row, border)
        # Iterate align rather than cells so every row ends up with the
        # same number of columns; short rows get empty cells.
        for i, a in enumerate(align):
            c = etree.SubElement(tr, cell_tag)
            if i < len(cells):
                c.text = cells[i].strip()
            else:
                c.text = ""
            if a:
                c.set('align', a)
    def _split_row(self, row, border):
        """ split a row of text into list of cells. """
        if border:
            # Strip at most one boundary pipe on each side.
            if row.startswith('|'):
                row = row[1:]
            if row.endswith('|'):
                row = row[:-1]
        return row.split('|')
class TableExtension(markdown.Extension):
    """ Add tables to Markdown. """
    def extendMarkdown(self, md, md_globals):
        """ Add an instance of TableProcessor to BlockParser. """
        processor = TableProcessor(md.parser)
        # Tables must be recognized before hash-style headers.
        md.parser.blockprocessors.add('table', processor, '<hashheader')
def makeExtension(configs=None):
    """ Return the tables extension.

    ``None`` replaces the old mutable ``{}`` default argument so a single
    dict is never shared between calls; behavior is otherwise unchanged.
    """
    if configs is None:
        configs = {}
    return TableExtension(configs=configs)
| Python |
import markdown
import re
def isString(s):
    """ Check if it's string (either unicode or byte string). """
    return isinstance(s, (unicode, str))
class Processor:
    """ Base class for processors; optionally binds the owning Markdown
    instance as ``self.markdown``. """
    def __init__(self, markdown_instance=None):
        if not markdown_instance:
            # No (truthy) instance supplied: leave the attribute unset so
            # subclasses can detect the unbound state via hasattr().
            return
        self.markdown = markdown_instance
class Treeprocessor(Processor):
    """
    Treeprocessors are run on the ElementTree object before serialization.
    Each Treeprocessor implements a "run" method that takes a pointer to an
    ElementTree, modifies it as necessary and returns an ElementTree
    object.
    Treeprocessors must extend markdown.Treeprocessor.
    """
    def run(self, root):
        """
        Subclasses of Treeprocessor should implement a `run` method, which
        takes a root ElementTree. This method can return another ElementTree
        object, and the existing root ElementTree will be replaced, or it can
        modify the current tree and return None.
        """
        # Base implementation intentionally does nothing.
        pass
class InlineProcessor(Treeprocessor):
    """
    A Treeprocessor that traverses a tree, applying inline patterns.

    Matched pattern output is temporarily replaced by numbered placeholder
    strings (stashed in self.stashed_nodes) and re-inserted as ElementTree
    nodes once all patterns have run.
    """
    def __init__ (self, md):
        self.__placeholder_prefix = markdown.INLINE_PLACEHOLDER_PREFIX
        self.__placeholder_suffix = markdown.ETX
        # 4 extra characters for the zero-padded numeric id.
        self.__placeholder_length = 4 + len(self.__placeholder_prefix) \
                                      + len(self.__placeholder_suffix)
        self.__placeholder_re = re.compile(markdown.INLINE_PLACEHOLDER % r'([0-9]{4})')
        self.markdown = md
    def __makePlaceholder(self, type):
        """ Generate a placeholder string and its numeric id. """
        id = "%04d" % len(self.stashed_nodes)
        hash = markdown.INLINE_PLACEHOLDER % id
        return hash, id
    def __findPlaceholder(self, data, index):
        """
        Extract id from data string, start from index
        Keyword arguments:
        * data: string
        * index: index, from which we start search
        Returns: placeholder id and string index, after the found placeholder.
        """
        m = self.__placeholder_re.search(data, index)
        if m:
            return m.group(1), m.end()
        else:
            return None, index + 1
    def __stashNode(self, node, type):
        """ Add node to stash, returning its placeholder string. """
        placeholder, id = self.__makePlaceholder(type)
        self.stashed_nodes[id] = node
        return placeholder
    def __handleInline(self, data, patternIndex=0):
        """
        Process string with inline patterns and replace it
        with placeholders
        Keyword arguments:
        * data: A line of Markdown text
        * patternIndex: The index of the inlinePattern to start with
        Returns: String with placeholders.
        """
        # AtomicStrings are deliberately left untouched.
        if not isinstance(data, markdown.AtomicString):
            startIndex = 0
            while patternIndex < len(self.markdown.inlinePatterns):
                data, matched, startIndex = self.__applyPattern(
                    self.markdown.inlinePatterns.value_for_index(patternIndex),
                    data, patternIndex, startIndex)
                if not matched:
                    patternIndex += 1
        return data
    def __processElementText(self, node, subnode, isText=True):
        """
        Process placeholders in Element.text or Element.tail
        of Elements popped from self.stashed_nodes.
        Keywords arguments:
        * node: parent node
        * subnode: processing node
        * isText: bool variable, True - it's text, False - it's tail
        Returns: None
        """
        if isText:
            text = subnode.text
            subnode.text = None
        else:
            text = subnode.tail
            subnode.tail = None
        childResult = self.__processPlaceholders(text, subnode)
        if not isText and node is not subnode:
            # Tail content: new nodes go after subnode inside its parent.
            pos = node.getchildren().index(subnode)
            node.remove(subnode)
        else:
            pos = 0
        # Insert in reverse so the original order is preserved at `pos`.
        childResult.reverse()
        for newChild in childResult:
            node.insert(pos, newChild)
    def __processPlaceholders(self, data, parent):
        """
        Process string with placeholders and generate ElementTree tree.
        Keyword arguments:
        * data: string with placeholders instead of ElementTree elements.
        * parent: Element, which contains processing inline data
        Returns: list with ElementTree elements with applied inline patterns.
        """
        def linkText(text):
            # Attach literal text to the previous sibling's tail, or to
            # the parent's text when no sibling has been emitted yet.
            if text:
                if result:
                    if result[-1].tail:
                        result[-1].tail += text
                    else:
                        result[-1].tail = text
                else:
                    if parent.text:
                        parent.text += text
                    else:
                        parent.text = text
        result = []
        startIndex = 0
        while data:
            index = data.find(self.__placeholder_prefix, startIndex)
            if index != -1:
                id, phEndIndex = self.__findPlaceholder(data, index)
                if id in self.stashed_nodes:
                    node = self.stashed_nodes.get(id)
                    if index > 0:
                        text = data[startIndex:index]
                        linkText(text)
                    if not isString(node): # it's Element
                        # Recurse into the stashed node's own text/tails.
                        for child in [node] + node.getchildren():
                            if child.tail:
                                if child.tail.strip():
                                    self.__processElementText(node, child, False)
                            if child.text:
                                if child.text.strip():
                                    self.__processElementText(child, child)
                    else: # it's just a string
                        linkText(node)
                        startIndex = phEndIndex
                        continue
                    startIndex = phEndIndex
                    result.append(node)
                else: # wrong placeholder
                    # BUG FIX: this previously read ``len(prefix)`` -- an
                    # undefined name -- and raised NameError whenever a
                    # stray placeholder prefix appeared in the text.  Skip
                    # past the prefix and treat it as literal text.
                    end = index + len(self.__placeholder_prefix)
                    linkText(data[startIndex:end])
                    startIndex = end
            else:
                # No more placeholders: the rest is literal text.
                text = data[startIndex:]
                linkText(text)
                data = ""
        return result
    def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
        """
        Check if the line fits the pattern, create the necessary
        elements, add it to stashed_nodes.
        Keyword arguments:
        * data: the text to be processed
        * pattern: the pattern to be checked
        * patternIndex: index of current pattern
        * startIndex: string index, from which we starting search
        Returns: String with placeholders instead of ElementTree elements.
        """
        match = pattern.getCompiledRegExp().match(data[startIndex:])
        leftData = data[:startIndex]
        if not match:
            return data, False, 0
        node = pattern.handleMatch(match)
        if node is None:
            # Pattern declined the match: resume scanning after it.
            return data, True, len(leftData) + match.span(len(match.groups()))[0]
        if not isString(node):
            if not isinstance(node.text, markdown.AtomicString):
                # We need to process current node too
                for child in [node] + node.getchildren():
                    if not isString(node):
                        if child.text:
                            child.text = self.__handleInline(child.text,
                                                             patternIndex + 1)
                        if child.tail:
                            child.tail = self.__handleInline(child.tail,
                                                             patternIndex)
        placeholder = self.__stashNode(node, pattern.type())
        return "%s%s%s%s" % (leftData,
                             match.group(1),
                             placeholder, match.groups()[-1]), True, 0
    def run(self, tree):
        """Apply inline patterns to a parsed Markdown tree.
        Iterate over ElementTree, find elements with inline tag, apply inline
        patterns and append newly created Elements to tree. If you don't
        want process your data with inline paterns, instead of normal string,
        use subclass AtomicString:
        node.text = markdown.AtomicString("data won't be processed with inline patterns")
        Arguments:
        * markdownTree: ElementTree object, representing Markdown tree.
        Returns: ElementTree object with applied inline patterns.
        """
        self.stashed_nodes = {}
        stack = [tree]
        while stack:
            currElement = stack.pop()
            insertQueue = []
            for child in currElement.getchildren():
                if child.text and not isinstance(child.text, markdown.AtomicString):
                    text = child.text
                    child.text = None
                    lst = self.__processPlaceholders(self.__handleInline(
                                                     text), child)
                    stack += lst
                    insertQueue.append((child, lst))
                if child.getchildren():
                    stack.append(child)
            for element, lst in insertQueue:
                if element.text:
                    element.text = \
                        markdown.inlinepatterns.handleAttributes(element.text,
                                                                 element)
                i = 0
                for newChild in lst:
                    # Processing attributes
                    if newChild.tail:
                        newChild.tail = \
                            markdown.inlinepatterns.handleAttributes(newChild.tail,
                                                                     element)
                    if newChild.text:
                        newChild.text = \
                            markdown.inlinepatterns.handleAttributes(newChild.text,
                                                                     newChild)
                    element.insert(i, newChild)
                    i += 1
        return tree
class PrettifyTreeprocessor(Treeprocessor):
    """ Add linebreaks to the html document. """

    def _prettifyETree(self, elem):
        """ Recursively add linebreaks to ElementTree children. """
        i = "\n"
        if markdown.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']:
            # Put a newline right after the opening tag when the element
            # holds no meaningful text and its first child is block-level.
            if (not elem.text or not elem.text.strip()) \
                    and len(elem) and markdown.isBlockLevel(elem[0].tag):
                elem.text = i
            for e in elem:
                if markdown.isBlockLevel(e.tag):
                    self._prettifyETree(e)
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
        if not elem.tail or not elem.tail.strip():
            # Ensure the element is followed by a newline either way.
            elem.tail = i

    def run(self, root):
        """ Add linebreaks to ElementTree root object. """
        self._prettifyETree(root)
        # Do <br />'s separately as they are often in the middle of
        # inline content and missed by _prettifyETree.
        brs = root.getiterator('br')
        for br in brs:
            if not br.tail or not br.tail.strip():
                br.tail = '\n'
            else:
                br.tail = '\n%s' % br.tail
| Python |
"""
INLINE PATTERNS
=============================================================================
Inline patterns such as *emphasis* are handled by means of auxiliary
objects, one per pattern. Pattern objects must be instances of classes
that extend markdown.Pattern. Each pattern object uses a single regular
expression and needs to support the following methods:
pattern.getCompiledRegExp() # returns a regular expression
pattern.handleMatch(m) # takes a match object and returns
# an ElementTree element or just plain text
All of python markdown's built-in patterns subclass from Pattern,
but you can add additional patterns that don't.
Also note that all the regular expressions used by inline must
capture the whole block. For this reason, they all start with
'^(.*)' and end with '(.*)!'. In case with built-in expression
Pattern takes care of adding the "^(.*)" and "(.*)!".
Finally, the order in which regular expressions are applied is very
important - e.g. if we first replace http://.../ links with <a> tags
and _then_ try to replace inline html, we would end up with a mess.
So, we apply the expressions in the following order:
* escape and backticks have to go before everything else, so
that we can preempt any markdown patterns by escaping them.
* then we handle auto-links (must be done before inline html)
* then we handle inline HTML. At this point we will simply
replace all inline HTML strings with a placeholder and add
the actual HTML to a hash.
* then inline images (must be done before links)
* then bracketed links, first regular then reference-style
* finally we apply strong and emphasis
"""
import markdown
import re
from urlparse import urlparse, urlunparse
import sys
# Import the HTML entity tables under their Python 2 name.
# BUG FIX: the original compared the free-form string ``sys.version``
# lexicographically against "3.0", which is fragile (it inspects the whole
# version banner, and a hypothetical "10.x" would sort before "3.0").
# ``sys.version_info`` compares version components numerically.
if sys.version_info >= (3, 0):
    from html import entities as htmlentitydefs
else:
    import htmlentitydefs
"""
The actual regular expressions for patterns
-----------------------------------------------------------------------------
"""
# Shared regex fragments ----------------------------------------------------
NOBRACKET = r'[^\]\[]*'   # a run of characters containing no brackets
BRK = ( r'\[('
        + (NOBRACKET + r'(\[')*6
        + (NOBRACKET+ r'\])*')*6
        + NOBRACKET + r')\]' )  # bracketed text, up to 6 nesting levels
NOIMG = r'(?<!\!)'        # not preceded by "!" (i.e. not an image)

BACKTICK_RE = r'(?<!\\)(`+)(.+?)(?<!`)\2(?!`)' # `e=f()` or ``e=f("`")``
ESCAPE_RE = r'\\(.)'                      # \<
EMPHASIS_RE = r'(\*)([^\*]+)\2'           # *emphasis*
STRONG_RE = r'(\*{2}|_{2})(.+?)\2'        # **strong**
STRONG_EM_RE = r'(\*{3}|_{3})(.+?)\2'     # ***strong***
if markdown.SMART_EMPHASIS:
    EMPHASIS_2_RE = r'(?<!\w)(_)(\S.+?)\2(?!\w)' # _emphasis_
else:
    EMPHASIS_2_RE = r'(_)(.+?)\2'         # _emphasis_
LINK_RE = NOIMG + BRK + \
r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*?)\12)?\)'''
# [text](url) or [text](<url>)

IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^\)]*))\)'
# ![alttxt](http://x.com/) or ![alttxt](<http://x.com/>)
REFERENCE_RE = NOIMG + BRK+ r'\s*\[([^\]]*)\]'           # [Google][3]
# BUG FIX: the second fragment below was a non-raw string; its value is the
# same, but '\s' is an invalid escape sequence under newer Pythons.
IMAGE_REFERENCE_RE = r'\!' + BRK + r'\s*\[([^\]]*)\]'    # ![alt text][2]
NOT_STRONG_RE = r'((^| )(\*|_)( |$))'                    # stand-alone * or _
AUTOLINK_RE = r'<((?:f|ht)tps?://[^>]*)>'                # <http://www.123.com>
AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>'                     # <me@example.com>
HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)'          # <...>
ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)'                        # &amp;
# BUG FIX: both line-break patterns require *two* trailing spaces (as the
# comments state); the single-space versions were whitespace-mangled.
LINE_BREAK_RE = r'  \n'   # two spaces at end of line
LINE_BREAK_2_RE = r'  $'  # two spaces at end of text
def dequote(string):
    """Strip one matching pair of surrounding quotes (single or double)."""
    first_last = (string[:1], string[-1:])
    if first_last in (('"', '"'), ("'", "'")):
        return string[1:-1]
    return string
ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}

def handleAttributes(text, parent):
    """Set values of an element based on attribute definitions ({@id=123}).

    Each ``{@key=value}`` occurrence in *text* is applied to *parent* via
    ``parent.set`` (newlines in the value become spaces) and removed from
    the returned text.

    BUG FIX: the original callback implicitly returned None, which makes
    ``re.sub`` raise TypeError whenever an attribute definition actually
    matches; the replacement function must return the substitution string.
    """
    def attributeCallback(match):
        parent.set(match.group(1), match.group(2).replace('\n', ' '))
        return ''  # strip the definition from the output text
    return ATTR_RE.sub(attributeCallback, text)
"""
The pattern classes
-----------------------------------------------------------------------------
"""
class Pattern:
    """Base class that inline patterns subclass."""

    def __init__(self, pattern, markdown_instance=None):
        """Create an inline pattern.

        Keyword arguments:

        * pattern: a regular expression matching the inline construct.
        * markdown_instance: optional Markdown instance to attach.
        """
        self.pattern = pattern
        # Anchor the pattern so every match also captures the text before
        # (group 1) and after (last group) the construct, across newlines.
        anchored = "^(.*?)%s(.*?)$" % pattern
        self.compiled_re = re.compile(anchored, re.DOTALL)

        # API for Markdown to pass safe_mode into the instance.
        self.safe_mode = False
        if markdown_instance:
            self.markdown = markdown_instance

    def getCompiledRegExp(self):
        """Return the compiled regular expression for this pattern."""
        return self.compiled_re

    def handleMatch(self, m):
        """Return an ElementTree element (or plain text) for match *m*.

        Subclasses should override this method.

        Keyword arguments:

        * m: a re match object for this pattern's compiled expression.
        """
        pass

    def type(self):
        """Return the class name, which identifies the pattern type."""
        return self.__class__.__name__

BasePattern = Pattern # for backward compatibility
class SimpleTextPattern (Pattern):
    """ Return a simple text of group(2) of a Pattern. """
    def handleMatch(self, m):
        captured = m.group(2)
        # A bare placeholder prefix means there is nothing to emit.
        return None if captured == markdown.INLINE_PLACEHOLDER_PREFIX else captured
class SimpleTagPattern (Pattern):
    """
    Return element of type `tag` with a text attribute of group(3)
    of a Pattern.
    """
    def __init__(self, pattern, tag):
        # Reuse Pattern's compilation, then remember which tag to emit.
        Pattern.__init__(self, pattern)
        self.tag = tag

    def handleMatch(self, m):
        element = markdown.etree.Element(self.tag)
        element.text = m.group(3)
        return element
class SubstituteTagPattern (SimpleTagPattern):
    """ Return an element of type `tag` with no children. """
    def handleMatch(self, m):
        # The matched text is discarded; only the empty tag remains.
        return markdown.etree.Element(self.tag)
class BacktickPattern (Pattern):
    """ Return a `<code>` element containing the matching text. """
    def __init__(self, pattern):
        Pattern.__init__(self, pattern)
        self.tag = "code"

    def handleMatch(self, m):
        code = markdown.etree.Element(self.tag)
        # AtomicString shields the code span from further inline processing.
        code.text = markdown.AtomicString(m.group(3).strip())
        return code
class DoubleTagPattern (SimpleTagPattern):
    """Return a ElementTree element nested in tag2 nested in tag1.

    Useful for strong emphasis etc.
    """
    def handleMatch(self, m):
        # ``self.tag`` holds two comma-separated tag names, outer first.
        outer_tag, inner_tag = self.tag.split(",")
        outer = markdown.etree.Element(outer_tag)
        inner = markdown.etree.SubElement(outer, inner_tag)
        inner.text = m.group(3)
        return outer
class HtmlPattern (Pattern):
    """ Store raw inline html and return a placeholder. """
    def handleMatch(self, m):
        """Stash the matched raw HTML and return its placeholder string.

        The raw HTML is kept out of the tree and swapped back in during
        post-processing; only the placeholder travels through the parser.
        (Removed the unused local ``inline = True`` from the original.)
        """
        rawhtml = m.group(2)
        return self.markdown.htmlStash.store(rawhtml)
class LinkPattern (Pattern):
    """ Return a link element from the given match. """
    def handleMatch(self, m):
        """Build an <a> element from a ``[text](url "title")`` match.

        Match groups used here: group(2) is the link text, group(9) the
        url (possibly <bracketed>), group(11) the optional quoted title.
        """
        # Refuse to process unlabelled links. In XHTML mode this produces a
        # self-closing <a> tag which is misinterpreted by most browsers as an
        # opening one and makes the rest of the page a link. Which is
        # definitely not the intended behaviour.
        if not m.group(2):
            return None

        el = markdown.etree.Element("a")
        el.text = m.group(2)
        title = m.group(11)
        href = m.group(9)

        if href:
            if href[0] == "<":
                # Strip the surrounding angle brackets from a <url>.
                href = href[1:-1]
            el.set("href", self.sanitize_url(href.strip()))
        else:
            el.set("href", "")

        if title:
            title = dequote(title) #.replace('"', "&quot;")
            el.set("title", title)
        return el

    def sanitize_url(self, url):
        """
        Sanitize a url against xss attacks in "safe_mode".

        Rather than specifically blacklisting `javascript:alert("XSS")` and all
        its aliases (see <http://ha.ckers.org/xss.html>), we whitelist known
        safe url formats. Most urls contain a network location, however some
        are known not to (i.e.: mailto links). Script urls do not contain a
        location. Additionally, for `javascript:...`, the scheme would be
        "javascript" but some aliases will appear to `urlparse()` to have no
        scheme. On top of that relative links (i.e.: "foo/bar.html") have no
        scheme. Therefore we must check "path", "parameters", "query" and
        "fragment" for any literal colons. We don't check "scheme" for colons
        because it *should* never have any and "netloc" must allow the form:
        `username:password@host:port`.
        """
        locless_schemes = ['', 'mailto', 'news']
        # Note: ``url`` is rebound to the full 6-tuple here while its
        # components are simultaneously unpacked into separate names.
        scheme, netloc, path, params, query, fragment = url = urlparse(url)
        safe_url = False
        if netloc != '' or scheme in locless_schemes:
            safe_url = True

        # A colon in any component after the netloc could smuggle a scheme
        # (e.g. "javascript:") past the whitelist above.
        for part in url[2:]:
            if ":" in part:
                safe_url = False

        if self.markdown.safeMode and not safe_url:
            return ''
        else:
            # Outside safe_mode the url is passed through unchanged.
            return urlunparse(url)
class ImagePattern(LinkPattern):
    """ Return a img element from the given match. """
    def handleMatch(self, m):
        image = markdown.etree.Element("img")
        parts = m.group(9).split()
        if not parts:
            image.set('src', "")
        else:
            target = parts[0]
            # A <bracketed> url keeps everything between the brackets.
            if target.startswith("<") and target.endswith(">"):
                target = target[1:-1]
            image.set('src', self.sanitize_url(target))
        if len(parts) > 1:
            # Anything after the url is the (possibly quoted) title.
            image.set('title', dequote(" ".join(parts[1:])))
        alt = (handleAttributes(m.group(2), image)
               if markdown.ENABLE_ATTRIBUTES else m.group(2))
        image.set('alt', alt)
        return image
class ReferencePattern(LinkPattern):
    """ Match to a stored reference and return link element. """
    def handleMatch(self, m):
        # An explicit id ("[text][id]") wins; "[Google][]" falls back to
        # the link text itself as the id.  Ids are stored lower-cased.
        ref_id = (m.group(9) or m.group(2)).lower()
        if ref_id not in self.markdown.references:
            # Ignore undefined references: leave the source text alone.
            return None
        href, title = self.markdown.references[ref_id]
        return self.makeTag(href, title, m.group(2))

    def makeTag(self, href, title, text):
        anchor = markdown.etree.Element('a')
        anchor.set('href', self.sanitize_url(href))
        if title:
            anchor.set('title', title)
        anchor.text = text
        return anchor
class ImageReferencePattern (ReferencePattern):
    """ Match to a stored reference and return img element. """
    def makeTag(self, href, title, text):
        # Same reference lookup as the parent class, different element.
        image = markdown.etree.Element("img")
        image.set("src", self.sanitize_url(href))
        if title:
            image.set("title", title)
        image.set("alt", text)
        return image
class AutolinkPattern (Pattern):
    """ Return a link Element given an autolink (`<http://example/com>`). """
    def handleMatch(self, m):
        url = m.group(2)
        link = markdown.etree.Element("a")
        link.set('href', url)
        # AtomicString: the visible url text must not be re-processed.
        link.text = markdown.AtomicString(url)
        return link
class AutomailPattern (Pattern):
    """
    Return a mailto link Element given an automail link (`<foo@example.com>`).
    """
    def handleMatch(self, m):
        anchor = markdown.etree.Element('a')
        address = m.group(2)
        if address.startswith("mailto:"):
            address = address[len("mailto:"):]

        def entity_for(code):
            """Return entity definition by code, or the code if not defined."""
            name = htmlentitydefs.codepoint2name.get(code)
            if name is not None:
                return "%s%s;" % (markdown.AMP_SUBSTITUTE, name)
            return "%s#%d;" % (markdown.AMP_SUBSTITUTE, code)

        # Obfuscate the visible address: one entity per character.
        anchor.text = markdown.AtomicString(
            ''.join(entity_for(ord(ch)) for ch in address))

        # The href gets every character -- "mailto:" prefix included -- as
        # a numeric entity.
        href = ''.join(markdown.AMP_SUBSTITUTE + '#%d;' % ord(ch)
                       for ch in "mailto:" + address)
        anchor.set('href', href)
        return anchor
| Python |
"""
CORE MARKDOWN BLOCKPARSER
=============================================================================
This parser handles basic parsing of Markdown blocks. It doesn't concern itself
with inline elements such as **bold** or *italics*, but rather just catches
blocks, lists, quotes, etc.
The BlockParser is made up of a bunch of BlockProcessors, each handling a
different type of block. Extensions may add/replace/remove BlockProcessors
as they need to alter how markdown blocks are parsed.
"""
import re
import markdown
class BlockProcessor:
    """ Base class for block processors.

    Subclasses implement ``test`` (should this processor handle the given
    block?) and ``run`` (consume block(s) and extend the etree).  The
    parser calls ``run`` only after ``test`` has returned True for the
    same block.
    """

    def __init__(self, parser=None):
        # The owning BlockParser; provides shared state and recursion.
        self.parser = parser

    def lastChild(self, parent):
        """ Return the last child of an etree element, or None if empty. """
        return parent[-1] if len(parent) else None

    def detab(self, text):
        """ Remove a tab from the front of each line of the given text.

        Returns a (detabbed, remainder) pair: the leading run of
        indented/blank lines with one tab stop stripped, and everything
        from the first unindented line on, both newline-joined.
        """
        tab = ' ' * markdown.TAB_LENGTH
        lines = text.split('\n')
        detabbed = []
        for line in lines:
            if line.startswith(tab):
                detabbed.append(line[markdown.TAB_LENGTH:])
            elif not line.strip():
                # Blank lines stay inside the detabbed block.
                detabbed.append('')
            else:
                break
        return '\n'.join(detabbed), '\n'.join(lines[len(detabbed):])

    def looseDetab(self, text, level=1):
        """ Remove ``level`` tab stops of indent, keeping dedented lines. """
        indent = ' ' * markdown.TAB_LENGTH * level
        stripped = [line[len(indent):] if line.startswith(indent) else line
                    for line in text.split('\n')]
        return '\n'.join(stripped)

    def test(self, parent, block):
        """ Return True if this processor should handle ``block``.

        Must be overridden by subclasses.  ``parent`` is the etree element
        the block would attach to; it may influence the decision (e.g.
        when inside a list).
        """
        pass

    def run(self, parent, blocks):
        """ Consume block(s) from ``blocks`` and attach results to ``parent``.

        Must be overridden by subclasses.  Both arguments are mutated in
        place (pop/insert on ``blocks``; SubElement/text on ``parent``);
        nothing is returned.
        """
        pass
class ListIndentProcessor(BlockProcessor):
    """ Process children of list items.

    Example:

        * a list item
            process this part

            or this part
    """

    INDENT_RE = re.compile(r'^(([ ]{%s})+)'% markdown.TAB_LENGTH)
    ITEM_TYPES = ['li']
    LIST_TYPES = ['ul', 'ol']

    def test(self, parent, block):
        # An indented block is ours only while we are not already detabbing
        # and the parent is a list item or a list whose last child is one.
        return block.startswith(' '*markdown.TAB_LENGTH) and \
               not self.parser.state.isstate('detabbed') and \
               (parent.tag in self.ITEM_TYPES or \
                   (len(parent) and parent[-1] and \
                       (parent[-1].tag in self.LIST_TYPES)
                   )
               )

    def run(self, parent, blocks):
        block = blocks.pop(0)
        level, sibling = self.get_level(parent, block)
        block = self.looseDetab(block, level)

        # Guard against re-entering ``test`` for the block just detabbed.
        self.parser.state.set('detabbed')
        if parent.tag in self.ITEM_TYPES:
            # The parent is already a li. Just parse the child block.
            self.parser.parseBlocks(parent, [block])
        elif sibling.tag in self.ITEM_TYPES:
            # The sibling is a li. Use it as parent.
            self.parser.parseBlocks(sibling, [block])
        elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES:
            # The parent is a list (``ol`` or ``ul``) which has children.
            # Assume the last child li is the parent of this block.
            if sibling[-1].text:
                # If the parent li has text, that text needs to be moved to a p
                block = '%s\n\n%s' % (sibling[-1].text, block)
                sibling[-1].text = ''
            self.parser.parseChunk(sibling[-1], block)
        else:
            self.create_item(sibling, block)
        self.parser.state.reset()

    def create_item(self, parent, block):
        """ Create a new li and parse the block with it as the parent. """
        li = markdown.etree.SubElement(parent, 'li')
        self.parser.parseBlocks(li, [block])

    def get_level(self, parent, block):
        """ Get level of indent based on list level.

        Returns ``(level, parent)``: the nesting level the block belongs
        to, and the element that should receive it.
        """
        # Get indent level
        m = self.INDENT_RE.match(block)
        if m:
            # NOTE(review): under Python 3 this "/" is true division and
            # yields a float; the comparisons below still behave the same
            # for these magnitudes, but "//" would state the integer
            # intent directly -- confirm before changing.
            indent_level = len(m.group(1))/markdown.TAB_LENGTH
        else:
            indent_level = 0
        if self.parser.state.isstate('list'):
            # We're in a tightlist - so we already are at correct parent.
            level = 1
        else:
            # We're in a looselist - so we need to find parent.
            level = 0
        # Step through children of tree to find matching indent level.
        while indent_level > level:
            child = self.lastChild(parent)
            if child and (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES):
                if child.tag in self.LIST_TYPES:
                    level += 1
                parent = child
            else:
                # No more child levels. If we're short of indent_level,
                # we have a code block. So we stop here.
                break
        return level, parent
class CodeBlockProcessor(BlockProcessor):
    """ Process code blocks. """
    def test(self, parent, block):
        # Any block indented by one tab stop is (part of) a code block.
        return block.startswith(' ' * markdown.TAB_LENGTH)

    def run(self, parent, blocks):
        sibling = self.lastChild(parent)
        chunk = blocks.pop(0)
        leftover = ''
        if sibling and sibling.tag == "pre" and len(sibling) \
                and sibling[0].tag == "code":
            # Continuation: blank lines do not terminate a code block, so
            # append to the previous one, restoring the split linebreaks.
            code = sibling[0]
            chunk, leftover = self.detab(chunk)
            code.text = markdown.AtomicString(
                '%s\n%s\n' % (code.text, chunk.rstrip()))
        else:
            # Fresh code block: build <pre><code> under the parent.
            pre = markdown.etree.SubElement(parent, 'pre')
            code = markdown.etree.SubElement(pre, 'code')
            chunk, leftover = self.detab(chunk)
            code.text = markdown.AtomicString('%s\n' % chunk.rstrip())
        if leftover:
            # Unindented trailing lines belong to the following block(s);
            # push them back for future processing.
            blocks.insert(0, leftover)
class BlockQuoteProcessor(BlockProcessor):
    """ Process blockquotes (lines starting with ``>``). """

    RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')

    def test(self, parent, block):
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            # Anything before the quote marker is parsed first, recursively,
            # so document order is preserved.
            before = block[:m.start()]
            self.parser.parseBlocks(parent, [before])
            # Strip the leading ``> `` from every quoted line.
            cleaned = '\n'.join(self.clean(line)
                                for line in block[m.start():].split('\n'))
        sibling = self.lastChild(parent)
        if sibling and sibling.tag == "blockquote":
            # Previous block was a blockquote: keep appending to it.
            quote = sibling
        else:
            quote = markdown.etree.SubElement(parent, 'blockquote')
        # Recursively parse the quoted text with the blockquote as parent.
        self.parser.parseChunk(quote, cleaned)

    def clean(self, line):
        """ Remove ``>`` from beginning of a line. """
        m = self.RE.match(line)
        if line.strip() == ">":
            return ""
        if m:
            return m.group(2)
        return line
class OListProcessor(BlockProcessor):
    """ Process ordered list blocks. """

    TAG = 'ol'
    # Detect an item (``1. item``). ``group(1)`` contains contents of item.
    RE = re.compile(r'^[ ]{0,3}\d+\.[ ]+(.*)')
    # Detect items on secondary lines. they can be of either list type.
    CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.)|[*+-])[ ]+(.*)')
    # Detect indented (nested) items of either type
    INDENT_RE = re.compile(r'^[ ]{4,7}((\d+\.)|[*+-])[ ]+.*')

    def test(self, parent, block):
        # The block starts a list when its first line looks like an item.
        return bool(self.RE.match(block))

    def run(self, parent, blocks):
        # Check for multiple items in one block.
        items = self.get_items(blocks.pop(0))
        sibling = self.lastChild(parent)
        if sibling and sibling.tag in ['ol', 'ul']:
            # Previous block was a list item, so set that as parent
            # (this is a "loose" list: a blank line separated the items).
            lst = sibling
            # make sure previous item is in a p.
            if len(lst) and lst[-1].text and not len(lst[-1]):
                p = markdown.etree.SubElement(lst[-1], 'p')
                p.text = lst[-1].text
                lst[-1].text = ''
            # parse first block differently as it gets wrapped in a p.
            li = markdown.etree.SubElement(lst, 'li')
            self.parser.state.set('looselist')
            firstitem = items.pop(0)
            self.parser.parseBlocks(li, [firstitem])
            self.parser.state.reset()
        else:
            # This is a new list so create parent with appropriate tag.
            lst = markdown.etree.SubElement(parent, self.TAG)
        self.parser.state.set('list')
        # Loop through items in block, recursively parsing each with the
        # appropriate parent.
        for item in items:
            if item.startswith(' '*markdown.TAB_LENGTH):
                # Item is indented. Parse with last item as parent
                self.parser.parseBlocks(lst[-1], [item])
            else:
                # New item. Create li and parse with it as parent
                li = markdown.etree.SubElement(lst, 'li')
                self.parser.parseBlocks(li, [item])
        self.parser.state.reset()

    def get_items(self, block):
        """ Break a block into list items. """
        items = []
        for line in block.split('\n'):
            m = self.CHILD_RE.match(line)
            if m:
                # This is a new item. Append
                items.append(m.group(3))
            elif self.INDENT_RE.match(line):
                # This is an indented (possibly nested) item.
                if items[-1].startswith(' '*markdown.TAB_LENGTH):
                    # Previous item was indented. Append to that item.
                    items[-1] = '%s\n%s' % (items[-1], line)
                else:
                    items.append(line)
            else:
                # This is another line of previous item. Append to that item.
                items[-1] = '%s\n%s' % (items[-1], line)
        return items
class UListProcessor(OListProcessor):
    """ Process unordered list blocks. """

    TAG = 'ul'
    # Same behaviour as OListProcessor; only the opening-item regex differs
    # (``*``, ``+`` or ``-`` bullets instead of ``1.``) and the tag emitted.
    RE = re.compile(r'^[ ]{0,3}[*+-][ ]+(.*)')
class HashHeaderProcessor(BlockProcessor):
    """ Process Hash Headers (``# Header`` ... ``###### Header``). """

    # Detect a header at start of any line in block
    RE = re.compile(r'(^|\n)(?P<level>#{1,6})(?P<header>.*?)#*(\n|$)')

    def test(self, parent, block):
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            before = block[:m.start()]  # All lines before header
            after = block[m.end():]     # All lines after header
            if before:
                # As the header was not the first line of the block and the
                # lines before the header must be parsed first,
                # recursively parse these lines as a block.
                self.parser.parseBlocks(parent, [before])
            # Create header using named groups from RE; the header level is
            # the number of leading "#" characters.
            h = markdown.etree.SubElement(parent, 'h%d' % len(m.group('level')))
            h.text = m.group('header').strip()
            if after:
                # Insert remaining lines as first block for future parsing.
                blocks.insert(0, after)
        else:
            # Unreachable in practice: ``run`` is only called after ``test``
            # matched the same block.
            # BUG FIX: the original called ``message(CRITICAL, ...)``; both
            # names are undefined in this module, so reaching this branch
            # raised a bare NameError.  Fail loudly and explicitly instead.
            raise RuntimeError("We've got a problem header!")
class SetextHeaderProcessor(BlockProcessor):
    """ Process Setext-style Headers. """

    # Detect Setext-style header. Must be first 2 lines of block.
    RE = re.compile(r'^.*?\n[=-]{3,}', re.MULTILINE)

    def test(self, parent, block):
        return bool(self.RE.match(block))

    def run(self, parent, blocks):
        lines = blocks.pop(0).split('\n')
        # "=" underlines mean <h1>; "-" underlines mean <h2>.
        level = 1 if lines[1].startswith('=') else 2
        heading = markdown.etree.SubElement(parent, 'h%d' % level)
        heading.text = lines[0].strip()
        if len(lines) > 2:
            # Any lines after the underline go back for later parsing.
            blocks.insert(0, '\n'.join(lines[2:]))
class HRProcessor(BlockProcessor):
    """ Process Horizontal Rules. """

    RE = r'[ ]{0,3}(?P<ch>[*_-])[ ]?((?P=ch)[ ]?){2,}[ ]*'
    # Detect hr on any line of a block.
    SEARCH_RE = re.compile(r'(^|\n)%s(\n|$)' % RE)
    # Match a hr on a single line of text.
    MATCH_RE = re.compile(r'^%s$' % RE)

    def test(self, parent, block):
        return bool(self.SEARCH_RE.search(block))

    def run(self, parent, blocks):
        lines = blocks.pop(0).split('\n')
        preceding = []
        # Collect the lines that come before the rule itself.
        for line in lines:
            if self.MATCH_RE.match(line):
                break
            preceding.append(line)
        if preceding:
            # Parse them first so the tree keeps document order.
            self.parser.parseBlocks(parent, ['\n'.join(preceding)])
        # Emit the rule itself.
        markdown.etree.SubElement(parent, 'hr')
        # Whatever follows the rule goes back for later parsing.
        remaining = lines[len(preceding) + 1:]
        if remaining:
            blocks.insert(0, '\n'.join(remaining))
class EmptyBlockProcessor(BlockProcessor):
    """ Process blocks that start with an empty line. """

    # Detect a block that only contains whitespace
    # or only whitespace on the first line.
    RE = re.compile(r'^\s*\n')

    def test(self, parent, block):
        return bool(self.RE.match(block))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.match(block)
        if m:
            # Add remaining line to master blocks for later.
            blocks.insert(0, block[m.end():])
            sibling = self.lastChild(parent)
            # NOTE(review): ``sibling[0]`` here tests Element truthiness
            # (False for a childless element), not "is not None" --
            # preserved as-is; confirm that is the intent.
            if sibling and sibling.tag == 'pre' and sibling[0] and \
                    sibling[0].tag == 'code':
                # Last block is a codeblock. Append blank lines to preserve
                # the whitespace consumed by the block split.
                # BUG FIX: the original appended the literal text "/n/n/n"
                # ('%s/n/n/n'); the intent is three newline characters.
                sibling[0].text = markdown.AtomicString(
                    '%s\n\n\n' % sibling[0].text)
class ParagraphProcessor(BlockProcessor):
    """ Process Paragraph blocks. """

    def test(self, parent, block):
        # Paragraph is the fallback processor: it accepts any block.
        return True

    def run(self, parent, blocks):
        block = blocks.pop(0)
        if not block.strip():
            # Blank block: throw it away.
            return
        if self.parser.state.isstate('list'):
            # Tight list item: the text goes straight onto the parent
            # element rather than into a <p>.
            if parent.text:
                parent.text = '%s\n%s' % (parent.text, block)
            else:
                parent.text = block.lstrip()
        else:
            # Create a regular paragraph.
            paragraph = markdown.etree.SubElement(parent, 'p')
            paragraph.text = block.lstrip()
| Python |
import markdown
class State(list):
    """ Track the current and nested state of the parser.

    A thin stack API wrapped around ``list``: ``set`` pushes a state,
    ``reset`` pops the most recent one, and ``isstate`` checks the state
    on top.  Every ``set`` made for a nested block must be paired with a
    ``reset`` on the way back out, or the recorded state is corrupted.

    While all list methods remain available, only the three below need
    be used.
    """

    def set(self, state):
        """ Push a new state onto the stack. """
        self.append(state)

    def reset(self):
        """ Pop the most recent state (step back one nesting level). """
        self.pop()

    def isstate(self, state):
        """ Return True if the current (top) state equals *state*. """
        return bool(self) and self[-1] == state
class BlockParser:
    """ Parse Markdown blocks into an ElementTree object.

    A wrapper class that stitches the various BlockProcessors together,
    looping through them and creating an ElementTree object.
    """

    def __init__(self):
        # Ordered mapping of name -> BlockProcessor; the order defines
        # each processor's priority.
        self.blockprocessors = markdown.odict.OrderedDict()
        # Nested parser state (e.g. 'list', 'detabbed'); see State.
        self.state = State()

    def parseDocument(self, lines):
        """ Parse a markdown document into an ElementTree.

        Given a list of lines, an ElementTree object (not just a parent
        Element) is created and the root element is passed to the parser
        as the parent. The ElementTree object is returned.

        This should only be called on an entire document, not pieces.
        """
        # Create a ElementTree from the lines
        self.root = markdown.etree.Element(markdown.DOC_TAG)
        self.parseChunk(self.root, '\n'.join(lines))
        return markdown.etree.ElementTree(self.root)

    def parseChunk(self, parent, text):
        """ Parse a chunk of markdown text and attach to given etree node.

        While the ``text`` argument is generally assumed to contain
        multiple blocks which will be split on blank lines, it could
        contain only one block. Generally, this method would be called by
        extensions when block parsing is required.

        The ``parent`` etree Element passed in is altered in place.
        Nothing is returned.
        """
        self.parseBlocks(parent, text.split('\n\n'))

    def parseBlocks(self, parent, blocks):
        """ Process blocks of markdown text and attach to given etree node.

        Given a list of ``blocks``, each blockprocessor is stepped through
        until there are no blocks left. While an extension could
        potentially call this method directly, it's generally expected to
        be used internally.

        This is a public method as an extension may need to add/alter
        additional BlockProcessors which call this method to recursively
        parse a nested block.
        """
        while blocks:
            for processor in self.blockprocessors.values():
                if processor.test(parent, blocks[0]):
                    # The processor must consume (pop) at least the first
                    # block, or this loop never terminates.
                    processor.run(parent, blocks)
                    break
            # NOTE(review): termination relies on some processor always
            # matching (ParagraphProcessor.test returns True
            # unconditionally); with no match the same block would spin
            # forever.
| Python |
# markdown/html4.py
#
# Add html4 serialization to older versions of Elementree
# Taken from ElementTree 1.3 preview with slight modifications
#
# Copyright (c) 1999-2007 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2007 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
import markdown
ElementTree = markdown.etree.ElementTree
QName = markdown.etree.QName
Comment = markdown.etree.Comment
PI = markdown.etree.PI
ProcessingInstruction = markdown.etree.ProcessingInstruction
# Tags serialized as empty (void) elements in HTML4 output.
# BUG FIX: the original listed `"meta" "param"` with no comma between them;
# Python concatenates adjacent string literals, producing the bogus single
# tag "metaparam", so neither <meta> nor <param> was treated as empty.
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
              "img", "input", "isindex", "link", "meta", "param")

try:
    # set() lookup is O(1); fall back to the tuple on ancient interpreters.
    HTML_EMPTY = set(HTML_EMPTY)
except NameError:
    pass
# Well-known namespace URIs mapped to their conventional prefixes; used
# when serializing qualified names.
_namespace_map = {
    # "well-known" namespace prefixes
    "http://www.w3.org/XML/1998/namespace": "xml",
    "http://www.w3.org/1999/xhtml": "html",
    "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
    "http://schemas.xmlsoap.org/wsdl/": "wsdl",
    # xml schema
    "http://www.w3.org/2001/XMLSchema": "xs",
    "http://www.w3.org/2001/XMLSchema-instance": "xsi",
    # dublin core
    "http://purl.org/dc/elements/1.1/": "dc",
}
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _encode(text, encoding):
try:
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_cdata(text, encoding):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 character, or so. assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
if "\n" in text:
text = text.replace("\n", " ")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _serialize_html(write, elem, encoding, qnames, namespaces):
    # Recursively serialize *elem* (and its tail text) as HTML, emitting
    # encoded byte strings through the *write* callable.
    #
    # * qnames maps tag/attribute names to their encoded serialized form;
    #   a None entry marks a "transparent" element whose children are
    #   written without a surrounding tag.
    # * namespaces (uri -> prefix) is only non-None on the root call, so
    #   xmlns declarations are emitted exactly once.
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        # NOTE(review): a Comment/PI with text=None raises TypeError via
        # _escape_cdata -> _raise_serialization_error.
        write("<!--%s-->" % _escape_cdata(text, encoding))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _escape_cdata(text, encoding))
    else:
        tag = qnames[tag]
        if tag is None:
            # transparent element: children only, no tag of its own
            if text:
                write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_html(write, e, encoding, qnames, None)
        else:
            write("<" + tag)
            items = elem.items()
            if items or namespaces:
                # NOTE(review): items.sort() assumes a list — Python 2
                # semantics; dict.items() is a view on Python 3.
                items.sort() # lexical order
                for k, v in items:
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib_html(v, encoding)
                    # FIXME: handle boolean attributes
                    write(" %s=\"%s\"" % (qnames[k], v))
                if namespaces:
                    items = namespaces.items()
                    items.sort(key=lambda x: x[1]) # sort on prefix
                    for v, k in items:
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k.encode(encoding),
                            _escape_attrib(v, encoding)
                            ))
            write(">")
            tag = tag.lower()
            if text:
                # script/style content must not be entity-escaped
                if tag == "script" or tag == "style":
                    write(_encode(text, encoding))
                else:
                    write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_html(write, e, encoding, qnames, None)
            # void elements (HTML_EMPTY) get no closing tag
            if tag not in HTML_EMPTY:
                write("</" + tag + ">")
    if elem.tail:
        write(_escape_cdata(elem.tail, encoding))
def write_html(root, f,
               # keyword arguments
               encoding="us-ascii",
               default_namespace=None):
    """Serialize *root* as HTML into *f*.

    *f* may be an object with a ``write`` method or a file name (opened
    in binary mode). A falsy *encoding* falls back to "us-ascii".
    """
    assert root is not None
    sink = f if hasattr(f, "write") else open(f, "wb")
    enc = encoding or "us-ascii"
    qnames, namespaces = _namespaces(root, enc, default_namespace)
    _serialize_html(sink.write, root, enc, qnames, namespaces)
# --------------------------------------------------------------------
# serialization support
def _namespaces(elem, encoding, default_namespace=None):
    # identify namespaces used in this tree
    # Walks the tree once and returns (qnames, namespaces):
    #   qnames:     maps every tag/attribute/QName string seen in the tree
    #               to its *encoded* "prefix:local" serialized form
    #   namespaces: maps namespace uri -> prefix (for xmlns declarations)

    # maps qnames to *encoded* prefix:local names
    qnames = {None: None}

    # maps uri:s to prefixes
    namespaces = {}
    if default_namespace:
        namespaces[default_namespace] = ""

    def encode(text):
        return text.encode(encoding)

    def add_qname(qname):
        # calculate serialized qname representation
        try:
            if qname[:1] == "{":
                # "{uri}local": reuse a known prefix for uri or invent one
                uri, tag = qname[1:].split("}", 1)
                prefix = namespaces.get(uri)
                if prefix is None:
                    prefix = _namespace_map.get(uri)
                    if prefix is None:
                        prefix = "ns%d" % len(namespaces)
                    # "xml" is implicit and never declared
                    if prefix != "xml":
                        namespaces[uri] = prefix
                if prefix:
                    qnames[qname] = encode("%s:%s" % (prefix, tag))
                else:
                    qnames[qname] = encode(tag) # default element
            else:
                if default_namespace:
                    # FIXME: can this be handled in XML 1.0?
                    raise ValueError(
                        "cannot use non-qualified names with "
                        "default_namespace option"
                        )
                qnames[qname] = encode(qname)
        except TypeError:
            _raise_serialization_error(qname)

    # populate qname and namespaces table
    try:
        iterate = elem.iter
    except AttributeError:
        iterate = elem.getiterator # cET compatibility
    for elem in iterate():
        tag = elem.tag
        if isinstance(tag, QName) and tag.text not in qnames:
            add_qname(tag.text)
        elif isinstance(tag, basestring):
            # NOTE: `basestring` makes this Python 2 only
            if tag not in qnames:
                add_qname(tag)
        elif tag is not None and tag is not Comment and tag is not PI:
            _raise_serialization_error(tag)
        for key, value in elem.items():
            if isinstance(key, QName):
                key = key.text
            if key not in qnames:
                add_qname(key)
            if isinstance(value, QName) and value.text not in qnames:
                add_qname(value.text)
        text = elem.text
        if isinstance(text, QName) and text.text not in qnames:
            add_qname(text.text)
    return qnames, namespaces
def to_html_string(element, encoding=None):
    """Serialize *element* and return the HTML document as one string."""
    chunks = []

    class _ListSink:
        # minimal write-only object: write() appends to the chunk list
        pass

    sink = _ListSink()
    sink.write = chunks.append
    write_html(ElementTree(element).getroot(), sink, encoding)
    return "".join(chunks)
| Python |
"""
COMMAND-LINE SPECIFIC STUFF
=============================================================================
The rest of the code is specifically for handling the case where Python
Markdown is called from the command line.
"""
import markdown
import sys
import logging
from logging import DEBUG, INFO, WARN, ERROR, CRITICAL
# Fallback program name shown in the usage text on Python < 2.3
# (optparse generates the real usage line on 2.3+).
EXECUTABLE_NAME_FOR_USAGE = "python markdown.py"
""" The name used in the usage statement displayed for python versions < 2.3.
(With python 2.3 and higher the usage statement is generated by optparse
and uses the actual name of the executable called.) """

# Printed when optparse is unavailable and no single input file was given.
OPTPARSE_WARNING = """
Python 2.3 or higher required for advanced command line options.
For lower versions of Python use:
%s INPUT_FILE > OUTPUT_FILE
""" % EXECUTABLE_NAME_FOR_USAGE
def parse_options():
    """
    Define and parse `optparse` options for command-line usage.

    Returns a ``(options_dict, logging_level)`` pair; ``(None, None)``
    signals that the caller should exit without converting anything.
    """
    try:
        # import dynamically so Python < 2.3 still reaches the fallback
        optparse = __import__("optparse")
    except ImportError:
        # Pre-optparse fallback: accept exactly one positional argument.
        if len(sys.argv) == 2:
            # BUG FIX: this dict is splatted into markdownFromFile(**options),
            # whose parameter is named 'safe_mode' — the old 'safe' key
            # raised "unexpected keyword argument".
            return {'input': sys.argv[1],
                    'output': None,
                    'safe_mode': False,
                    'extensions': [],
                    'encoding': None }, CRITICAL
        else:
            print(OPTPARSE_WARNING)
            return None, None

    parser = optparse.OptionParser(usage="%prog INPUTFILE [options]")
    parser.add_option("-f", "--file", dest="filename", default=sys.stdout,
                      help="write output to OUTPUT_FILE",
                      metavar="OUTPUT_FILE")
    parser.add_option("-e", "--encoding", dest="encoding",
                      help="encoding for input and output files",)
    parser.add_option("-q", "--quiet", default = CRITICAL,
                      action="store_const", const=CRITICAL+10, dest="verbose",
                      help="suppress all messages")
    parser.add_option("-v", "--verbose",
                      action="store_const", const=INFO, dest="verbose",
                      help="print info messages")
    parser.add_option("-s", "--safe", dest="safe", default=False,
                      metavar="SAFE_MODE",
                      help="safe mode ('replace', 'remove' or 'escape' user's HTML tag)")
    parser.add_option("-o", "--output_format", dest="output_format",
                      default='xhtml1', metavar="OUTPUT_FORMAT",
                      help="Format of output. One of 'xhtml1' (default) or 'html4'.")
    parser.add_option("--noisy",
                      action="store_const", const=DEBUG, dest="verbose",
                      help="print debug messages")
    parser.add_option("-x", "--extension", action="append", dest="extensions",
                      help = "load extension EXTENSION", metavar="EXTENSION")

    (options, args) = parser.parse_args()

    if not len(args) == 1:
        # exactly one input file is required
        parser.print_help()
        return None, None
    else:
        input_file = args[0]

    if not options.extensions:
        options.extensions = []

    return {'input': input_file,
            'output': options.filename,
            'safe_mode': options.safe,
            'extensions': options.extensions,
            'encoding': options.encoding,
            'output_format': options.output_format}, options.verbose
def run():
    """Run Markdown from the command line."""
    # Parse the command line; bail out quietly when parsing says so.
    opts, level = parse_options()
    if not opts:
        sys.exit(0)
    # Adjust logging level if one was requested.
    if level:
        logging.getLogger('MARKDOWN').setLevel(level)
    # Do the conversion.
    markdown.markdownFromFile(**opts)
| Python |
"""
POST-PROCESSORS
=============================================================================
Markdown also allows post-processors, which are similar to preprocessors in
that they need to implement a "run" method. However, they are run after core
processing.
"""
import markdown
class Processor:
    """Base type that optionally remembers the owning Markdown instance."""
    def __init__(self, markdown_instance=None):
        # Bind only when an instance was supplied; some processors
        # (e.g. AndSubstitutePostprocessor) are constructed without one.
        if markdown_instance:
            self.markdown = markdown_instance
class Postprocessor(Processor):
    """
    Postprocessors run after the ElementTree has been serialized back into
    text. Each one implements a "run" method that receives the whole html
    document as a single text string and returns a (possibly modified)
    text string.

    Postprocessors must extend markdown.Postprocessor.
    """

    def run(self, text):
        """
        Override in subclasses: transform the html document (one text
        string) and return the resulting string.
        """
        pass
class RawHtmlPostprocessor(Postprocessor):
    """ Restore raw html to the document. """

    def run(self, text):
        """ Iterate over html stash and restore "safe" html. """
        for i in range(self.markdown.htmlStash.html_counter):
            html, safe = self.markdown.htmlStash.rawHtmlBlocks[i]
            if self.markdown.safeMode and not safe:
                # unsafe html in safe mode: escape, drop, or replace it
                if str(self.markdown.safeMode).lower() == 'escape':
                    html = self.escape(html)
                elif str(self.markdown.safeMode).lower() == 'remove':
                    html = ''
                else:
                    html = markdown.HTML_REMOVED_TEXT
            if safe or not self.markdown.safeMode:
                # placeholder wrapped in its own <p>: drop the wrapper too
                text = text.replace("<p>%s</p>" %
                                    (markdown.preprocessors.HTML_PLACEHOLDER % i),
                                    html + "\n")
            text = text.replace(markdown.preprocessors.HTML_PLACEHOLDER % i,
                                html)
        return text

    def escape(self, html):
        """ Basic html escaping.

        BUG FIX: the entity replacements had been destructively
        HTML-unescaped into no-ops (e.g. ``replace('&', '&')``);
        restored the real entities, '&' first so added ones survive.
        """
        html = html.replace('&', '&amp;')
        html = html.replace('<', '&lt;')
        html = html.replace('>', '&gt;')
        return html.replace('"', '&quot;')
class AndSubstitutePostprocessor(Postprocessor):
    """ Restore valid entities """
    def __init__(self):
        # needs no Markdown instance — intentionally skips Processor.__init__
        pass

    def run(self, text):
        # AMP_SUBSTITUTE marks ampersands that are part of valid entities.
        # BUG FIX: they must be restored as "&amp;" so the output stays
        # escaped — the mangled version substituted a bare "&".
        text = text.replace(markdown.AMP_SUBSTITUTE, "&amp;")
        return text
| Python |
"""
PRE-PROCESSORS
=============================================================================
Preprocessors work on source text before we start doing anything too
complicated.
"""
import re
import markdown
HTML_PLACEHOLDER_PREFIX = markdown.STX+"wzxhzdk:"
HTML_PLACEHOLDER = HTML_PLACEHOLDER_PREFIX + "%d" + markdown.ETX
class Processor:
    """Base type that optionally remembers the owning Markdown instance."""
    def __init__(self, markdown_instance=None):
        # Only bind when an instance was actually supplied.
        if markdown_instance:
            self.markdown = markdown_instance
class Preprocessor(Processor):
    """
    Preprocessors run once the source text has been broken into lines.
    Each one implements a "run" method that receives the document as a
    list of lines, adjusts it as necessary, and returns a list of lines
    (either the same object or a new one).

    Preprocessors must extend markdown.Preprocessor.
    """

    def run(self, lines):
        """
        Override in subclasses: take the document as a list of strings
        split by newlines and return the (possibly modified) list.
        """
        pass
class HtmlStash:
    """
    Holds raw HTML segments extracted early in processing so they can be
    swapped back in for their placeholders at the end of the run.
    """

    def __init__(self):
        """ Create an empty HtmlStash. """
        self.html_counter = 0    # number of stashed inline html segments
        self.rawHtmlBlocks = []  # list of (html, safe) pairs

    def store(self, html, safe=False):
        """
        Saves an HTML segment for later reinsertion. Returns a
        placeholder string that needs to be inserted into the
        document.

        Keyword arguments:

        * html: an html segment
        * safe: label an html segment as safe for safemode

        Returns : a placeholder string

        """
        placeholder = HTML_PLACEHOLDER % self.html_counter
        self.html_counter += 1
        self.rawHtmlBlocks.append((html, safe))
        return placeholder

    def reset(self):
        """ Forget everything that was stashed. """
        self.html_counter = 0
        self.rawHtmlBlocks = []
class HtmlBlockPreprocessor(Preprocessor):
    """Remove html blocks from the text and store them for later retrieval."""

    # Closing-tag templates tried, in order, by _get_right_tag.
    right_tag_patterns = ["</%s>", "%s>"]

    def _get_left_tag(self, block):
        # Tag name of the opening tag: text between "<" and the first
        # whitespace or ">", lowercased.
        return block[1:].replace(">", " ", 1).split()[0].lower()

    def _get_right_tag(self, left_tag, block):
        # Locate a closing tag for *left_tag* searching from the right.
        # Returns (tag_name, index_just_past_the_match); when nothing
        # matches, falls back to the block's trailing characters and
        # len(block).
        for p in self.right_tag_patterns:
            tag = p % left_tag
            i = block.rfind(tag)
            if i > 2:
                return tag.lstrip("<").rstrip(">"), i + len(p)-2 + len(left_tag)
        return block.rstrip()[-len(left_tag)-2:-1].lower(), len(block)

    def _equal_tags(self, left_tag, right_tag):
        # True when *right_tag* closes *left_tag*. "div" and PHP/ASP-style
        # tags are always considered matched.
        if left_tag == 'div' or left_tag[0] in ['?', '@', '%']: # handle PHP, etc.
            return True
        if ("/" + left_tag) == right_tag:
            return True
        if (right_tag == "--" and left_tag == "--"):
            return True
        elif left_tag == right_tag[1:] \
            and right_tag[0] != "<":
            return True
        else:
            return False

    def _is_oneliner(self, tag):
        # Tags that never get a closing tag.
        return (tag in ['hr', 'hr/'])

    def run(self, lines):
        """Walk the document block-by-block (split on blank lines),
        replacing raw html blocks with stash placeholders."""
        text = "\n".join(lines)
        new_blocks = []
        text = text.split("\n\n")
        items = []          # lines accumulated for an open (unclosed) tag
        left_tag = ''
        right_tag = ''
        in_tag = False # flag: currently inside an unclosed block-level tag

        while text:
            block = text[0]
            # strip a single leading newline left over from the split
            if block.startswith("\n"):
                block = block[1:]
            text = text[1:]

            if block.startswith("\n"):
                block = block[1:]

            if not in_tag:
                if block.startswith("<"):
                    left_tag = self._get_left_tag(block)
                    right_tag, data_index = self._get_right_tag(left_tag, block)

                    if block[1] == "!":
                        # is a comment block
                        left_tag = "--"
                        right_tag, data_index = self._get_right_tag(left_tag, block)
                        # keep checking conditions below and maybe just append

                    if data_index < len(block) \
                        and markdown.isBlockLevel(left_tag):
                        # text follows the closing tag: push it back for
                        # normal processing
                        text.insert(0, block[data_index:])
                        block = block[:data_index]

                    if not (markdown.isBlockLevel(left_tag) \
                        or block[1] in ["!", "?", "@", "%"]):
                        # inline-level tag: leave it in the text stream
                        new_blocks.append(block)
                        continue

                    if self._is_oneliner(left_tag):
                        new_blocks.append(block.strip())
                        continue

                    if block.rstrip().endswith(">") \
                        and self._equal_tags(left_tag, right_tag):
                        # complete block-level element: stash it whole
                        new_blocks.append(
                            self.markdown.htmlStash.store(block.strip()))
                        continue
                    else: #if not block[1] == "!":
                        # if is block level tag and is not complete
                        # NOTE(review): `and` binds tighter than `or` here,
                        # so any block-level left_tag takes this branch even
                        # when the block ends with ">" — confirm the intent
                        # before "fixing" the precedence.
                        if markdown.isBlockLevel(left_tag) or left_tag == "--" \
                            and not block.rstrip().endswith(">"):
                            items.append(block.strip())
                            in_tag = True
                        else:
                            new_blocks.append(
                                self.markdown.htmlStash.store(block.strip()))
                        continue

                new_blocks.append(block)

            else:
                items.append(block.strip())

                right_tag, data_index = self._get_right_tag(left_tag, block)

                if self._equal_tags(left_tag, right_tag):
                    # if find closing tag
                    in_tag = False
                    new_blocks.append(
                        self.markdown.htmlStash.store('\n\n'.join(items)))
                    items = []

        if items:
            # document ended inside an unclosed tag: stash what we have
            new_blocks.append(self.markdown.htmlStash.store('\n\n'.join(items)))
            new_blocks.append('\n')

        new_text = "\n\n".join(new_blocks)
        return new_text.split("\n")
class ReferencePreprocessor(Preprocessor):
    """ Remove reference definitions from text and store for later use. """

    # Matches `[id]: url "optional title"` with up to three leading spaces.
    RE = re.compile(r'^(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)$', re.DOTALL)

    def run(self, lines):
        """Record every reference definition; return the remaining lines."""
        kept = []
        for line in lines:
            match = self.RE.match(line)
            if not match:
                kept.append(line)
                continue
            ref_id = match.group(2).strip().lower()
            title = match.group(4).strip()  # potential title
            if not title:
                self.markdown.references[ref_id] = (match.group(3), title)
            elif (len(title) >= 2
                  and ((title[0] == title[-1] == "\"")
                       or (title[0] == title[-1] == "\'")
                       or (title[0] == "(" and title[-1] == ")"))):
                # quoted/parenthesized title: store it without delimiters
                self.markdown.references[ref_id] = (match.group(3), title[1:-1])
            else:
                # malformed title: treat the line as ordinary text
                kept.append(line)
        return kept
| Python |
"""
Python Markdown
===============
Python Markdown converts Markdown to HTML and can be used as a library or
called from the command line.
## Basic usage as a module:
import markdown
md = Markdown()
html = md.convert(your_text_string)
## Basic use from the command line:
markdown source.txt > destination.html
Run "markdown --help" to see more options.
## Extensions
See <http://www.freewisdom.org/projects/python-markdown/> for more
information and instructions on how to extend the functionality of
Python Markdown. Read that before you try modifying this file.
## Authors and License
Started by [Manfred Stienstra](http://www.dwerg.net/). Continued and
maintained by [Yuri Takhteyev](http://www.freewisdom.org), [Waylan
Limberg](http://achinghead.com/) and [Artem Yunusov](http://blog.splyer.com).
Contact: markdown@freewisdom.org
Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
Copyright 200? Django Software Foundation (OrderedDict implementation)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see docs/LICENSE for details).
"""
version = "2.0.3"
version_info = (2,0,3, "Final")
import re
import codecs
import sys
import warnings
import logging
from logging import DEBUG, INFO, WARN, ERROR, CRITICAL
"""
CONSTANTS
=============================================================================
"""
"""
Constants you might want to modify
-----------------------------------------------------------------------------
"""
# default logging level for command-line use
COMMAND_LINE_LOGGING_LEVEL = CRITICAL
TAB_LENGTH = 4 # expand tabs to this many spaces
ENABLE_ATTRIBUTES = True # @id = xyz -> <... id="xyz">
SMART_EMPHASIS = True # this_or_that does not become this<i>or</i>that
DEFAULT_OUTPUT_FORMAT = 'xhtml1' # xhtml or html4 output
HTML_REMOVED_TEXT = "[HTML_REMOVED]" # text used instead of HTML in safe mode
BLOCK_LEVEL_ELEMENTS = re.compile("p|div|h[1-6]|blockquote|pre|table|dl|ol|ul"
"|script|noscript|form|fieldset|iframe|math"
"|ins|del|hr|hr/|style|li|dt|dd|thead|tbody"
"|tr|th|td")
DOC_TAG = "div" # Element used to wrap document - later removed
# Placeholders
STX = u'\u0002' # Use STX ("Start of text") for start-of-placeholder
ETX = u'\u0003' # Use ETX ("End of text") for end-of-placeholder
INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
AMP_SUBSTITUTE = STX+"amp"+ETX
"""
Constants you probably do not need to change
-----------------------------------------------------------------------------
"""
RTL_BIDI_RANGES = ( (u'\u0590', u'\u07FF'),
# Hebrew (0590-05FF), Arabic (0600-06FF),
# Syriac (0700-074F), Arabic supplement (0750-077F),
# Thaana (0780-07BF), Nko (07C0-07FF).
(u'\u2D30', u'\u2D7F'), # Tifinagh
)
"""
AUXILIARY GLOBAL FUNCTIONS
=============================================================================
"""
def message(level, text):
    """ A wrapper method for logging debug messages. """
    logger = logging.getLogger('MARKDOWN')
    if logger.handlers:
        # A logging configuration exists: hand the message to it, and
        # terminate the process for anything more serious than WARN.
        logger.log(level, text)
        if level > WARN:
            sys.exit(0)
    elif level > WARN:
        # No logging configured: escalate serious problems as exceptions.
        raise MarkdownException(text)
    else:
        # Everything else becomes a Python warning.
        warnings.warn(text, MarkdownWarning)
def isBlockLevel(tag):
    """Check if the tag is a block level HTML tag.

    Returns a truthy match object (not a bool) when *tag* is block
    level, else None.
    """
    # NOTE(review): .match() anchors only at the start, so tags whose
    # prefix is a listed name (e.g. "paragraph" matching "p") also pass —
    # confirm this is intended before tightening the pattern.
    return BLOCK_LEVEL_ELEMENTS.match(tag)
"""
MISC AUXILIARY CLASSES
=============================================================================
"""
class AtomicString(unicode):
    """A string which should not be further processed."""
    # Marker subclass — presumably checked with isinstance() elsewhere to
    # skip re-processing; the callers are not visible in this chunk.
    # NOTE: `unicode` base class makes this Python 2 only.
    pass
class MarkdownException(Exception):
    """A Markdown Exception — raised by message() for serious problems
    when no logging handler is configured."""
    pass
class MarkdownWarning(Warning):
    """A Markdown Warning — issued via warnings.warn() by message() for
    non-fatal problems."""
    pass
"""
OVERALL DESIGN
=============================================================================
Markdown processing takes place in four steps:
1. A bunch of "preprocessors" munge the input text.
2. BlockParser() parses the high-level structural elements of the
pre-processed text into an ElementTree.
3. A bunch of "treeprocessors" are run against the ElementTree. One such
treeprocessor runs InlinePatterns against the ElementTree, detecting inline
markup.
4. Some post-processors are run against the text after the ElementTree has
been serialized into text.
5. The output is written to a string.
Those steps are put together by the Markdown() class.
"""
import preprocessors
import blockprocessors
import treeprocessors
import inlinepatterns
import postprocessors
import blockparser
import etree_loader
import odict
# Extensions should use "markdown.etree" instead of "etree" (or do `from
# markdown import etree`). Do not import it by yourself.
etree = etree_loader.importETree()
# Adds the ability to output html4
import html4
class Markdown:
    """Convert Markdown to HTML."""

    def __init__(self,
                 extensions=[],
                 extension_configs={},
                 safe_mode = False,
                 output_format=DEFAULT_OUTPUT_FORMAT):
        """
        Creates a new Markdown instance.

        Keyword arguments:

        * extensions: A list of extensions.
          If they are of type string, the module mdx_name.py will be loaded.
          If they are a subclass of markdown.Extension, they will be used
          as-is.
        * extension-configs: Configuration setting for extensions.
        * safe_mode: Disallow raw html. One of "remove", "replace" or "escape".
        * output_format: Format of output. Supported formats are:
            * "xhtml1": Outputs XHTML 1.x. Default.
            * "xhtml": Outputs latest supported version of XHTML (currently XHTML 1.1).
            * "html4": Outputs HTML 4
            * "html": Outputs latest supported version of HTML (currently HTML 4).
            Note that it is suggested that the more specific formats ("xhtml1"
            and "html4") be used as "xhtml" or "html" may change in the future
            if it makes sense at that time.

        """
        # NOTE(review): the mutable defaults ([], {}) are never mutated in
        # this class, but None-sentinels would be safer.
        self.safeMode = safe_mode
        self.registeredExtensions = []
        self.docType = ""
        self.stripTopLevelTags = True

        # Preprocessors — run on the raw list of source lines, in order.
        self.preprocessors = odict.OrderedDict()
        self.preprocessors["html_block"] = \
                preprocessors.HtmlBlockPreprocessor(self)
        self.preprocessors["reference"] = \
                preprocessors.ReferencePreprocessor(self)
        # footnote preprocessor will be inserted with "<reference"

        # Block processors - ran by the parser
        self.parser = blockparser.BlockParser()
        self.parser.blockprocessors['empty'] = \
                blockprocessors.EmptyBlockProcessor(self.parser)
        self.parser.blockprocessors['indent'] = \
                blockprocessors.ListIndentProcessor(self.parser)
        self.parser.blockprocessors['code'] = \
                blockprocessors.CodeBlockProcessor(self.parser)
        self.parser.blockprocessors['hashheader'] = \
                blockprocessors.HashHeaderProcessor(self.parser)
        self.parser.blockprocessors['setextheader'] = \
                blockprocessors.SetextHeaderProcessor(self.parser)
        self.parser.blockprocessors['hr'] = \
                blockprocessors.HRProcessor(self.parser)
        self.parser.blockprocessors['olist'] = \
                blockprocessors.OListProcessor(self.parser)
        self.parser.blockprocessors['ulist'] = \
                blockprocessors.UListProcessor(self.parser)
        self.parser.blockprocessors['quote'] = \
                blockprocessors.BlockQuoteProcessor(self.parser)
        self.parser.blockprocessors['paragraph'] = \
                blockprocessors.ParagraphProcessor(self.parser)

        #self.prePatterns = []

        # Inline patterns - Run on the tree
        self.inlinePatterns = odict.OrderedDict()
        self.inlinePatterns["backtick"] = \
                inlinepatterns.BacktickPattern(inlinepatterns.BACKTICK_RE)
        self.inlinePatterns["escape"] = \
                inlinepatterns.SimpleTextPattern(inlinepatterns.ESCAPE_RE)
        self.inlinePatterns["reference"] = \
            inlinepatterns.ReferencePattern(inlinepatterns.REFERENCE_RE, self)
        self.inlinePatterns["link"] = \
                inlinepatterns.LinkPattern(inlinepatterns.LINK_RE, self)
        self.inlinePatterns["image_link"] = \
                inlinepatterns.ImagePattern(inlinepatterns.IMAGE_LINK_RE, self)
        self.inlinePatterns["image_reference"] = \
            inlinepatterns.ImageReferencePattern(inlinepatterns.IMAGE_REFERENCE_RE, self)
        self.inlinePatterns["autolink"] = \
            inlinepatterns.AutolinkPattern(inlinepatterns.AUTOLINK_RE, self)
        self.inlinePatterns["automail"] = \
            inlinepatterns.AutomailPattern(inlinepatterns.AUTOMAIL_RE, self)
        self.inlinePatterns["linebreak2"] = \
            inlinepatterns.SubstituteTagPattern(inlinepatterns.LINE_BREAK_2_RE, 'br')
        self.inlinePatterns["linebreak"] = \
            inlinepatterns.SubstituteTagPattern(inlinepatterns.LINE_BREAK_RE, 'br')
        self.inlinePatterns["html"] = \
                inlinepatterns.HtmlPattern(inlinepatterns.HTML_RE, self)
        self.inlinePatterns["entity"] = \
                inlinepatterns.HtmlPattern(inlinepatterns.ENTITY_RE, self)
        self.inlinePatterns["not_strong"] = \
                inlinepatterns.SimpleTextPattern(inlinepatterns.NOT_STRONG_RE)
        self.inlinePatterns["strong_em"] = \
            inlinepatterns.DoubleTagPattern(inlinepatterns.STRONG_EM_RE, 'strong,em')
        self.inlinePatterns["strong"] = \
            inlinepatterns.SimpleTagPattern(inlinepatterns.STRONG_RE, 'strong')
        self.inlinePatterns["emphasis"] = \
            inlinepatterns.SimpleTagPattern(inlinepatterns.EMPHASIS_RE, 'em')
        self.inlinePatterns["emphasis2"] = \
            inlinepatterns.SimpleTagPattern(inlinepatterns.EMPHASIS_2_RE, 'em')
        # The order of the handlers matters!!!

        # Tree processors - run once we have a basic parse.
        self.treeprocessors = odict.OrderedDict()
        self.treeprocessors["inline"] = treeprocessors.InlineProcessor(self)
        self.treeprocessors["prettify"] = \
                treeprocessors.PrettifyTreeprocessor(self)

        # Postprocessors - finishing touches.
        self.postprocessors = odict.OrderedDict()
        self.postprocessors["raw_html"] = \
                postprocessors.RawHtmlPostprocessor(self)
        self.postprocessors["amp_substitute"] = \
                postprocessors.AndSubstitutePostprocessor()
        # footnote postprocessor will be inserted with ">amp_substitute"

        # Map format keys to serializers
        self.output_formats = {
            'html'  : html4.to_html_string,
            'html4' : html4.to_html_string,
            'xhtml' : etree.tostring,
            'xhtml1': etree.tostring,
        }

        self.references = {}
        self.htmlStash = preprocessors.HtmlStash()
        self.registerExtensions(extensions = extensions,
                                configs = extension_configs)
        self.set_output_format(output_format)
        self.reset()

    def registerExtensions(self, extensions, configs):
        """
        Register extensions with this instance of Markdown.

        Keyword arguments:

        * extensions: A list of extensions, which can either
           be strings or objects.  See the docstring on Markdown.
        * configs: A dictionary mapping module names to config options.

        """
        for ext in extensions:
            # string names are resolved to Extension objects first
            if isinstance(ext, basestring):
                ext = load_extension(ext, configs.get(ext, []))
            if isinstance(ext, Extension):
                try:
                    ext.extendMarkdown(self, globals())
                except NotImplementedError, e:
                    message(ERROR, e)
            else:
                message(ERROR, 'Extension "%s.%s" must be of type: "markdown.Extension".' \
                    % (ext.__class__.__module__, ext.__class__.__name__))

    def registerExtension(self, extension):
        """ This gets called by the extension """
        self.registeredExtensions.append(extension)

    def reset(self):
        """
        Resets all state variables so that we can start with a new text.
        """
        self.htmlStash.reset()
        self.references.clear()

        for extension in self.registeredExtensions:
            extension.reset()

    def set_output_format(self, format):
        """ Set the output format for the class instance. """
        try:
            self.serializer = self.output_formats[format.lower()]
        except KeyError:
            message(CRITICAL, 'Invalid Output Format: "%s". Use one of %s.' \
                              % (format, self.output_formats.keys()))

    def convert(self, source):
        """
        Convert markdown to serialized XHTML or HTML.

        Keyword arguments:

        * source: Source text as a Unicode string.

        """
        # Fixup the source text
        if not source.strip():
            return u""  # a blank unicode string
        try:
            source = unicode(source)
        except UnicodeDecodeError:
            message(CRITICAL, 'UnicodeDecodeError: Markdown only accepts unicode or ascii input.')
            return u""

        # strip any stray placeholder control characters from the input
        source = source.replace(STX, "").replace(ETX, "")
        source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
        source = re.sub(r'\n\s+\n', '\n\n', source)
        source = source.expandtabs(TAB_LENGTH)

        # Split into lines and run the line preprocessors.
        self.lines = source.split("\n")
        for prep in self.preprocessors.values():
            self.lines = prep.run(self.lines)

        # Parse the high-level elements.
        root = self.parser.parseDocument(self.lines).getroot()

        # Run the tree-processors
        for treeprocessor in self.treeprocessors.values():
            newRoot = treeprocessor.run(root)
            if newRoot:
                root = newRoot

        # Serialize _properly_.  Strip top-level tags.
        output, length = codecs.utf_8_decode(self.serializer(root, encoding="utf-8"))
        if self.stripTopLevelTags:
            try:
                start = output.index('<%s>'%DOC_TAG)+len(DOC_TAG)+2
                end = output.rindex('</%s>'%DOC_TAG)
                output = output[start:end].strip()
            except ValueError:
                if output.strip().endswith('<%s />'%DOC_TAG):
                    # We have an empty document
                    output = ''
                else:
                    # We have a serious problem
                    message(CRITICAL, 'Failed to strip top level tags.')

        # Run the text post-processors
        for pp in self.postprocessors.values():
            output = pp.run(output)

        return output.strip()

    def convertFile(self, input=None, output=None, encoding=None):
        """Converts a markdown file and returns the HTML as a unicode string.

        Decodes the file using the provided encoding (defaults to utf-8),
        passes the file content to markdown, and outputs the html to either
        the provided stream or the file with provided name, using the same
        encoding as the source file.

        **Note:** This is the only place that decoding and encoding of unicode
        takes place in Python-Markdown.  (All other code is unicode-in /
        unicode-out.)

        Keyword arguments:

        * input: Name of source text file.
        * output: Name of output file. Writes to stdout if `None`.
        * encoding: Encoding of input and output files. Defaults to utf-8.

        """
        encoding = encoding or "utf-8"

        # Read the source
        input_file = codecs.open(input, mode="r", encoding=encoding)
        text = input_file.read()
        input_file.close()
        text = text.lstrip(u'\ufeff') # remove the byte-order mark

        # Convert
        html = self.convert(text)

        # Write to file or stdout
        if isinstance(output, (str, unicode)):
            output_file = codecs.open(output, "w", encoding=encoding)
            output_file.write(html)
            output_file.close()
        else:
            output.write(html.encode(encoding))
"""
Extensions
-----------------------------------------------------------------------------
"""
class Extension:
    """ Base class for extensions to subclass. """

    def __init__(self, configs=None):
        """Create an instance of an Extension.

        Keyword arguments:

        * configs: A dict of configuration setting used by an Extension.
        """
        # BUG FIX: the old `configs={}` default was a shared mutable that
        # setConfig() mutated across every instance constructed without
        # arguments; each instance now gets its own dict.
        self.config = {} if configs is None else configs

    def getConfig(self, key):
        """ Return a setting for the given key or an empty string. """
        if key in self.config:
            return self.config[key][0]
        else:
            return ""

    def getConfigInfo(self):
        """ Return all config settings as a list of (key, description) tuples. """
        return [(key, self.config[key][1]) for key in self.config.keys()]

    def setConfig(self, key, value):
        """ Set a config setting for `key` with the given `value`. """
        self.config[key][0] = value

    def extendMarkdown(self, md, md_globals):
        """
        Add the various proccesors and patterns to the Markdown Instance.

        This method must be overriden by every extension.

        Keyword arguments:

        * md: The Markdown instance.

        * md_globals: Global variables in the markdown module namespace.

        """
        # BUG FIX: a space before "method." — the original implicit string
        # concatenation produced the garbled word "extendMarkdownmethod.".
        raise NotImplementedError(
            'Extension "%s.%s" must define an "extendMarkdown" method.'
            % (self.__class__.__module__, self.__class__.__name__))
def load_extension(ext_name, configs = []):
    """Load extension by name, then return the module.

    The extension name may contain arguments as part of the string in the
    following format: "extname(key1=value1,key2=value2)"

    """
    # Fold any "(...)" argument suffix into the config dict (order ignored).
    configs = dict(configs)
    paren = ext_name.find("(")  # find the first "("
    if paren > 0:
        arg_string = ext_name[paren + 1:-1]
        ext_name = ext_name[:paren]
        pairs = [pair.split("=") for pair in arg_string.split(",")]
        configs.update([(k.strip(), v.strip()) for (k, v) in pairs])

    # Candidate module names: new-style package path, then old-style mdx_*.
    ext_module = 'markdown.extensions'
    new_style_name = '.'.join([ext_module, ext_name])
    old_style_name = '_'.join(['mdx', ext_name])

    # Try loading the extension first from one place, then another.
    try:  # New style (markdown.extensons.<extension>)
        module = __import__(new_style_name, {}, {}, [ext_module])
    except ImportError:
        try:  # Old style (mdx_<extension>)
            module = __import__(old_style_name)
        except ImportError:
            message(WARN, "Failed loading extension '%s' from '%s' or '%s'"
                    % (ext_name, new_style_name, old_style_name))
            # Return None so we don't try to initiate none-existant extension
            return None

    # A successfully loaded extension module must expose makeExtension().
    try:
        return module.makeExtension(configs.items())
    except AttributeError:
        message(CRITICAL, "Failed to initiate extension '%s'" % ext_name)
def load_extensions(ext_names):
    """Load every named extension, silently dropping any that fail."""
    loaded = [load_extension(name) for name in ext_names]
    return [ext for ext in loaded if ext]
"""
EXPORTED FUNCTIONS
=============================================================================
Those are the two functions we really mean to export: markdown() and
markdownFromFile().
"""
def markdown(text,
             extensions=None,
             safe_mode=False,
             output_format=DEFAULT_OUTPUT_FORMAT):
    """Convert a markdown string to HTML and return HTML as a unicode string.

    This is a shortcut function for `Markdown` class to cover the most
    basic use case. It initializes an instance of Markdown, loads the
    necessary extensions and runs the parser on the given text.

    Keyword arguments:

    * text: Markdown formatted text as Unicode or ASCII string.
    * extensions: A list of extensions or extension names (may contain config args).
    * safe_mode: Disallow raw html. One of "remove", "replace" or "escape".
    * output_format: Format of output. Supported formats are:
        * "xhtml1": Outputs XHTML 1.x. Default.
        * "xhtml": Outputs latest supported version of XHTML (currently XHTML 1.1).
        * "html4": Outputs HTML 4
        * "html": Outputs latest supported version of HTML (currently HTML 4).
        Note that it is suggested that the more specific formats ("xhtml1"
        and "html4") be used as "xhtml" or "html" may change in the future
        if it makes sense at that time.

    Returns: An HTML document as a string.
    """
    # extensions defaults to None (not []) so no mutable default is shared
    # across calls; behavior for callers passing a list is unchanged.
    md = Markdown(extensions=load_extensions(extensions or []),
                  safe_mode=safe_mode,
                  output_format=output_format)
    return md.convert(text)
def markdownFromFile(input=None,
                     output=None,
                     extensions=None,
                     encoding=None,
                     safe_mode=False,
                     output_format=DEFAULT_OUTPUT_FORMAT):
    """Read markdown code from a file and write it to a file or a stream.

    Keyword arguments:

    * input: input file name or readable object (passed to Markdown.convertFile).
    * output: output file name or writable object.
    * extensions: a list of extensions or extension names.
    * encoding: encoding of the input/output files.
    * safe_mode: disallow raw html; one of "remove", "replace" or "escape".
    * output_format: one of "xhtml1" (default), "xhtml", "html4", "html".
    """
    # extensions defaults to None (not []) so no mutable default is shared
    # across calls.
    md = Markdown(extensions=load_extensions(extensions or []),
                  safe_mode=safe_mode,
                  output_format=output_format)
    md.convertFile(input, output, encoding)
| Python |
class OrderedDict(dict):
    """
    A dictionary that keeps its keys in the order in which they're inserted.

    Copied from Django's SortedDict with some modifications. Insertion
    order is tracked in the ``keyOrder`` list attribute.
    """
    def __new__(cls, *args, **kwargs):
        instance = super(OrderedDict, cls).__new__(cls, *args, **kwargs)
        instance.keyOrder = []
        return instance

    def __init__(self, data=None):
        if data is None:
            data = {}
        super(OrderedDict, self).__init__(data)
        if isinstance(data, dict):
            # list() gives us our own copy (dict.keys() may be a live view
            # on Python 3, which must not alias our keyOrder).
            self.keyOrder = list(data.keys())
        else:
            self.keyOrder = []
            for key, value in data:
                if key not in self.keyOrder:
                    self.keyOrder.append(key)

    def __deepcopy__(self, memo):
        from copy import deepcopy
        return self.__class__([(key, deepcopy(value, memo))
                               for key, value in self.iteritems()])

    def __setitem__(self, key, value):
        super(OrderedDict, self).__setitem__(key, value)
        if key not in self.keyOrder:
            self.keyOrder.append(key)

    def __delitem__(self, key):
        super(OrderedDict, self).__delitem__(key)
        self.keyOrder.remove(key)

    def __iter__(self):
        for k in self.keyOrder:
            yield k

    def pop(self, k, *args):
        result = super(OrderedDict, self).pop(k, *args)
        try:
            self.keyOrder.remove(k)
        except ValueError:
            # Key wasn't in the dictionary in the first place. No problem.
            pass
        return result

    def popitem(self):
        result = super(OrderedDict, self).popitem()
        self.keyOrder.remove(result[0])
        return result

    def items(self):
        # list() so the result is always a stable snapshot list, matching
        # the original Python 2 semantics of zip().
        return list(zip(self.keyOrder, self.values()))

    def iteritems(self):
        for key in self.keyOrder:
            yield key, super(OrderedDict, self).__getitem__(key)

    def keys(self):
        # A copy, so callers can't mutate our ordering.
        return self.keyOrder[:]

    def iterkeys(self):
        return iter(self.keyOrder)

    def values(self):
        return [super(OrderedDict, self).__getitem__(k) for k in self.keyOrder]

    def itervalues(self):
        for key in self.keyOrder:
            yield super(OrderedDict, self).__getitem__(key)

    def update(self, dict_):
        for k, v in dict_.items():
            self.__setitem__(k, v)

    def setdefault(self, key, default):
        if key not in self.keyOrder:
            self.keyOrder.append(key)
        return super(OrderedDict, self).setdefault(key, default)

    def value_for_index(self, index):
        """Return the value of the item at the given zero-based index."""
        return self[self.keyOrder[index]]

    def insert(self, index, key, value):
        """Insert the key, value pair before the item with the given index."""
        if key in self.keyOrder:
            n = self.keyOrder.index(key)
            del self.keyOrder[n]
            if n < index:
                index -= 1
        self.keyOrder.insert(index, key)
        super(OrderedDict, self).__setitem__(key, value)

    def copy(self):
        """Return a copy of this object."""
        # This way of initializing the copy means it works for subclasses, too.
        obj = self.__class__(self)
        obj.keyOrder = self.keyOrder[:]
        return obj

    def __repr__(self):
        """
        Replace the normal dict.__repr__ with a version that returns the keys
        in their sorted order.
        """
        return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])

    def clear(self):
        super(OrderedDict, self).clear()
        self.keyOrder = []

    def index(self, key):
        """ Return the index of a given key. """
        return self.keyOrder.index(key)

    def index_for_location(self, location):
        """ Return index or None for a given location. """
        if location == '_begin':
            i = 0
        elif location == '_end':
            i = None
        elif location.startswith('<') or location.startswith('>'):
            i = self.index(location[1:])
            if location.startswith('>'):
                if i >= len(self):
                    # last item
                    i = None
                else:
                    i += 1
        else:
            raise ValueError('Not a valid location: "%s". Location key '
                             'must start with a ">" or "<".' % location)
        return i

    def add(self, key, value, location):
        """ Insert by key location. """
        i = self.index_for_location(location)
        if i is not None:
            self.insert(i, key, value)
        else:
            self.__setitem__(key, value)

    def link(self, key, location):
        """ Change location of an existing item. """
        n = self.keyOrder.index(key)
        del self.keyOrder[n]
        try:
            # BUGFIX: index_for_location is inside the try so a bad location
            # string can't lose the key, and we catch Exception / bare-raise
            # instead of the previously undefined name `Error`.
            i = self.index_for_location(location)
            if i is not None:
                self.keyOrder.insert(i, key)
            else:
                self.keyOrder.append(key)
        except Exception:
            # restore to prevent data loss and reraise
            self.keyOrder.insert(n, key)
            raise
| Python |
from markdown import message, CRITICAL
import sys
## Import
def importETree():
    """Import the best implementation of ElementTree, return a module object."""
    # Walk the candidates from newest/fastest to oldest, remembering whether
    # we ended up with the C accelerator or the pure-Python module.
    c_module = None
    py_module = None
    try: # Is it Python 2.5+ with C implemenation of ElementTree installed?
        import xml.etree.cElementTree as c_module
    except ImportError:
        try: # Is it Python 2.5+ with Python implementation of ElementTree?
            import xml.etree.ElementTree as py_module
        except ImportError:
            try: # An earlier version of Python with cElementTree installed?
                import cElementTree as c_module
            except ImportError:
                try: # An earlier version of Python with Python ElementTree?
                    import elementtree.ElementTree as py_module
                except ImportError:
                    message(CRITICAL, "Failed to import ElementTree")
                    sys.exit(1)
    # Version-gate whichever implementation we found, preferring the C one.
    if c_module and c_module.VERSION < "1.0":
        message(CRITICAL, "For cElementTree version 1.0 or higher is required.")
        sys.exit(1)
    elif c_module:
        return c_module
    elif py_module.VERSION < "1.1":
        message(CRITICAL, "For ElementTree version 1.1 or higher is required")
        sys.exit(1)
    else:
        return py_module
| Python |
'''Base classes and helpers for building zone specific tzinfo classes'''
from datetime import datetime, timedelta, tzinfo
from bisect import bisect_right
try:
set
except NameError:
from sets import Set as set
import pytz
__all__ = []
# Cache of timedelta instances keyed by their total seconds.
_timedelta_cache = {}
def memorized_timedelta(seconds):
    '''Create only one instance of each distinct timedelta'''
    cached = _timedelta_cache.get(seconds)
    if cached is None:
        cached = timedelta(seconds=seconds)
        _timedelta_cache[seconds] = cached
    return cached
# The Unix epoch, pre-seeded into the cache under key 0.
_epoch = datetime.utcfromtimestamp(0)
_datetime_cache = {0: _epoch}
def memorized_datetime(seconds):
    '''Create only one instance of each distinct datetime'''
    if seconds not in _datetime_cache:
        # NB. We can't just do datetime.utcfromtimestamp(seconds) as this
        # fails with negative values under Windows (Bug #90096)
        _datetime_cache[seconds] = _epoch + timedelta(seconds=seconds)
    return _datetime_cache[seconds]
# Cache of (utcoffset, dstoffset, tzname) tuples keyed by their raw args.
_ttinfo_cache = {}
def memorized_ttinfo(*args):
    '''Create only one instance of each distinct tuple'''
    if args not in _ttinfo_cache:
        # args is (utcoffset_seconds, dstoffset_seconds, tzname); the two
        # offsets are interned via memorized_timedelta.
        _ttinfo_cache[args] = (
            memorized_timedelta(args[0]),
            memorized_timedelta(args[1]),
            args[2],
        )
    return _ttinfo_cache[args]
_notime = memorized_timedelta(0)
def _to_seconds(td):
'''Convert a timedelta to seconds'''
return td.seconds + td.days * 24 * 60 * 60
class BaseTzInfo(tzinfo):
    """Shared base for StaticTzInfo and DstTzInfo implementations."""
    # Overridden in subclass
    _utcoffset = None  # offset from UTC as a timedelta
    _tzname = None     # abbreviation returned by tzname(), e.g. 'EST'
    zone = None        # zone name, e.g. 'US/Eastern'
    def __str__(self):
        """Return the zone name as the string form of the tzinfo."""
        return self.zone
class StaticTzInfo(BaseTzInfo):
    '''A timezone that has a constant offset from UTC

    These timezones are rare, as most locations have changed their
    offset at some point in their history
    '''
    def fromutc(self, dt):
        '''See datetime.tzinfo.fromutc'''
        return (dt + self._utcoffset).replace(tzinfo=self)

    def utcoffset(self, dt):
        '''See datetime.tzinfo.utcoffset'''
        return self._utcoffset

    def dst(self, dt):
        '''See datetime.tzinfo.dst'''
        # A static zone never observes DST, so the DST offset is always zero.
        return _notime

    def tzname(self, dt):
        '''See datetime.tzinfo.tzname'''
        return self._tzname

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            # Parenthesized raise replaces the Python 2-only comma syntax.
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime'''
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')
        return dt.replace(tzinfo=self)

    def __repr__(self):
        return '<StaticTzInfo %r>' % (self.zone,)

    def __reduce__(self):
        # Special pickle so the zone remains a singleton and to cope with
        # database changes.
        return pytz._p, (self.zone,)
class DstTzInfo(BaseTzInfo):
    '''A timezone that has a variable offset from UTC

    The offset might change if daylight savings time comes into effect,
    or at a point in history when the region decides to change their
    timezone definition.
    '''
    # Overridden in subclass
    _utc_transition_times = None # Sorted list of DST transition times in UTC
    _transition_info = None # [(utcoffset, dstoffset, tzname)] corresponding
                            # to _utc_transition_times entries
    zone = None

    # Set in __init__
    _tzinfos = None
    _dst = None # DST offset

    def __init__(self, _inf=None, _tzinfos=None):
        # With arguments: build one per-offset singleton sharing _tzinfos.
        # Without: build the zone's root instance and populate _tzinfos with
        # one instance per distinct (utcoffset, dstoffset, tzname) tuple.
        if _inf:
            self._tzinfos = _tzinfos
            self._utcoffset, self._dst, self._tzname = _inf
        else:
            _tzinfos = {}
            self._tzinfos = _tzinfos
            self._utcoffset, self._dst, self._tzname = self._transition_info[0]
            _tzinfos[self._transition_info[0]] = self
            for inf in self._transition_info[1:]:
                # 'in' test replaces the Python 2-only dict.has_key().
                if inf not in _tzinfos:
                    _tzinfos[inf] = self.__class__(inf, _tzinfos)

    def fromutc(self, dt):
        '''See datetime.tzinfo.fromutc'''
        dt = dt.replace(tzinfo=None)
        # Locate the transition period dt falls in (clamped to the first).
        idx = max(0, bisect_right(self._utc_transition_times, dt) - 1)
        inf = self._transition_info[idx]
        return (dt + inf[0]).replace(tzinfo=self._tzinfos[inf])

    def normalize(self, dt):
        '''Correct the timezone information on the given datetime

        If date arithmetic crosses DST boundaries, the tzinfo
        is not magically adjusted. This method normalizes the
        tzinfo to the correct one.

        To test, first we need to do some setup

        >>> from pytz import timezone
        >>> utc = timezone('UTC')
        >>> eastern = timezone('US/Eastern')
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'

        We next create a datetime right on an end-of-DST transition point,
        the instant when the wallclocks are wound back one hour.

        >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
        >>> loc_dt = utc_dt.astimezone(eastern)
        >>> loc_dt.strftime(fmt)
        '2002-10-27 01:00:00 EST (-0500)'

        Now, if we subtract a few minutes from it, note that the timezone
        information has not changed.

        >>> before = loc_dt - timedelta(minutes=10)
        >>> before.strftime(fmt)
        '2002-10-27 00:50:00 EST (-0500)'

        But we can fix that by calling the normalize method

        >>> before = eastern.normalize(before)
        >>> before.strftime(fmt)
        '2002-10-27 01:50:00 EDT (-0400)'
        '''
        if dt.tzinfo is None:
            # Parenthesized raise replaces the Python 2-only comma syntax.
            raise ValueError('Naive time - no tzinfo set')
        # Convert dt in localtime to UTC
        offset = dt.tzinfo._utcoffset
        dt = dt.replace(tzinfo=None)
        dt = dt - offset
        # convert it back, and return it
        return self.fromutc(dt)

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time.

        This method should be used to construct localtimes, rather
        than passing a tzinfo argument to a datetime constructor.

        is_dst is used to determine the correct timezone in the ambigous
        period at the end of daylight savings time.

        >>> from pytz import timezone
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
        >>> amdam = timezone('Europe/Amsterdam')
        >>> dt = datetime(2004, 10, 31, 2, 0, 0)
        >>> loc_dt1 = amdam.localize(dt, is_dst=True)
        >>> loc_dt2 = amdam.localize(dt, is_dst=False)
        >>> loc_dt1.strftime(fmt)
        '2004-10-31 02:00:00 CEST (+0200)'
        >>> loc_dt2.strftime(fmt)
        '2004-10-31 02:00:00 CET (+0100)'
        >>> str(loc_dt2 - loc_dt1)
        '1:00:00'

        Use is_dst=None to raise an AmbiguousTimeError for ambiguous
        times at the end of daylight savings

        >>> loc_dt1 = amdam.localize(dt, is_dst=None)
        Traceback (most recent call last):
            [...]
        AmbiguousTimeError: 2004-10-31 02:00:00

        is_dst defaults to False

        >>> amdam.localize(dt) == amdam.localize(dt, False)
        True

        is_dst is also used to determine the correct timezone in the
        wallclock times jumped over at the start of daylight savings time.

        >>> pacific = timezone('US/Pacific')
        >>> dt = datetime(2008, 3, 9, 2, 0, 0)
        >>> ploc_dt1 = pacific.localize(dt, is_dst=True)
        >>> ploc_dt2 = pacific.localize(dt, is_dst=False)
        >>> ploc_dt1.strftime(fmt)
        '2008-03-09 02:00:00 PDT (-0700)'
        >>> ploc_dt2.strftime(fmt)
        '2008-03-09 02:00:00 PST (-0800)'
        >>> str(ploc_dt2 - ploc_dt1)
        '1:00:00'

        Use is_dst=None to raise a NonExistentTimeError for these skipped
        times.

        >>> loc_dt1 = pacific.localize(dt, is_dst=None)
        Traceback (most recent call last):
            [...]
        NonExistentTimeError: 2008-03-09 02:00:00
        '''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')

        # Find the two best possibilities.
        possible_loc_dt = set()
        for delta in [timedelta(days=-1), timedelta(days=1)]:
            loc_dt = dt + delta
            idx = max(0, bisect_right(
                self._utc_transition_times, loc_dt) - 1)
            inf = self._transition_info[idx]
            tzinfo = self._tzinfos[inf]
            loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo))
            if loc_dt.replace(tzinfo=None) == dt:
                possible_loc_dt.add(loc_dt)

        if len(possible_loc_dt) == 1:
            return possible_loc_dt.pop()

        # If there are no possibly correct timezones, we are attempting
        # to convert a time that never happened - the time period jumped
        # during the start-of-DST transition period.
        if len(possible_loc_dt) == 0:
            # If we refuse to guess, raise an exception.
            if is_dst is None:
                raise NonExistentTimeError(dt)
            # If we are forcing the pre-DST side of the DST transition, we
            # obtain the correct timezone by winding the clock forward a few
            # hours.
            elif is_dst:
                return self.localize(
                    dt + timedelta(hours=6), is_dst=True) - timedelta(hours=6)
            # If we are forcing the post-DST side of the DST transition, we
            # obtain the correct timezone by winding the clock back.
            else:
                return self.localize(
                    dt - timedelta(hours=6), is_dst=False) + timedelta(hours=6)

        # If we get this far, we have multiple possible timezones - this
        # is an ambiguous case occuring during the end-of-DST transition.

        # If told to be strict, raise an exception since we have an
        # ambiguous case
        if is_dst is None:
            raise AmbiguousTimeError(dt)

        # Filter out the possiblilities that don't match the requested
        # is_dst
        filtered_possible_loc_dt = [
            p for p in possible_loc_dt
            if bool(p.tzinfo._dst) == is_dst
        ]

        # Hopefully we only have one possibility left. Return it.
        if len(filtered_possible_loc_dt) == 1:
            return filtered_possible_loc_dt[0]

        if len(filtered_possible_loc_dt) == 0:
            filtered_possible_loc_dt = list(possible_loc_dt)

        # If we get this far, we have in a wierd timezone transition
        # where the clocks have been wound back but is_dst is the same
        # in both (eg. Europe/Warsaw 1915 when they switched to CET).
        # At this point, we just have to guess unless we allow more
        # hints to be passed in (such as the UTC offset or abbreviation),
        # but that is just getting silly.
        #
        # Choose the earliest (by UTC) applicable timezone. The key-based
        # sort replaces the Python 2-only cmp-function sort (the `cmp`
        # builtin no longer exists) and yields the identical ordering.
        filtered_possible_loc_dt.sort(
            key=lambda p: p.replace(tzinfo=None) - p.tzinfo._utcoffset)
        return filtered_possible_loc_dt[0]

    def utcoffset(self, dt):
        '''See datetime.tzinfo.utcoffset'''
        return self._utcoffset

    def dst(self, dt):
        '''See datetime.tzinfo.dst'''
        return self._dst

    def tzname(self, dt):
        '''See datetime.tzinfo.tzname'''
        return self._tzname

    def __repr__(self):
        if self._dst:
            dst = 'DST'
        else:
            dst = 'STD'
        if self._utcoffset > _notime:
            return '<DstTzInfo %r %s+%s %s>' % (
                self.zone, self._tzname, self._utcoffset, dst
            )
        else:
            return '<DstTzInfo %r %s%s %s>' % (
                self.zone, self._tzname, self._utcoffset, dst
            )

    def __reduce__(self):
        # Special pickle so the zone remains a singleton and to cope with
        # database changes.
        return pytz._p, (
            self.zone,
            _to_seconds(self._utcoffset),
            _to_seconds(self._dst),
            self._tzname
        )
# Root of the exception hierarchy raised by DstTzInfo.localize(is_dst=None).
class InvalidTimeError(Exception):
    '''Base class for invalid time exceptions.'''
class AmbiguousTimeError(InvalidTimeError):
    '''Exception raised when attempting to create an ambiguous wallclock time.

    At the end of a DST transition period, a particular wallclock time will
    occur twice (once before the clocks are set back, once after). Both
    possibilities may be correct, unless further information is supplied.

    See DstTzInfo.normalize() for more info
    '''
class NonExistentTimeError(InvalidTimeError):
    '''Exception raised when attempting to create a wallclock time that
    cannot exist.

    At the start of a DST transition period, the wallclock time jumps forward.
    The instants jumped over never occur.
    '''
def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
    """Factory function for unpickling pytz tzinfo instances.

    This is shared for both StaticTzInfo and DstTzInfo instances, because
    database changes could cause a zones implementation to switch between
    these two base classes and we can't break pickles on a pytz version
    upgrade.
    """
    # Raises a KeyError if zone no longer exists, which should never happen
    # and would be a bug.
    tz = pytz.timezone(zone)

    # A StaticTzInfo pickle carries no offset state - the zone itself is all
    # we need.
    if utcoffset is None:
        return tz

    # This pickle was created from a DstTzInfo. Pick out the tzinfo instance
    # for this zone whose state matches, so datetimes using it are restored
    # correctly.
    utcoffset = memorized_timedelta(utcoffset)
    dstoffset = memorized_timedelta(dstoffset)
    state = (utcoffset, dstoffset, tzname)
    if state in tz._tzinfos:
        return tz._tzinfos[state]

    # The exact state no longer exists: the pickle may be stale, or the
    # timezone database was corrected hard enough that this particular
    # (utcoffset, dstoffset) or abbreviation vanished from the zone.
    #
    # See if an entry differs only by tzname - abbreviations get changed
    # from the initial guess by the database maintainers to match reality
    # when this information is discovered.
    for candidate in tz._tzinfos.values():
        if (candidate._utcoffset == utcoffset
                and candidate._dst == dstoffset):
            return candidate

    # This (utcoffset, dstoffset) information has been removed from the
    # zone. Add it back. This might occur when the database maintainers have
    # corrected incorrect information. datetime instances using this
    # incorrect information will continue to do so, exactly as they were
    # before being pickled. This is purely an overly paranoid safety net - I
    # doubt this will ever been needed in real life.
    tz._tzinfos[state] = tz.__class__(state, tz._tzinfos)
    return tz._tzinfos[state]
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.