code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
#!/usr/bin/env python
# --*-- encoding:utf-8 --*--
import urllib2,datetime,re
def cron_check(datetime, cron):
    u'''Check whether *datetime* matches a two-field cron expression.

    Syntax: "<hour> <minute>", hour 0-23 and minute 0-59.
    A field of "*" matches any value.

    Returns True when both fields match (or are wildcards), else False.
    '''
    def _validate(c, s, type):
        u'''Validate one cron field *c* against the current value *s*.'''
        if c == "*":
            return True
        c = int(c)
        s = int(s)
        return {
            # BUG FIX: the hour upper bound was 12; a 24-hour clock goes
            # up to 23 (the four-field variant of this helper already
            # used 23).
            's': lambda: (c == s and c >= 0 and c <= 23),
            'm': lambda: (c == s and c >= 0 and c <= 59)
        }[type]()
    hour, minute = cron.split(" ")
    return _validate(hour, datetime.hour, "s") and _validate(minute, datetime.minute, "m")
# Manual smoke test (Python 2 print statement): match the current time
# against the fixed expression "09 06" (09:06).
if __name__=="__main__":
    print cron_check(datetime.datetime.now(),"09 06")
| Python |
#!/usr/bin/env python
# --*-- encoding:utf-8 --*--
from django.conf.urls.defaults import patterns, url
# URL routes for the fetion app; view callables are resolved by name
# inside 'fetion.views' (Django 1.x patterns() style).
urlpatterns = patterns('fetion.views',
    url(r'^$', 'index'),
    url(r'^login$','login'),
    url(r'^logout$','logout'),
    url(r'^fetion/login$', 'fetion_login'),
    url(r'^fetion/query$', 'fetion_query'),
    url(r'^fetion/error$', 'fetion_error'),
    url(r'^fetion/stop$', 'fetion_stop'),
    url(r'^queue/list$', 'list_queue'),
    url(r'^queue/add$', 'add_queue'),
    url(r'^queue/del/(\d+)$', 'del_queue'),
    url(r'^task/list$', 'list_task'),
    url(r'^task/add$', 'add_task'),
    url(r'^task/del/(\d+)$', 'del_task'),
)
from fetion.models import FetionStatus,FETION_STATUS_ENUM
# NOTE(review): this runs at import time of the URLconf and resets every
# stored account to FETION_STATUS_ENUM[1][0] (presumably "offline") --
# a module-level DB side effect; confirm it is intentional.
print u'init fetion status'
FetionStatus.objects.all().update(status=FETION_STATUS_ENUM[1][0])
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from django.template import Library
# Expose this module's tags to Django's template system.
register = Library()

@register.inclusion_tag('template.html', takes_context = True)
def phone_status(context):
    """Inclusion tag rendering 'template.html'.

    NOTE(review): inclusion tags conventionally return a dict used as the
    sub-template context; this returns the request object itself --
    confirm 'template.html' really expects that.
    """
    request = context['request']
    return request
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from django.template import Library
# Expose this module's tags to Django's template system.
register = Library()

@register.inclusion_tag('template.html', takes_context = True)
def phone_status(context):
    """Inclusion tag rendering 'template.html'.

    NOTE(review): inclusion tags conventionally return a dict used as the
    sub-template context; this returns the request object itself --
    confirm 'template.html' really expects that.
    """
    request = context['request']
    return request
#!/usr/bin/env python
# --*-- encoding:utf-8 --*--
from django.core.management.base import BaseCommand
from django.conf import settings
from fetion.webim import FetionWebIM
import SimpleXMLRPCServer
import logging
logger = logging.getLogger('fetion')

# XML-RPC bind address; the port comes from Django settings (RPC_PORT).
HOST_IP = "localhost"
HOST_PORT = settings.RPC_PORT

class Command(BaseCommand):
    """Management command: serve a FetionWebIM instance over XML-RPC.

    serve_forever() blocks until the process is killed; allow_none=True
    lets RPC methods pass/return None.
    """
    def handle(self, *args, **options):
        # register_instance exposes every public FetionWebIM method.
        server = SimpleXMLRPCServer.SimpleXMLRPCServer((HOST_IP, HOST_PORT),allow_none=True)
        server.register_instance(FetionWebIM(logger))
        # NOTE(review): message says "port" but prints host:port.
        print "Listening on port %s:%s"%(HOST_IP,HOST_PORT)
        server.serve_forever()
#!/usr/bin/env python
# --*-- encoding:utf-8 --*--
from django.core.management.base import BaseCommand
from django.conf import settings
from fetion.webim import FetionWebIM
import SimpleXMLRPCServer
import logging
logger = logging.getLogger('fetion')

# XML-RPC bind address; the port comes from Django settings (RPC_PORT).
HOST_IP = "localhost"
HOST_PORT = settings.RPC_PORT

class Command(BaseCommand):
    """Management command: serve a FetionWebIM instance over XML-RPC.

    serve_forever() blocks until the process is killed; allow_none=True
    lets RPC methods pass/return None.
    """
    def handle(self, *args, **options):
        # register_instance exposes every public FetionWebIM method.
        server = SimpleXMLRPCServer.SimpleXMLRPCServer((HOST_IP, HOST_PORT),allow_none=True)
        server.register_instance(FetionWebIM(logger))
        # NOTE(review): message says "port" but prints host:port.
        print "Listening on port %s:%s"%(HOST_IP,HOST_PORT)
        server.serve_forever()
#!/usr/bin/env python
# --*-- encoding:utf-8 --*--
'''
Created on 2011-12-31
@author: fredzhu.com
'''
u'''
程序基于https://webim.feixin.10086.cn/login.aspx发送消息.不记录密码
'''
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponse,HttpResponseRedirect
from django.utils import simplejson as json
from django.conf import settings
from fetion.models import FetionStatus,SMSQueue,TaskCron
from fetion.models import FETION_STATUS_ENUM,TASK_ENUM
from fetion.webim import FetionWebIM
from fetion.mail import send_mail
import xmlrpclib
import logging,random
logger = logging.getLogger('fetion')
# Client of the XML-RPC daemon started by the management command; it
# performs the actual fetion (web IM) operations on our behalf.
proxy = xmlrpclib.ServerProxy("http://localhost:%s/"%settings.RPC_PORT, allow_none=True)
#------------内部方法--------------
def auth(fn):
    u'''Decorator for views that require a signed-in session.

    If the session has no 'phone' key, short-circuits with the JSON 301
    payload the frontend understands; otherwise calls the wrapped view.
    '''
    def check(*args, **kwargs):
        request = args[0]
        phone = request.session.get('phone', None)
        # Not logged in yet.
        if not phone:
            result = {"statusCode":301,"message":u"请先登录","navTabId":"","rel":"","callbackType":"","forwardUrl":""}
            return HttpResponse(json.dumps(result))
        # BUG FIX: forward **kwargs too -- the old wrapper only accepted
        # positional args, so any view matched by a *named* URL group
        # raised TypeError.
        return fn(*args, **kwargs)
    return check
#----------------------------------
# Upstream fetion web-IM login endpoint.
FETION_URL = u"https://webim.feixin.10086.cn/WebIM/Login.aspx"

def index(request):
    u'''Render the landing page.'''
    return render_to_response('fetion/index.html',context_instance=RequestContext(request))
def logout(request):
    u'''Log the user out: drop all session state, then return home.'''
    session = request.session
    session.clear()
    home = HttpResponseRedirect('/')
    return home
def login(request):
    u'''Sign in to the management UI.

    GET renders the login form; POST checks phone + security code
    against FetionStatus and answers a JSON payload for the frontend.
    '''
    if request.method == 'POST':
        result = {"navTabId":"queue_list","rel":"queue_list","callbackType":"","forwardUrl":""}
        phone = request.POST.get('phone')
        security = request.POST.get('security')
        if phone and security:
            if settings.SAVE_PASSWORD:
                # NOTE(review): when passwords are stored, the form's
                # "security" value is matched against the *password*
                # column -- confirm that is the intended login rule.
                st = FetionStatus.objects.filter(phone=phone,password=security)
            else:
                st = FetionStatus.objects.filter(phone=phone,security=security)
            if st and len(st)>0:
                request.session['phone'] = phone
                result.update({"statusCode":200,"message":u"操作成功"})
            else:
                result.update({"statusCode":300,"message":u"登录失败"})
        # NOTE(review): a POST with missing fields answers without any
        # statusCode/message keys.
        return HttpResponse(json.dumps(result))
    return render_to_response('fetion/login.html',context_instance=RequestContext(request))
def fetion_login(request):
    u'''
    Guide the user through the fetion (web IM) login.

    GET: fetch a captcha image via RPC and render the login form.
    POST: log in through the RPC daemon, then create/update the local
    FetionStatus row and bind the session to the phone number.
    '''
    result = {"navTabId":"login","rel":"login","callbackType":"","forwardUrl":""}
    if request.method == 'POST':
        phone = request.POST.get('phone')
        password = request.POST.get('password')
        vcode = request.POST.get('vcode')
        if phone and password and vcode:
            # Already logged in before? (FETION_STATUS_ENUM[0][0] is the
            # "online" state, judging by its use below on success.)
            status = FetionStatus.objects.filter(phone=phone)
            if status and len(status)>0 and status[0].status==FETION_STATUS_ENUM[0][0]:
                logger.info(u"已经登录,跳转查询界面:%s"%phone)
                result.update({"statusCode":300,"message":u"当前[%s]已经登录!"%phone})
                return HttpResponse(json.dumps(result))
            # Perform the actual login via the XML-RPC daemon.
            code = proxy.login(phone, password, vcode)
            if code == 200:
                # Start the daemon's worker thread for this session.
                status = proxy.start_thread()
                # Fetch the fetion profile ("persion" sic -- RPC API name).
                persion = proxy.get_persion_info()
                # Create or update the local status row.
                status = FetionStatus.objects.filter(phone=phone)
                if status and len(status)>0:
                    fs = status[0]
                    fs.login_ip = request.META['REMOTE_ADDR']
                    fs.status = FETION_STATUS_ENUM[0][0]
                    fs.save()
                else:
                    # First login: generate a random 15-char security code
                    # the user needs for later queries.
                    fs = FetionStatus()
                    fs.phone = phone
                    fs.uid = persion['uid']
                    fs.status = FETION_STATUS_ENUM[0][0]
                    fs.security = "".join(random.sample(['1','2','3','4','5','6','7','8','9','0','a','b','c','d','e','f','g','h','i','j','k','l','m'],15))
                    fs.login_ip = request.META['REMOTE_ADDR']
                    if settings.SAVE_PASSWORD:
                        fs.password = password
                    fs.save()
                request.session['phone'] = phone
                result.update({"statusCode":200,"message":u"操作成功",})
                return HttpResponse(json.dumps(result))
            elif code == 301:
                # NOTE(review): 301 ("already logged in"?) is reported to
                # the frontend as a plain success.
                result.update({"statusCode":200,"message":u"操作成功",})
                return HttpResponse(json.dumps(result))
            elif code == 312:
                result.update({"statusCode":300,"message":u"验证码输入错误!",})
                return HttpResponse(json.dumps(result))
            elif code == 321:
                result.update({"statusCode":300,"message":u"密码输入错误!",})
                return HttpResponse(json.dumps(result))
            else:
                result.update({"statusCode":300,"message":u"未知错误!",})
                return HttpResponse(json.dumps(result))
        else:
            # NOTE(review): a POST with missing fields falls through and
            # the view returns None, which Django rejects -- should
            # return an error payload instead.
            pass
    else:
        img_url = proxy.get_vcode_img()#fetch the captcha image URL
        return render_to_response('fetion/fetion_login.html',{'img_url':img_url},context_instance=RequestContext(request))
def fetion_query(request):
    u'''
    Query the stored fetion account status for a phone + security code.
    (Original docstring had a typo: 费心 for 飞信.)
    '''
    data = {}
    if request.method == 'POST':
        phone = request.POST.get('phone')
        security = request.POST.get('security')
        if phone and security:
            st = FetionStatus.objects.filter(phone=phone,security=security)
            if st and len(st)>0:
                data.update({'status':st[0]})
            else:
                data.update({'error':u'未找到相关记录'})
        else:
            data.update({'error':u'请填写完整的字段'})
    return render_to_response('fetion/fetion_query.html',data,context_instance=RequestContext(request))
@auth
def fetion_stop(request):
    u'''Take the fetion account offline.

    TODO: not implemented -- currently returns None, which Django treats
    as an invalid response; implement via the RPC proxy or remove the
    /fetion/stop route.
    '''
    pass
##########队列
@auth
def list_queue(request):
    u'''List the pending SMS queue entries for the logged-in account.

    When the fetion session is ONLINE, also fetches the contact list via
    RPC so the template can offer receivers to pick from.
    '''
    phone = request.session['phone']
    # FIX: local was named `list`, shadowing the builtin; the template
    # context key 'list' is kept unchanged.
    queue_items = SMSQueue.objects.filter(phone=phone)
    status = FetionStatus.objects.filter(phone=phone)[0]
    datas = {'list':queue_items,'status':status}
    if status.status == 'ONLINE':
        contact_list = proxy.get_contact_list()
        datas.update({'contact_list':contact_list['bds']})
    return render_to_response('fetion/queue_list.html',datas,context_instance=RequestContext(request))
@auth
def add_queue(request):
    u'''Create one SMS queue entry (receiver + message) for this account.'''
    if request.method == "POST":
        result = {"navTabId":"queue_list","rel":"queue_list","callbackType":"","forwardUrl":""}
        receiver = request.POST.get('receiver',None)
        msg = request.POST.get('msg',None)
        phone = request.session.get('phone',None)
        if receiver and msg:
            queue = SMSQueue()
            queue.phone = phone
            queue.receiver = receiver
            queue.msg = msg
            queue.save()
            result.update({"statusCode":200,"message":u"操作成功"})
            return HttpResponse(json.dumps(result))
        # Missing fields -> failure payload.
        result.update({"statusCode":300,"message":u"操作失败"})
        return HttpResponse(json.dumps(result))
    # NOTE(review): non-POST requests fall through and return None.
@auth
def del_queue(request,id):
    u'''Delete one SMS queue entry, only if it belongs to the session user.

    FIX: mirrors del_task() -- the old code answered statusCode 200
    ("success") even when the entry belonged to someone else and nothing
    was deleted; now that case reports 300.
    '''
    result = {"navTabId":"queue_list","rel":"queue_list","callbackType":"","forwardUrl":""}
    queue = SMSQueue.objects.get(pk=id)
    if queue and queue.phone == request.session['phone']:
        queue.delete()
        result.update({"statusCode":200,"message":u"操作成功"})
        return HttpResponse(json.dumps(result))
    result.update({"statusCode":300,"message":u"操作失败"})
    return HttpResponse(json.dumps(result))
@auth
def list_task(request):
    u'''List the scheduled (cron) tasks for the logged-in account.

    When the fetion session is ONLINE, also fetches the contact list via
    RPC so the template can offer receivers to pick from.
    '''
    phone = request.session['phone']
    # FIX: local was named `list`, shadowing the builtin; the template
    # context key 'list' is kept unchanged.
    task_items = TaskCron.objects.filter(phone=phone)
    status = FetionStatus.objects.filter(phone=phone)[0]
    datas = {'list':task_items,'status':status,'tasks':TASK_ENUM}
    if status.status == 'ONLINE':
        contact_list = proxy.get_contact_list()
        datas.update({'contact_list':contact_list['bds']})
    return render_to_response('fetion/task_list.html',datas,context_instance=RequestContext(request))
@auth
def add_task(request):
    u'''Create a scheduled (cron) task for this account.

    Expects POST fields: cron, receiver, task_type and (optionally) text.
    '''
    if request.method == "POST":
        result = {"navTabId":"task_list","rel":"task_list","callbackType":"","forwardUrl":""}
        phone = request.session['phone']
        cron = request.POST.get('cron',None)
        receiver = request.POST.get('receiver',None)
        task_type = request.POST.get('task_type',None)
        text = request.POST.get('text',None)
        # FIX: dropped a leftover debug `print` of the raw form values.
        if phone and cron and receiver and task_type:
            task = TaskCron()
            task.phone = phone
            task.cron = cron
            task.receiver = receiver
            task.task_type = task_type
            task.text = text
            task.save()
            result.update({"statusCode":200,"message":u"操作成功"})
            return HttpResponse(json.dumps(result))
        result.update({"statusCode":300,"message":u"操作失败"})
        return HttpResponse(json.dumps(result))
    # NOTE(review): non-POST requests fall through and return None.
@auth
def del_task(request,id):
    u'''Delete one scheduled task, only if owned by the session user.'''
    result = {"navTabId":"task_list","rel":"task_list","callbackType":"","forwardUrl":""}
    task = TaskCron.objects.get(pk=id)
    if task and task.phone == request.session['phone']:
        task.delete()
        result.update({"statusCode":200,"message":u"操作成功"})
        return HttpResponse(json.dumps(result))
    # Not found / not owned -> failure payload.
    result.update({"statusCode":300,"message":u"操作失败"})
    return HttpResponse(json.dumps(result))
| Python |
#!/usr/bin/env python
# --*-- encoding:utf-8 --*--
import datetime
def cron_check(datetime, cron):
    u'''Check whether *datetime* matches a four-field cron expression.

    Syntax: "<month> <day> <hour> <minute>" with ranges month 1-12,
    day 1-31, hour 0-23, minute 0-59. A field of "*" matches any value.
    (The old docstring listed the field labels incorrectly.)
    '''
    def _validate(c, s, type):
        u'''Validate one cron field *c* against the current value *s*.'''
        if c == "*":
            return True
        c = int(c)
        s = int(s)
        return {
            'd': lambda: (c == s and c >= 1 and c <= 12),   # month
            'D': lambda: (c == s and c >= 1 and c <= 31),   # day of month
            's': lambda: (c == s and c >= 0 and c <= 23),   # hour
            'm': lambda: (c == s and c >= 0 and c <= 59)    # minute
        }[type]()
    # FIX: `mouth` was a typo for month; use short-circuit `and` instead
    # of bitwise `&` so later fields are skipped once one fails.
    month, day, hour, minute = cron.split(" ")
    return (_validate(month, datetime.month, "d") and
            _validate(day, datetime.day, "D") and
            _validate(hour, datetime.hour, "s") and
            _validate(minute, datetime.minute, "m"))
# Manual smoke test (Python 2 print statement).
if __name__=="__main__":
    print cron_check(datetime.datetime.now(),"1 13 15 *")
| Python |
#!/usr/bin/env python
# --*-- encoding:utf-8 --*--
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
# SMTP server plus the sender account and password.
# SECURITY(review): real-looking credentials are committed in source --
# move them to configuration/environment and rotate this password.
mail_host="smtp.gmail.com"
mail_user="ad@fengsage.com"
mail_pass="sd145914sd"
def send_mail(to, subject, text):
    u'''Send a plain-text e-mail through the configured SMTP account.

    to      -- recipient address
    subject -- message subject
    text    -- message body (attached as a MIMEText part)

    smtplib exceptions propagate to the caller.
    '''
    msg = MIMEMultipart()
    msg['From'] = mail_user
    msg['To'] = to
    msg['Subject'] = subject
    msg.attach(MIMEText(text))
    # FIX: use the mail_host constant instead of repeating the host name.
    mailServer = smtplib.SMTP(mail_host, 587)
    try:
        mailServer.ehlo()
        mailServer.starttls()   # port 587 requires STARTTLS
        mailServer.ehlo()
        mailServer.login(mail_user, mail_pass)
        mailServer.sendmail(mail_user, to, msg.as_string())
    finally:
        # FIX: close the connection even when login/send fails.
        mailServer.close()
# Manual smoke test -- sends a real e-mail through the configured account.
if __name__=="__main__":
    send_mail("me@fengsage.com","111111","2")
| Python |
#!/usr/bin/env python
# --*-- encoding:utf-8 --*--
# (weather.com.cn area code, display name) pairs supported by weather().
CITY_LIST = (
    ('101210301',u'嘉兴'),
    ('101020100',u'上海')
)
def weather(city):
    u'''Fetch a weather summary for *city* (a weather.com.cn area code).

    Returns a formatted unicode string with today's and tomorrow's
    weather, or None when the HTTP status is not 200.
    '''
    import urllib2, json
    API = u'http://m.weather.com.cn/data/%s.html' % city
    resp = urllib2.urlopen(API)
    try:
        # Preserve the old best-effort contract: None on a non-200 reply.
        if resp.getcode() != 200:
            return
        rc = json.loads(resp.read())
    finally:
        resp.close()   # FIX: the response handle was never closed before
    info = rc['weatherinfo']
    msg = u'%s,今天天气%s,温度%s,明天天气%s,温度%s'%(info['city'],info['weather1'],info['temp1'],info['weather2'],info['temp2'])
    return msg
# Manual smoke test -- performs a live HTTP request (Python 2 print).
if __name__=="__main__":
    print weather(CITY_LIST[0][0])
| Python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
# Project root, resolved from this settings file's location.
PRJ_PATH=os.path.realpath(os.path.dirname(__file__))

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

# Development uses a local SQLite file; anything else uses MySQL.
if DEBUG:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
            'NAME': os.path.join(PRJ_PATH,'data/im.db'), # Or path to database file if using sqlite3.
            'USER': '', # Not used with sqlite3.
            'PASSWORD': '', # Not used with sqlite3.
            'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
            'PORT': '', # Set to empty string for default. Not used with sqlite3.
        }
    }
else:
    # SECURITY(review): production DB credentials are committed in
    # source; load them from the environment or an untracked local
    # settings file and rotate this password.
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
            'NAME': 'fetionim', # Or path to database file if using sqlite3.
            'USER': 'fetionim', # Not used with sqlite3.
            'PASSWORD': 'zm34mykymh4uu3w', # Not used with sqlite3.
            'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
            'PORT': '', # Set to empty string for default. Not used with sqlite3.
        }
    }

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Asia/Shanghai'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'zh-CN'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PRJ_PATH,'medias')

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/medias/'

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'

# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(PRJ_PATH,'static'),
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# SECURITY(review): the secret key is committed in source -- regenerate
# it and load it from the environment.
SECRET_KEY = '^5y1l3ty@qc4@*tvt3$#5w2icb9!v+kxu_6l=k)eyy@3hd_3%+'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    # NOTE(review): CSRF protection is disabled (commented out).
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'fetionim.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(PRJ_PATH,'template'),
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    #customer app
    'fetion',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(name)s %(asctime)s %(message)s'
        },
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console':{
            'level':'DEBUG',
            'class':'logging.StreamHandler',
            'formatter': 'verbose'
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'fetion': {
            'handlers': ['console', ],
            'level':'DEBUG',
        },
    }
}

# Whether to store the fetion password on the FetionStatus row
# (original comment: 是否保存飞信密码). See fetion.views.login/fetion_login.
SAVE_PASSWORD = True
# Port of the local XML-RPC daemon started by the management command.
RPC_PORT = 3333
| Python |
#!/usr/bin/env python
from django.core.management import execute_manager
import imp
# Django <=1.3 style manage.py: make sure settings.py is importable
# before handing control to execute_manager.
try:
    imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)

import settings

if __name__ == "__main__":
    execute_manager(settings)
| Python |
from django.conf.urls.defaults import patterns, include, url
# Project-level URLconf: everything under / is delegated to the fetion
# app, /api/ to the api app.
urlpatterns = patterns('',
    url(r'^', include('fetion.urls')),
    url(r'^api/', include('api.urls')),
)
| Python |
#!/usr/bin/env python
from django.core.management import execute_manager
import imp
# Django <=1.3 style manage.py: make sure settings.py is importable
# before handing control to execute_manager.
try:
    imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)

import settings

if __name__ == "__main__":
    execute_manager(settings)
| Python |
"""Classes related to saving data such as user ID and API key and the
Skill Tree as a Python pickle to remember data between sessions."""
from cPickle import load, dump
import sys
class Pickle():
    """Parent class for small pickle-backed stores: a data dict plus the
    backing filename. Shouldn't be used directly."""
    def __init__(self, filename):
        self.data = {}              # payload (un)pickled to/from disk
        self.filename = filename    # backing file path
    def loadData(self):
        """Load pickled data from self.filename into self.data.

        IOError (e.g. missing file) and unpickling errors propagate to
        the caller, as before."""
        # FIX: `with` closes the handle even when load() raises; the old
        # code leaked the handle on error and re-raised IOError by hand
        # for no gain.
        with open(self.filename, "rb") as in_pickle:
            self.data = load(in_pickle)
    def saveData(self):
        """Pickle self.data to self.filename (highest protocol)."""
        with open(self.filename, "wb") as out_pickle:
            dump(self.data, out_pickle, -1)
class ConfigPickle(Pickle):
    """Pickle-backed store ("config.pickle") for account and character
    data, keyed by user ID."""
    def __init__(self):
        Pickle.__init__(self, "config.pickle")
    def setAccount(self, uid, apikey, cids):
        """Add or replace an account: its API key and all character IDs.

        FIX: dropped a leftover debug `print` of the entire store (it
        leaked API keys to stdout)."""
        self.data[uid] = {
            "apikey": apikey,
            "cids": cids
        }
    def getCharParams(self, uid, cid):
        """Return a dict of POST parameters (characterID/userID/apikey)
        for the given user and character, for the Eve API.

        (Doc fix: the old docstring claimed the return value was
        urlencoded; it is a plain dict -- callers urlencode it.)
        Raises AssertionError when uid/cid/apikey is missing.
        NOTE(review): assert-based validation disappears under
        `python -O`; consider explicit raises.
        """
        # Test for existence of required data
        try:
            assert uid in self.data, "Account not found."
            assert cid in self.data[uid]["cids"], "Character not found."
            assert "apikey" in self.data[uid], "API key not found."
        except AssertionError as strerror:
            print("Error: %s" % strerror)
            raise
        # Build POST data
        params = {
            'characterID': cid,
            'userID': uid,
            'apikey': self.data[uid]["apikey"]
        }
        return params
    def chars(self, uid):
        """Return the list of character IDs stored for *uid*."""
        return self.data[uid]["cids"]
    def uids(self):
        """Return the user IDs saved in the ConfigPickle."""
        return self.data.keys()
class SkillPickle(Pickle):
    """Pickle-backed cache ("SkillTree.pickle") of the parsed SkillTree,
    avoiding refetching and reparsing SkillTree.xml every run."""
    def __init__(self):
        Pickle.__init__(self, "SkillTree.pickle")
| Python |
#!/usr/bin/python
import signal
import urllib
import time
import atexit
import sys
import os
import time, datetime
import fetch, character, pickle, util
def cleanup(thread):
    """atexit hook: stop the fetch pool's threads, then exit.

    NOTE(review): sys.exit() inside an atexit handler raises SystemExit
    during interpreter shutdown; it is likely unnecessary here.
    """
    print "atexit cleanup() called"
    thread.kill()
    sys.exit(0)
def main():
config = pickle.ConfigPickle()
apikey = ""
userid = ""
characterid = ""
try:
config.loadData()
except IOError, (errno, strerror):
if errno is 2: # File not found
print "Please enter your character data for Eve Online: "
userid = raw_input("User ID: ")
apikey = raw_input("API key: ")
characterid = raw_input("Character ID: ")
config.setAccount(userid, apikey, [characterid])
config.saveData()
else:
print "Unhandled config pickle error."
sys.exit(1)
userid = config.uids()[0] # grab the first uid
characterid = config.chars(userid)[0] # grab the first char
# PoolThread(num threads, update interval)
FETCH_PREFIX = os.getcwd()
pool = fetch.PoolThread(FETCH_PREFIX, 2, 20)
# Cleanup properly on kill and program exit
atexit.register(cleanup, pool)
signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
signal.signal(signal.SIGINT, lambda signum, stack_frame: sys.exit(1))
pool.start()
# Yoink stuff from the pickle
try:
params = config.getCharParams(userid, characterid)
except Exception, strerr:
print strerr
sys.exit(1)
print "Using char data ", params, "\n"
eve = {
"SkillTree": "/eve/SkillTree.xml.aspx",
}
char = {
"CharacterSheet": "/char/CharacterSheet.xml.aspx",
"SkillInTraining": "/char/SkillInTraining.xml.aspx",
}
print "push"
pool.pushURL(eve["SkillTree"])
pool.pushURL(char["CharacterSheet"], params, fetch_now=True)
pool.pushURL(char["SkillInTraining"], params, fetch.
FETCH_PERIODIC)
# Build up skill tree
pool.wait(eve["SkillTree"])
stree = character.SkillTree(util.xmlpath(FETCH_PREFIX,
eve["SkillTree"]))
print "Skill tree loaded."
pool.wait(char["CharacterSheet"])
try:
mychar = character.Character(util.xmlpath(
FETCH_PREFIX, char["CharacterSheet"], params))
except Exception, errmsg:
raise
sys.exit(-1)
print "Character loaded."
while True:
pool.wait(char["SkillInTraining"])
try:
mychar.parseSIT(util.xmlpath(
FETCH_PREFIX, char["SkillInTraining"], params))
except Exception, errmsg:
raise
sys.exit(-1)
sit = mychar.skillInTraining
sit_name = stree.types[sit.trainingTypeID].typeName
level = sit.trainingToLevel
end_dt = util.eve2local(sit.trainingEndTime)
end_s = end_dt.strftime("%a, %b %d at %I:%M %p")
eta_s = util.eta_str(end_dt)
start_sp, end_sp = sit.trainingStartSP, sit.trainingDestinationSP
print
print """Currently training skill "%s" to level %s, scheduled to finish in %s on %s.""" % (sit_name, level, eta_s, end_s)
# Script entry point.
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/python
import signal
import urllib
import fetch
import time
import atexit
import sys
import character
def cleanup(thread):
    """atexit hook: stop the fetch pool's worker threads."""
    thread.kill()
def fetchtest():
pool = fetch.PoolThread(2, 5)
# Cleanup properly on kill and program exit
atexit.register(cleanup, pool)
signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
signal.signal(signal.SIGINT, lambda signum, stack_frame: sys.exit(1))
print "start"
pool.start()
# Test account? pulled from example
params = urllib.urlencode( {
'characterID': 150209812,
'userid': 256833,
'apikey': 'DVPtgNgmyEGk9L9RxDTJn8dCjIraiaa7efNK4VlGKAoI6Hm8uB514zy6CbFCuScC',
} )
print "push"
pool.pushURL("/char/WalletTransactions.xml.aspx", params, fetch.
FETCH_PERIODIC, True)
pool.pushURL("/account/Characters.xml.aspx", params, fetch.
FETCH_NEVER, False)
time.sleep(5)
print "refetch all"
pool.refetch(True)
while True:
continue
def parsetest():
    """Manual test: parse the sample XML files from the working directory."""
    # FIX: close the XML files when done -- the old code leaked all
    # three handles.
    with open("CharacterSheet.xml", "r") as charsheet:
        mychar = character.Character(charsheet)
    with open("SkillInTraining.xml", "r") as sit:
        mychar.parseSIT(sit)
    with open("SkillTree.xml", "r") as treefile:
        stree = character.SkillTree(treefile)
# Runs only the offline parse test by default.
if __name__ == "__main__":
    parsetest()
| Python |
#!/usr/bin/python
import signal
import urllib
import time
import atexit
import sys
import os
import time, datetime
import fetch, character, pickle, util
def cleanup(thread):
    """atexit hook: stop the fetch pool's threads, then exit.

    NOTE(review): sys.exit() inside an atexit handler raises SystemExit
    during interpreter shutdown; it is likely unnecessary here.
    """
    print "atexit cleanup() called"
    thread.kill()
    sys.exit(0)
def main():
config = pickle.ConfigPickle()
apikey = ""
userid = ""
characterid = ""
try:
config.loadData()
except IOError, (errno, strerror):
if errno is 2: # File not found
print "Please enter your character data for Eve Online: "
userid = raw_input("User ID: ")
apikey = raw_input("API key: ")
characterid = raw_input("Character ID: ")
config.setAccount(userid, apikey, [characterid])
config.saveData()
else:
print "Unhandled config pickle error."
sys.exit(1)
userid = config.uids()[0] # grab the first uid
characterid = config.chars(userid)[0] # grab the first char
# PoolThread(num threads, update interval)
FETCH_PREFIX = os.getcwd()
pool = fetch.PoolThread(FETCH_PREFIX, 2, 20)
# Cleanup properly on kill and program exit
atexit.register(cleanup, pool)
signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
signal.signal(signal.SIGINT, lambda signum, stack_frame: sys.exit(1))
pool.start()
# Yoink stuff from the pickle
try:
params = config.getCharParams(userid, characterid)
except Exception, strerr:
print strerr
sys.exit(1)
print "Using char data ", params, "\n"
eve = {
"SkillTree": "/eve/SkillTree.xml.aspx",
}
char = {
"CharacterSheet": "/char/CharacterSheet.xml.aspx",
"SkillInTraining": "/char/SkillInTraining.xml.aspx",
}
print "push"
pool.pushURL(eve["SkillTree"])
pool.pushURL(char["CharacterSheet"], params, fetch_now=True)
pool.pushURL(char["SkillInTraining"], params, fetch.
FETCH_PERIODIC)
# Build up skill tree
pool.wait(eve["SkillTree"])
stree = character.SkillTree(util.xmlpath(FETCH_PREFIX,
eve["SkillTree"]))
print "Skill tree loaded."
pool.wait(char["CharacterSheet"])
try:
mychar = character.Character(util.xmlpath(
FETCH_PREFIX, char["CharacterSheet"], params))
except Exception, errmsg:
raise
sys.exit(-1)
print "Character loaded."
while True:
pool.wait(char["SkillInTraining"])
try:
mychar.parseSIT(util.xmlpath(
FETCH_PREFIX, char["SkillInTraining"], params))
except Exception, errmsg:
raise
sys.exit(-1)
sit = mychar.skillInTraining
sit_name = stree.types[sit.trainingTypeID].typeName
level = sit.trainingToLevel
end_dt = util.eve2local(sit.trainingEndTime)
end_s = end_dt.strftime("%a, %b %d at %I:%M %p")
eta_s = util.eta_str(end_dt)
start_sp, end_sp = sit.trainingStartSP, sit.trainingDestinationSP
print
print """Currently training skill "%s" to level %s, scheduled to finish in %s on %s.""" % (sit_name, level, eta_s, end_s)
# Script entry point.
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/python
import signal
import urllib
import fetch
import time
import atexit
import sys
import character
def cleanup(thread):
    """atexit hook: stop the fetch pool's worker threads."""
    thread.kill()
def fetchtest():
pool = fetch.PoolThread(2, 5)
# Cleanup properly on kill and program exit
atexit.register(cleanup, pool)
signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
signal.signal(signal.SIGINT, lambda signum, stack_frame: sys.exit(1))
print "start"
pool.start()
# Test account? pulled from example
params = urllib.urlencode( {
'characterID': 150209812,
'userid': 256833,
'apikey': 'DVPtgNgmyEGk9L9RxDTJn8dCjIraiaa7efNK4VlGKAoI6Hm8uB514zy6CbFCuScC',
} )
print "push"
pool.pushURL("/char/WalletTransactions.xml.aspx", params, fetch.
FETCH_PERIODIC, True)
pool.pushURL("/account/Characters.xml.aspx", params, fetch.
FETCH_NEVER, False)
time.sleep(5)
print "refetch all"
pool.refetch(True)
while True:
continue
def parsetest():
    """Manual test: parse the sample XML files from the working directory."""
    # FIX: close the XML files when done -- the old code leaked all
    # three handles.
    with open("CharacterSheet.xml", "r") as charsheet:
        mychar = character.Character(charsheet)
    with open("SkillInTraining.xml", "r") as sit:
        mychar.parseSIT(sit)
    with open("SkillTree.xml", "r") as treefile:
        stree = character.SkillTree(treefile)
# Runs only the offline parse test by default.
if __name__ == "__main__":
    parsetest()
| Python |
"""Classes related to downloading data from the Eve API."""
import os
import httplib, urllib
import threading, Queue
import time
import util
# Fetch scheduling modes for FetchItem.
FETCH_PERIODIC = 1   # refetched on every update interval
FETCH_NEVER = 2      # fetched once, only when the local file is missing
FETCH_TYPES = (FETCH_PERIODIC, FETCH_NEVER)
class PoolThread(threading.Thread):
"""Thread that manages a number of FetchThreads. Periodically inserts
FetchItems into the fetch_pool for the FetchThreads, and handles requests
for global URL refetches."""
def __init__(self, prefix, num_fetch = 3, interval=600):
threading.Thread.__init__(self)
# Directory to save all the files
self.prefix = prefix
# Number of fetch threads to start, only need a few
self.num_fetch = num_fetch
# Interval between automatic updates
self.interval = interval
# Holds the fetch threads
self.fetch_threads = []
self.fetch_pool = Queue.Queue()
# Holds FetchItems
self.fetch_items = []
# Holds urls of fetched items, to alert when things are fetched
self.fetched = set()
# Controlled by refetch
self.refetch_event = threading.Event()
# Controlled by global refetch. Set both this and refetch_event.
self.g_event = threading.Event()
# kill switch
self.is_alive = True
def run(self):
"""Main run loop for the PoolThread."""
# Create fetch threads
for i in xrange(self.num_fetch):
self.fetch_threads.append(FetchThread(self))
self.fetch_threads[-1].start()
last_update = 0 # Force a fetch initially
while self.is_alive:
# Check for auto update and refetch event
auto = time.time() - last_update > self.interval
if self.refetch_event.isSet() or auto:
for i in xrange(len(self.fetch_items)):
itm = self.fetch_items[i]
# Fetch if it's a periodic url, or a global refetch
# or if it is marked "fetch_now"
if (auto and itm.fetch is FETCH_PERIODIC) or \
self.g_event.isSet() or itm.fetch_now:
self.fetch_pool.put(itm)
itm.fetch_now = False
# Wait for all the items to be fetched
self.fetch_pool.join()
# Reset
last_update = time.time()
self.refetch_event.clear()
time.sleep(1)
def pushURL(self, url, params=None, fetch=FETCH_NEVER, fetch_now=False):
"""Pushes a URL to be fetched into the fetch pool. The presence
of params indicates that the url is character specific. The fetch
parameter determines whether a URL should be regularaly fetched, like
the current Skill in Training. fetch_now determines if the item is to
be fetched on the next loop iteration."""
# Fetch permanent files if they don't exist
filename = util.xmlpath(self.prefix, url, params)
if fetch is FETCH_NEVER:
if not os.path.isfile(filename):
fetch_now = True
else: # Add into self.fetched if already exists to clear wait()
self.fetched.add(url)
itm = FetchItem(url, filename, params, fetch, fetch_now)
self.fetch_items.append(itm)
def refetch(self, get_all=False):
"""Sets some or all of the fetch_items to be refetched."""
# Makes run() push urls into fetch_pool
self.refetch_event.set()
if get_all:
self.g_event.set()
def kill(self):
"""Kills the PoolThread and all of its FetchThreads."""
for thread in self.fetch_threads:
thread.kill()
self.is_alive = False
def wait(self, url, timeout=9999):
    """Block until *url* shows up in self.fetched, then consume it.

    Polls once per second; raises after roughly *timeout* seconds.
    """
    remaining = timeout
    while url not in self.fetched:
        time.sleep(1)
        remaining -= 1
        if remaining < 0:
            raise Exception("URL could not be fetched, timing out.")
    self.fetched.remove(url)
class FetchItem():
    """A URL queued for fetching, together with its download policy:
    how often it is fetched, and whether it must be fetched on the
    next loop iteration."""
    def __init__(self, url, filename, params, fetch, fetch_now):
        # Reject unknown fetch policies up front.
        if fetch not in FETCH_TYPES:
            raise Exception("Invalid fetch type %s" % fetch)
        self.filename = filename
        self.url = url
        self.params = params
        self.fetch = fetch
        self.fetch_now = fetch_now
class FetchThread(threading.Thread):
    """Thread that watches the fetch_pool of a PoolThread, waiting for
    FetchItems to fetch from the Eve API. It reports fetched items back
    into the parent's "fetched" set."""
    def __init__(self, pool):
        # pool: the owning PoolThread; provides fetch_pool (a Queue)
        # and the shared "fetched" result set.
        threading.Thread.__init__(self)
        self.pool = pool
        # kill switch
        self.is_alive = True
    def run(self):
        """Main run loop for the FetchThread. Blocks until a FetchItem
        shows up in the fetch_pool, fetches it, and then alerts the PoolThread."""
        headers = {"Content-type": "application/x-www-form-urlencoded"}
        conn = httplib.HTTPConnection("api.eve-online.com")
        while self.is_alive:
            # Block until an item pops into the pool
            itm = self.pool.fetch_pool.get(True)
            #print "Fetching %s..." % itm.url
            # POST the form-encoded params to the API endpoint.
            conn.connect()
            conn.request("POST", itm.url, urllib.urlencode(itm.params), headers)
            response = conn.getresponse()
            data = response.read()
            conn.close()
            # Check if directories exist before writing
            try:
                os.makedirs(os.path.split(itm.filename)[0])
            except Exception, errmsg:
                # Best effort: the directory usually already exists.
                pass
                #print errmsg # FIXME
            # Write the file
            handle = open(itm.filename, "w")
            handle.write(data)
            handle.close()
            #print "%s saved as %s" % (itm.url, itm.filename)
            # Tell the pool we're done
            self.pool.fetch_pool.task_done()
            # Put it in the fetched set
            self.pool.fetched.add(itm.url)
    def kill(self):
        """Kills the FetchThread."""
        self.is_alive = False
| Python |
"""Miscellaneous helper functions that don't belong in the client code."""
import time
from datetime import datetime
def xmlpath(prefix, url, params=None):
    """Map an API URL like /account/Characters.xml.aspx to a cache path
    such as <prefix>/<userID>/<characterID>/Characters.xml.

    Global (non-character) files carry no params and land directly
    under *prefix*.
    """
    parts = [prefix]
    if params: # This file might be global, thus not have params
        parts.append(params["userID"])
        parts.append(params["characterID"])
    basename = url.rsplit("/", 1)[-1]
    parts.append(basename[:-5])  # strip the trailing ".aspx"
    return '/'.join(parts)
def eve2local(eve):
"""Takes a date string from Eve XML and turns it into a local
standard Python datetime object."""
# Convert to a time tuple
eve_t = time.strptime(eve, "%Y-%m-%d %H:%M:%S")
# Convert to a Unix-style timestamp
eve_u = time.mktime(eve_t)
# Subtract out the timezone and DST
local_u = eve_u - time.timezone + time.daylight*3600
# Convert to local time tuple
local_t = time.localtime(local_u)
return datetime(*local_t[:6])
# TODO: make this work when the time is already passed (test inputs!)
def eta_str(future_dt):
"""Takes a python datetime in the future and turns it into an ETA string
of the form "3 days, 1 hour, 41 minutes"."""
# find difference between future and now
eta = future_dt - datetime.today()
secs = eta.seconds
eta_t = (eta.days, secs/60/60, (secs/60 - (secs/60/60)*60))
# bits for singularizing intervals
intervals = ["days", "hours", "minutes"]
singular = lambda x, y: ( x, (y, y[:-1])[x == 1] )
# k>0 removes e.g. "0 years", "0 months" from string
eta_s = ', '.join(["%s %s" % singular(k, v) for \
k, v in zip(eta_t, intervals) if k > 0])
return eta_s
| Python |
"""Contains classes for character specific data, like attributes,
skills, wallet, and corporation. Parsing makes use of the great
cElementTree module availble standard in Python 2.5."""
import xml.etree.cElementTree as ET
# The five character attributes the Eve API reports; used as element
# name stems when parsing the attributes and attributeEnhancers XML.
ATTRIBUTES = (
    "memory",
    "perception",
    "intelligence",
    "willpower",
    "charisma",
)
class Data():
    """Base class grouping related data fields, generally one XML
    file's worth.

    Field values live in self.data (all keys pre-seeded with "") and
    are exposed as read-only attributes via __getattr__.
    """
    def __init__(self, fields):
        # fields: iterable of field names this object may carry.
        self.fields = fields
        # Pre-seed every field so all keys always exist.
        self.data = dict((field, "") for field in fields)
    def __getattr__(self, name):
        """Expose self.data entries as plain attribute reads."""
        if name in self.fields:
            return self.data[name]
        raise KeyError("Invalid field %s requested from %s object" %
                       (name, self.__class__.__name__))
    def checkXML(self, tree):
        """Raise if the parsed XML tree carries an <error> element."""
        # ElementTree elements are falsy when childless, so the
        # presence test must compare against None explicitly.
        err = tree.find("error")
        if err is not None:
            raise Exception("%s: %s" % (err.get("code"), err.text))
    def __repr__(self):
        return repr(self.data)
    def __str__(self):
        return '\n'.join(["%s: %s" % (k, v) for k, v in self.data.items()])
class Character(Data):
    """Stores data about a single character, e.g. name, race, skills, and
    attributes."""
    def __init__(self, fin):
        # fin: filename (or file object) of the CharacterSheet XML.
        fields = (
            "characterID",
            "name",
            "race",
            "bloodLine",
            "gender",
            "corporationName",
            "corporationID",
            "balance",
            "attributeEnhancers",
            "attributes",
            "skills",
            "skillInTraining",
            "params",
        )
        Data.__init__(self, fields)
        # Container fields need real containers, not the "" default.
        self.data["attributeEnhancers"] = {}
        self.data["attributes"] = {}
        self.data["skills"] = []
        # skills will be a SkillTree
        # skillInTraining needs to come from another XML file.
        self.parseCS(fin)
    def parseCS(self, fin):
        """Takes a filename as an argument and returns
        a Character object of the parsed data."""
        tree = ET.parse(fin)
        self.checkXML(tree)
        tree = tree.find("result")
        for field in self.fields:
            if field == "attributeEnhancers":
                # Implant bonuses: one optional <xxxBonus> element each.
                for attr in ATTRIBUTES:
                    bonus = tree.find("attributeEnhancers")\
                            .find(attr+"Bonus")
                    # NOTE(review): `if bonus:` tests ElementTree
                    # truthiness, which is False for a childless element
                    # even when it exists; `bonus is not None` is the
                    # documented idiom -- confirm intent before changing.
                    if bonus:
                        self.data["attributeEnhancers"][attr] = \
                            bonus.find("augmentatorValue").text
                    else:
                        self.data["attributeEnhancers"][attr] = "0"
            elif field == "attributes":
                # Base attributes are mandatory child elements.
                for attr in ATTRIBUTES:
                    self.data["attributes"][attr] = \
                        tree.find("attributes").find(attr).text
            elif field == "skills":
                # Trained skills arrive as a <rowset> of <row> elements.
                rows = tree.find("rowset")
                for row in rows.getchildren():
                    self.data["skills"].append(CharSkill(row))
            elif field == "skillInTraining":
                pass # handled by parseSIT()
            elif field == "params":
                pass # handled elsewhere
            else:
                # Plain scalar fields map 1:1 to child element text.
                self.data[field] = tree.find(field).text
    def parseSIT(self, fin):
        """Parses the SkillInTraining.xml file into a SkillInTraining
        object."""
        tree = ET.parse(fin)
        self.checkXML(tree)
        tree = tree.find("result")
        self.data["skillInTraining"] = SkillInTraining(tree)
class CharSkill(Data):
    """A skill already trained by a character (dynamic data only).

    Static information such as description, name, and group lives in
    Skill, not here.  Argument is a cElementTree.Element for one <row>.
    """
    def __init__(self, elem):
        skill_fields = (
            "typeID",
            "skillpoints",
            "level"
        )
        Data.__init__(self, skill_fields)
        # Every field is stored as an attribute on the row element.
        for name in skill_fields:
            self.data[name] = elem.get(name)
class SkillInTraining(Data):
    """Snapshot of the skill a character is currently training.

    Argument is the <result> element of SkillInTraining.xml; every
    field maps to the text of a same-named child element.
    """
    def __init__(self, elem):
        sit_fields = (
            "trainingEndTime",
            "trainingStartTime",
            "trainingTypeID",
            "trainingStartSP",
            "trainingDestinationSP",
            "trainingToLevel",
            "skillInTraining",
        )
        Data.__init__(self, sit_fields)
        for name in sit_fields:
            self.data[name] = elem.find(name).text
class SkillTree():
    """The full Eve Online skill tree.

    Skills can be fetched either by typeID (self.types) or by groupID
    (self.groups).
    """
    def __init__(self, fin):
        self.types = {}   # typeID -> Skill
        self.groups = {}  # groupID -> SkillGroup
        self.parse(fin)
    def parse(self, fin):
        """Parse SkillTree.xml, populating self.types and self.groups."""
        rowset = ET.parse(fin).find("result").find("rowset")
        for grp in rowset.findall("row"):
            # SkillGroup's constructor also registers each of its
            # Skills in self.types.
            self.groups[grp.get("groupID")] = SkillGroup(self, grp)
class SkillGroup():
    """One named group of skills, e.g. Mechanic, Navigation, Gunnery."""
    def __init__(self, stree, group):
        # stree: the owning SkillTree; group: the group's <row> element.
        self.group_id = group.get("groupID")
        self.group_name = group.get("groupName")
        self.skills = []  # list of Skill objects in this group
        for row in group.find("rowset"):
            parsed = Skill(row)
            self.skills.append(parsed)
            # Register in the tree-wide typeID index as well.
            stree.types[row.get("typeID")] = parsed
class Skill(Data):
    """Stores data about a single skill, e.g. name, description, prereqs."""
    def __init__(self, skill):
        # skill: cElementTree.Element for one skill <row>.
        fields = (
            "typeName",
            "description",
            "rank",
            "requiredSkills",
            "requiredAttributes",
            "skillBonusCollection",
        )
        Data.__init__(self, fields)
        # Container fields need real containers, not the "" default.
        self.data["requiredSkills"] = []
        self.data["requiredAttributes"] = ()
        self.data["skillBonusCollection"] = []
        # NOTE(review): "rank" falls through to the rowset branch below
        # and no rowset is named "rank", so it stays "" -- confirm
        # whether it should be read from an attribute instead.
        for field in self.fields:
            if field == "typeName":
                # Stored as an attribute on the row itself.
                self.data[field] = skill.get(field)
            elif field =="description":
                self.data[field] = skill.find(field).text
            elif field == "requiredAttributes":
                # (primary, secondary) attribute-name pair.
                attrs = skill.find(field)
                self.data[field] = ( attrs.find("primaryAttribute").text,
                    attrs.find("secondaryAttribute").text )
            else: # parse the rowsets: requiredSkills, skillBonusCollection
                for rowset in skill.findall("rowset"):
                    if rowset.get("name") == field:
                        if field == "requiredSkills":
                            # Prerequisites as (typeID, skillLevel) pairs.
                            for row in rowset:
                                self.data[field].append( (row.get("typeID"),
                                    row.get("skillLevel")) )
                        if field == "skillBonusCollection":
                            # Bonuses as (bonusType, bonusValue) pairs.
                            for row in rowset.findall("row"):
                                self.data[field].append( (
                                    row.get("bonusType"),
                                    row.get("bonusValue")
                                    ) )
| Python |
"""PyRSS2Gen - A Python library for generating RSS 2.0 feeds."""
__name__ = "PyRSS2Gen"
__version__ = (1, 0, 0)
__author__ = "Andrew Dalke <dalke@dalkescientific.com>"
_generator_name = __name__ + "-" + ".".join(map(str, __version__))
import datetime
# Could make this the base class; will need to add 'publish'
class WriteXmlMixin:
    # Mixin providing XML serialization on top of a publish(handler)
    # method supplied by the subclass.
    def write_xml(self, outfile, encoding = "iso-8859-1"):
        """Serialize self as a complete XML document to *outfile*
        (a file-like object)."""
        from xml.sax import saxutils
        handler = saxutils.XMLGenerator(outfile, encoding)
        handler.startDocument()
        self.publish(handler)
        handler.endDocument()
    def to_xml(self, encoding = "iso-8859-1"):
        """Return the XML serialization of self as a string."""
        # cStringIO is the C-accelerated variant; fall back to the
        # pure-Python module when it is unavailable.
        try:
            import cStringIO as StringIO
        except ImportError:
            import StringIO
        f = StringIO.StringIO()
        self.write_xml(f, encoding)
        return f.getvalue()
def _element(handler, name, obj, d = {}):
    # Emit <name ...d...>obj</name> when obj is a string, an empty
    # element when obj is None, otherwise delegate to obj.publish().
    # NOTE: the shared mutable default for *d* is safe here because it
    # is never modified.
    if isinstance(obj, basestring) or obj is None:
        # special-case handling to make the API easier
        # to use for the common case.
        handler.startElement(name, d)
        if obj is not None:
            handler.characters(obj)
        handler.endElement(name)
    else:
        # It better know how to emit the correct XML.
        obj.publish(handler)
def _opt_element(handler, name, obj):
    """Like _element, but silently skip None values."""
    if obj is not None:
        _element(handler, name, obj)
def _format_date(dt):
"""convert a datetime into an RFC 822 formatted date
Input date must be in GMT.
"""
# Looks like:
# Sat, 07 Sep 2002 00:00:01 GMT
# Can't use strftime because that's locale dependent
#
# Isn't there a standard way to do this for Python? The
# rfc822 and email.Utils modules assume a timestamp. The
# following is based on the rfc822 module.
return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (
["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][dt.weekday()],
dt.day,
["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"][dt.month-1],
dt.year, dt.hour, dt.minute, dt.second)
##
# A couple simple wrapper objects for the fields which
# take a simple value other than a string.
class IntElement:
    """Implements the 'publish' API for integers.

    Takes the tag name and the integer value to publish.  (Works for
    anything whose str() form is the desired XML text.)
    """
    element_attrs = {}
    def __init__(self, name, val):
        self.name = name
        self.val = val
    def publish(self, handler):
        tag = self.name
        handler.startElement(tag, self.element_attrs)
        handler.characters(str(self.val))
        handler.endElement(tag)
class DateElement:
    """Implements the 'publish' API for a datetime.datetime.

    Takes the tag name and the datetime; the value is rendered as an
    RFC 2822 timestamp (4-digit year).
    """
    def __init__(self, name, dt):
        self.name = name
        self.dt = dt
    def publish(self, handler):
        text = _format_date(self.dt)
        _element(handler, self.name, text)
####
class Category:
    """Publish a <category> element, optionally carrying a domain
    attribute."""
    def __init__(self, category, domain = None):
        self.category = category
        self.domain = domain
    def publish(self, handler):
        attrs = {}
        if self.domain is not None:
            attrs["domain"] = self.domain
        _element(handler, "category", self.category, attrs)
class Cloud:
    """Publish a <cloud> element (publish-subscribe endpoint
    description); all fields become attributes."""
    def __init__(self, domain, port, path,
                 registerProcedure, protocol):
        self.domain = domain
        self.port = port
        self.path = path
        self.registerProcedure = registerProcedure
        self.protocol = protocol
    def publish(self, handler):
        attrs = {
            "domain": self.domain,
            "port": str(self.port),
            "path": self.path,
            "registerProcedure": self.registerProcedure,
            "protocol": self.protocol,
        }
        _element(handler, "cloud", None, attrs)
class Image:
    """Publish a channel <image> element (url/title/link required,
    width/height/description optional)."""
    element_attrs = {}
    def __init__(self, url, title, link,
                 width = None, height = None, description = None):
        self.url = url
        self.title = title
        self.link = link
        self.width = width
        self.height = height
        self.description = description
    def publish(self, handler):
        handler.startElement("image", self.element_attrs)
        _element(handler, "url", self.url)
        _element(handler, "title", self.title)
        _element(handler, "link", self.link)
        # Bare ints are wrapped so they publish as element text.
        for tag in ("width", "height"):
            value = getattr(self, tag)
            if isinstance(value, int):
                value = IntElement(tag, value)
            _opt_element(handler, tag, value)
        _opt_element(handler, "description", self.description)
        handler.endElement("image")
class Guid:
    """Publish a <guid> element.

    Defaults to being a permalink, which is the RSS assumption when the
    attribute is omitted; hence plain strings are always permalinks.
    """
    def __init__(self, guid, isPermaLink = 1):
        self.guid = guid
        self.isPermaLink = isPermaLink
    def publish(self, handler):
        flag = "true" if self.isPermaLink else "false"
        _element(handler, "guid", self.guid, {"isPermaLink": flag})
class TextInput:
    """Publish a <textInput> element.  Apparently this is rarely used."""
    element_attrs = {}
    def __init__(self, title, description, name, link):
        self.title = title
        self.description = description
        self.name = name
        self.link = link
    def publish(self, handler):
        handler.startElement("textInput", self.element_attrs)
        # All four children are mandatory per the RSS spec.
        for tag in ("title", "description", "name", "link"):
            _element(handler, tag, getattr(self, tag))
        handler.endElement("textInput")
class Enclosure:
    """Publish an <enclosure> element (attached media resource)."""
    def __init__(self, url, length, type):
        self.url = url
        self.length = length
        self.type = type
    def publish(self, handler):
        attrs = {"url": self.url,
                 "length": str(self.length),
                 "type": self.type,
                 }
        _element(handler, "enclosure", None, attrs)
class Source:
    """Publish the item's original <source>, used by aggregators."""
    def __init__(self, name, url):
        self.name = name
        self.url = url
    def publish(self, handler):
        attrs = {"url": self.url}
        _element(handler, "source", self.name, attrs)
class SkipHours:
    """Publish the <skipHours> element from a list of hours given as
    integers.  Nothing is emitted when the list is empty."""
    element_attrs = {}
    def __init__(self, hours):
        self.hours = hours
    def publish(self, handler):
        if not self.hours:
            return
        handler.startElement("skipHours", self.element_attrs)
        for h in self.hours:
            _element(handler, "hour", str(h))
        handler.endElement("skipHours")
class SkipDays:
    """Publish the <skipDays> element from a list of day names given
    as strings.  Nothing is emitted when the list is empty."""
    element_attrs = {}
    def __init__(self, days):
        self.days = days
    def publish(self, handler):
        if not self.days:
            return
        handler.startElement("skipDays", self.element_attrs)
        for d in self.days:
            _element(handler, "day", d)
        handler.endElement("skipDays")
class RSS2(WriteXmlMixin):
    """The main RSS class.

    Stores the channel attributes, with the "category" elements under
    ".categories" and the RSS items under ".items".
    """
    rss_attrs = {"version": "2.0"}
    element_attrs = {}
    def __init__(self,
                 title,
                 link,
                 description,
                 language = None,
                 copyright = None,
                 managingEditor = None,
                 webMaster = None,
                 pubDate = None, # a datetime, *in* *GMT*
                 lastBuildDate = None, # a datetime
                 categories = None, # list of strings or Category
                 generator = _generator_name,
                 docs = "http://blogs.law.harvard.edu/tech/rss",
                 cloud = None, # a Cloud
                 ttl = None, # integer number of minutes
                 image = None, # an Image
                 rating = None, # a string; I don't know how it's used
                 textInput = None, # a TextInput
                 skipHours = None, # a SkipHours with a list of integers
                 skipDays = None, # a SkipDays with a list of strings
                 items = None, # list of RSSItems
                 ):
        self.title = title
        self.link = link
        self.description = description
        self.language = language
        self.copyright = copyright
        self.managingEditor = managingEditor
        self.webMaster = webMaster
        self.pubDate = pubDate
        self.lastBuildDate = lastBuildDate
        # Avoid a shared mutable default: each instance gets its own list.
        if categories is None:
            categories = []
        self.categories = categories
        self.generator = generator
        self.docs = docs
        self.cloud = cloud
        self.ttl = ttl
        self.image = image
        self.rating = rating
        self.textInput = textInput
        self.skipHours = skipHours
        self.skipDays = skipDays
        if items is None:
            items = []
        self.items = items
    def publish(self, handler):
        """Emit the complete <rss><channel>...</channel></rss> tree."""
        handler.startElement("rss", self.rss_attrs)
        handler.startElement("channel", self.element_attrs)
        # The three required channel fields.
        _element(handler, "title", self.title)
        _element(handler, "link", self.link)
        _element(handler, "description", self.description)
        self.publish_extensions(handler)
        _opt_element(handler, "language", self.language)
        _opt_element(handler, "copyright", self.copyright)
        _opt_element(handler, "managingEditor", self.managingEditor)
        _opt_element(handler, "webMaster", self.webMaster)
        # Bare datetimes are wrapped so they render as RFC 822 dates.
        pubDate = self.pubDate
        if isinstance(pubDate, datetime.datetime):
            pubDate = DateElement("pubDate", pubDate)
        _opt_element(handler, "pubDate", pubDate)
        lastBuildDate = self.lastBuildDate
        if isinstance(lastBuildDate, datetime.datetime):
            lastBuildDate = DateElement("lastBuildDate", lastBuildDate)
        _opt_element(handler, "lastBuildDate", lastBuildDate)
        for category in self.categories:
            # Bare strings are promoted to Category objects.
            if isinstance(category, basestring):
                category = Category(category)
            category.publish(handler)
        _opt_element(handler, "generator", self.generator)
        _opt_element(handler, "docs", self.docs)
        if self.cloud is not None:
            self.cloud.publish(handler)
        ttl = self.ttl
        if isinstance(self.ttl, int):
            ttl = IntElement("ttl", ttl)
        # BUG FIX: the element name was previously "tt", emitting an
        # invalid <tt> element instead of the RSS 2.0 <ttl> element.
        _opt_element(handler, "ttl", ttl)
        if self.image is not None:
            self.image.publish(handler)
        _opt_element(handler, "rating", self.rating)
        if self.textInput is not None:
            self.textInput.publish(handler)
        if self.skipHours is not None:
            self.skipHours.publish(handler)
        if self.skipDays is not None:
            self.skipDays.publish(handler)
        for item in self.items:
            item.publish(handler)
        handler.endElement("channel")
        handler.endElement("rss")
    def publish_extensions(self, handler):
        # Derived classes can hook into this to insert
        # output after the three required fields.
        pass
class RSSItem(WriteXmlMixin):
    """Publish an RSS Item"""
    element_attrs = {}
    def __init__(self,
                 title = None, # string
                 link = None, # url as string
                 description = None, # string
                 author = None, # email address as string
                 categories = None, # list of string or Category
                 comments = None, # url as string
                 enclosure = None, # an Enclosure
                 guid = None, # a unique string
                 pubDate = None, # a datetime
                 source = None, # a Source
                 ):
        # RSS 2.0 requires an item to carry at least one of these two.
        if title is None and description is None:
            raise TypeError(
                "must define at least one of 'title' or 'description'")
        self.title = title
        self.link = link
        self.description = description
        self.author = author
        # Avoid a shared mutable default: each instance gets its own list.
        if categories is None:
            categories = []
        self.categories = categories
        self.comments = comments
        self.enclosure = enclosure
        self.guid = guid
        self.pubDate = pubDate
        self.source = source
        # It sure does get tedious typing these names three times...
    def publish(self, handler):
        """Emit this item as an <item> element via the SAX handler."""
        handler.startElement("item", self.element_attrs)
        _opt_element(handler, "title", self.title)
        _opt_element(handler, "link", self.link)
        self.publish_extensions(handler)
        _opt_element(handler, "description", self.description)
        _opt_element(handler, "author", self.author)
        for category in self.categories:
            # Bare strings are promoted to Category objects.
            if isinstance(category, basestring):
                category = Category(category)
            category.publish(handler)
        _opt_element(handler, "comments", self.comments)
        if self.enclosure is not None:
            self.enclosure.publish(handler)
        _opt_element(handler, "guid", self.guid)
        # Bare datetimes are promoted to RFC 822 DateElements.
        pubDate = self.pubDate
        if isinstance(pubDate, datetime.datetime):
            pubDate = DateElement("pubDate", pubDate)
        _opt_element(handler, "pubDate", pubDate)
        if self.source is not None:
            self.source.publish(handler)
        handler.endElement("item")
    def publish_extensions(self, handler):
        # Derived classes can hook into this to insert
        # output after the title and link elements
        pass
| Python |
#coding=utf-8
'''
从sis读取数据并保存到目录
fid
yyyymm
dd
文件:
pid.post
[
{author: '', content: ''},
...
]
pid.index, pid.comment.index
word,flag,count
执行:
crawl:
1) 读取帖子内容
2) 写入.post文件, 写入.index文件
scan:
1) 扫描待下载的post
download:
1) 读取传入的.post文件
2) 下载其中资源
sync:
1) 存在.suc文件
2)
TODO:
图片下载需要处理302转接的情况: 如
http://thumbsnap.com/i/8VELKPIn.jpg
西唯美253
东唯美186
Created on 2013-2-2
@author: Administrator
'''
import sys
import os
sys.path.append(os.path.abspath(os.path.dirname(os.path.join(__file__, '../../'))))
from ripper.parser.AisexParser import AisexParser
from ripper.bencode import getTorrentInfo #@UnresolvedImport
from ripper.syncconfig import sync_db_host, sync_db_user, sync_db_pwd, sync_db_name
from ftplib import FTP
from optparse import OptionParser
from ripper.handler import HttpHandler
from ripper.handler.images import Thumbnail
from ripper.parser.SisParser import SisParser
import datetime
import hashlib
import json
import pprint
import random
import time
import urlparse
import psutil
#sys.path.append('/root/ripper')
#pth = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../../')
DOWNLOAD_FAIL_TO_ABANDON = 2  # max retries for a whole post task
DOWNLOAD_IMAGE_FAIL_TO_ABANDON = 2  # max retries per image
DOWONLOAD_TIMEOUT = 60*5  # timeout per post task, seconds (name typo kept for callers)
# Tokens ignored when indexing post content.
spam = ['img', 'src', 'http', 'www', 'info', 'jpg', 'border', 'jpeg',
        'onclick', 'zoom', 'this', 'onload', 'attachimg', 'load', 'alt', 'br',
        'jdghbvdscsjhkiekyvrfgh', 'jkhfdgnyverb', 'dkjfhgd',
        'target', 'blank', 'font', 'image', 'nbsp', ]
# File names inside torrents that are treated as junk.
TFSPAM = ['padding_file', u'封杀SIS001', u'知道真相']
# Post file names that are always skipped.
POSTSPAMS = ['ai_read.post', ]
# Posts whose title contains any of these markers are skipped.
POST_TITLE_SPAM = ['共襄盛舉', ]
dataroot = '/home/datas'
ftphost = 'fuckavmm.com'
error_log = 'errors.txt'
# Create the data root on first run (makedirs also creates parents).
if not os.path.exists(dataroot):
    os.makedirs(dataroot)
# Marker-file suffixes describing a post's processing state.
FLAG_PROGRESS = '.progress'  # resource download in progress
FLAG_ERROR = '.error'  # resource download failed
FLAG_SUC = '.success'  # resource download finished
FLAG_TRANSFERED_FILE = '.transfered'  # resources uploaded to the file server via ftp
FLAG_TRANSFERED_DATA = '.ftransfered'  # data imported into mysql on the file server
FLAG_TRANSFERED_RSS = '.rss'  # rss feed generated
def checkdir(fid, yyyymm, dd, _dataroot):
    """Ensure the <dataroot>/<fid>/<yyyymm>/<dd> directory exists and
    return its path.

    Uses a single os.makedirs instead of the original three mkdir
    steps; the result is identical but also tolerates a missing
    intermediate level.
    """
    fileroot = os.path.join(_dataroot, fid, yyyymm, dd)
    if not os.path.exists(fileroot):
        os.makedirs(fileroot)
    return fileroot
def fetchpost(url, btype, fid):
    """Crawl a forum listing page and persist each post as a
    <pid>.post JSON file under dataroot/<fid>/<yyyymm>/<dd>/.

    url   -- listing page to crawl
    btype -- board type, 'sis' or 'aisex'; selects the parser
    fid   -- forum id used for the on-disk directory layout

    BUG FIX: the title spam filter used `continue` inside its inner
    `for spm` loop, which only advanced to the next spam marker and
    never skipped the post; the check now runs at post level (and
    before the expensive content fetch).
    """
    # Use a proxy only on Windows (development box).
    needProxy = False
    if os.name == 'nt':
        needProxy = True
    parser = None
    if 'sis' == btype:
        parser = SisParser(None, needProxy=needProxy)
    elif 'aisex' == btype:
        parser = AisexParser(None, needProxy=needProxy)
    for post in parser.parse_obj_list(url, fid):
        fid = post['forumId']
        datestr = post['postDate']
        purl = post['postDetailUrl']
        ymd = datestr.split('-')
        yyyymm = ymd[0] + '%02d' % int(ymd[1])
        dd = '%02d' % int(ymd[2])
        fileroot = checkdir(fid, yyyymm, dd, dataroot)
        pid = parser.getpid(purl)  # post id
        filename = os.path.join(fileroot, '%s.post' % pid)
        # Skip posts that were already crawled (.post file exists).
        if os.path.exists(filename):
            continue
        # Spam filter: skip the whole post when its title contains any
        # spam marker.
        if any(spm in post['title'] for spm in POST_TITLE_SPAM):
            continue
        try:
            ct = parser.get_all_content(purl)
        except Exception as err:
            # Parse failure: append to the error log and move on.
            # Mode 'a' creates the file when it does not exist yet.
            with open(error_log, 'a') as f:
                f.write('%s, %s \n' % (err, purl))
            continue
        post['content'] = ct
        # Download torrents for boards that carry attachments.
        if is_download_forum(fid):
            fdir = os.path.join(fileroot, 'resource')
            if not os.path.exists(fdir):
                os.mkdir(fdir)
            names = parser.get_torrents(purl, pid, fdir)
            if names != []:
                torrentdict = post.setdefault('torrents', {})
                for fname in names:
                    # Record the torrent's file list and size (MB).
                    tfilename = os.path.join(fdir, fname)
                    flist, mbsize = getTorrentInfo(tfilename)
                    torrentdict[fname] = {'files': flist, 'size': mbsize}
        # Persist the post as JSON.
        with open(filename, 'w') as f1:
            f1.write(json.dumps(post, ensure_ascii=False))
def is_download_forum(fid):
    """Return True when *fid* (int or numeric string) is a board whose
    attachments (torrents) should be downloaded."""
    download_fids = (230, 27, 143, 229, 58, 231, 20, 25, 77, 16, 4, 5, 11, 6, 345)
    return int(fid) in download_fids
def indexcontent(content, pseg):
    """Segment *content* with jieba's part-of-speech tokenizer and
    count the interesting words.

    Only words tagged i/n/a/nr are counted; 'br' and tokens from the
    module-level spam list are dropped.  Returns ('word,flag', count)
    pairs sorted by count, descending.
    """
    counts = {}
    wanted_flags = ('i', 'n', 'a', 'nr')
    for seg in pseg.cut(content):
        word = seg.word
        if word == 'br':
            continue
        if seg.flag not in wanted_flags:
            continue
        if word in spam:
            continue
        key = word + ',' + seg.flag
        counts[key] = counts.get(key, 0) + 1
    return sorted(counts.iteritems(), key=lambda item: item[1], reverse=True)
def incr(dct, key, num=1, default=0):
    """Add *num* to dct[key], seeding a missing key with *default*."""
    dct[key] = dct.get(key, default) + num
def gen_download_resource(fid=None):
''' 扫描需要下载的资源
if a file with suffix FLAG_PROGRESS exists, ignore.
'''
posts = []
scanroot = dataroot
if fid is not None:
scanroot = os.path.join(scanroot, fid)
for dirpath,_, filelist in os.walk(scanroot):
for f in filelist:
dirpath = os.path.abspath(dirpath)
fname = os.path.join(dirpath, f)
fname = fname.replace('\\', '/')
if '.post' in fname:
progressfile = fname.replace('.post', FLAG_PROGRESS)
errorfile = fname.replace('.post', FLAG_ERROR)
sucfile = fname.replace('.post', FLAG_SUC)
# 下载完成的
if os.path.exists(sucfile):
continue
# 多次出错放弃的
if os.path.exists(errorfile):
continue
# 未开始下载的
if os.path.exists(progressfile) == False:
posts.append(fname)
else:
# 下载过一次并未完成的
# 出错次数小于设定值的
ct = open(progressfile).read().split('\n')
if len(ct) > 1: # 有fail记录
ct = ct[0]
retrys = int(ct)
# 任务重试次数超过限制
if retrys > DOWNLOAD_FAIL_TO_ABANDON :
# 根据内容判断是否失败
if isdownloadfailed(fname) == True:
with open(errorfile, 'w') as fff:
fff.write('damn')
os.remove(progressfile)
else:
# 根据规则判断这头已经是个下载成功的帖子了
with open(sucfile, 'w') as f:
f.write('ok')
os.remove(progressfile)
else:
posts.append(fname)
# 输出到屏幕
for p in posts:
print p
def isdownloadfailed(postfile):
    """Heuristically decide whether a post's image downloads failed.

    Failure criteria: fewer local images than required (3 normally, 1
    for torrent boards), or more than two images that all share one
    byte size while at least one is a gif (a tell-tale of dead
    placeholder images).
    """
    post = readpost(postfile)
    local = post['content'][0].get('localimages', {})
    required = 1 if is_download_forum(post['forumId']) else 3
    if len(local) < required:
        return True
    sizes = set(info['size'] for info in local.values())
    if len(sizes) == 1 and len(local) > 2:
        joined_names = ''.join(info['filename'] for info in local.values())
        if '.gif' in joined_names:
            return True
    return False
def readpost(postfile):
    """Load a .post file and return the decoded JSON object.

    Uses a context manager so the file handle is closed even when JSON
    decoding raises (the original leaked the handle in that case).
    """
    with open(postfile) as fobj:
        return json.load(fobj)
def download_resource(postfile):
    '''Download the image resources referenced by a .post file.

    Writes images (and thumbnails) into a sibling "resource" dir,
    records per-image metadata back into the .post JSON, then writes a
    .success marker -- or a .progress marker holding the retry counter
    and the failed urls.  Always terminates the process via
    sys.exit(9) when done (the caller treats each post as one job).
    '''
    def saveObj(obj):
        # Persist the (possibly updated) post JSON back to disk.
        with open(postfile, 'w') as f1:
            f1.write(json.dumps(obj, ensure_ascii=False))
    pid = os.path.basename(postfile).split('.')[0]
    fdir = os.path.dirname(postfile)
    fdir = os.path.join(fdir, 'resource')
    if not os.path.exists(fdir):
        os.mkdir(fdir)
    handler = HttpHandler.HttpHandler(fdir)
    # Names of images already on disk, used to skip re-downloads.
    allimgs = [f for f in os.listdir(fdir) if '.jpg' in f]
    # images
    fpostfile = open(postfile)
    obj = json.loads(fpostfile.read())
    fpostfile.close()
    download_errors = obj.setdefault('errors', {}) # per-url failure count
    imgs = obj['content'][0]['images']
    # Cap the number of images for torrent boards.
    # NOTE(review): the original comment said "more than 15 take the
    # first 15" but the code uses 5 -- confirm which is intended.
    if len(imgs) > 5:
        fid, _, _ = get_info_from_path(postfile)
        if is_download_forum(fid):
            imgs = imgs[:5]
    # Dedupe, then randomize download order.
    imgs = list(set(imgs))
    random.shuffle(imgs)
    localimgs = obj['content'][0].setdefault('localimages', {})
    total = len(imgs)
    ct = 0
    fails = []
    for i, imgurl in enumerate(imgs):
        # Give up on urls that already failed too often.
        if download_errors.get(imgurl, 0) >= DOWNLOAD_IMAGE_FAIL_TO_ABANDON:
            fails.append(imgurl)
            continue
        # Skip urls whose thumbnail already exists locally.
        if is_image_downloaded(imgurl, allimgs) == True:
            ct +=1
            print '%s skip %d/%d' % (imgurl, ct, total)
            continue
        fname, fl, isize = handler.getImage_safe(imgurl, retrys=3)
        w, h = isize
        # fl == -1 signals a failed download.
        if fl == -1:
            print 'dowload failed : ', imgurl
            fails.append(imgurl)
            incr(download_errors, imgurl)
            saveObj(obj)
            continue
        tw, th = create_thumbnail(fname, fdir)
        ct +=1
        # Record the image's metadata (sizes of original + thumbnail).
        imginfo = {}
        imginfo['filename'] = os.path.basename(fname)
        imginfo['width'] = w
        imginfo['height'] = h
        imginfo['twidth'] = tw
        imginfo['theight'] = th
        imginfo['size'] = fl
        localimgs[i] = imginfo
        saveObj(obj)
    print '%s images downloaded %d/%d' % (pid, ct, total)
    # Record the run result in a marker file.
    progressfile = postfile.replace('.post', FLAG_PROGRESS)
    sucfile = postfile.replace('.post', FLAG_SUC)
    if len(fails) > 0:
        # Failures: bump the retry counter (line 1) and store the
        # failed urls (remaining lines).
        counter = 0
        if os.path.exists(progressfile):
            ct = open(progressfile).read()
            counter = ct.split('\n')[0]
            counter = int(counter)
        with open(progressfile, 'w') as f:
            counter += 1
            f.write('%d\n' % counter)
            fails = '\n'.join(fails)
            f.write(fails)
    else:
        with open(sucfile, 'w') as f:
            f.write('OK')
        if os.path.exists(progressfile):
            os.remove(progressfile)
    print 'exiting..'
    # interrupt_main()
    # raise StopThreadException()
    sys.exit(9)
def get_info_from_path(pfile):
    """Extract (fid, yyyymm, dd) from a post file path laid out as
    .../<fid>/<yyyymm>/<dd>/<pid>.post.  Accepts both path separators."""
    parts = os.path.dirname(pfile).replace('\\', '/').split('/')
    parts = [p for p in parts if p]
    return parts[-3], parts[-2], parts[-1]
class StopThreadException(Exception):
    """Raised to abort a worker thread.

    BUG FIX: the original __init__ evaluated ``Exception('')``,
    creating and discarding a fresh Exception instead of initializing
    the base class; plain inheritance is all that is needed.
    """
    pass
def is_image_downloaded(imgurl, imgs):
    """Return True when a thumbnail ('crop_' file) already exists
    among the local file names *imgs* for imgurl's hash prefix."""
    prefix = gethash(imgurl)
    return any('crop_' in name for name in imgs if prefix in name)
def gethash(s):
    """MD5 hex digest of *s*, used as a stable local-filename prefix."""
    digest = hashlib.md5()
    digest.update(s)
    return digest.hexdigest()
# 缩略图
def create_thumbnail(org, destDir):
    """Create a 'crop_'-prefixed thumbnail of *org* inside *destDir*.

    Returns the (width, height) reported by Thumbnail.create, or
    (0, 0) on any failure (best-effort: errors are printed, not
    raised).
    """
    try:
        source = os.path.join(destDir, org)
        target = 'crop_' + os.path.basename(source)
        thumb = Thumbnail(source, destDir, target)
        return thumb.create(True)
    except Exception as err:
        print(err)
        return 0, 0
def sync(gen_list=False, postfile=None, fid=None):
    '''Sync downloaded resources to the resource server over FTP.

    gen_list -- when True, only print the .post paths that still need
                transferring (those with a .success marker but no
                .transfered marker) and return.
    postfile -- the .post file whose images/torrents are uploaded.
    fid      -- optional forum id restricting the gen_list scan.
    '''
    # Generate the to-transfer list and stop.
    if True == gen_list:
        droot = dataroot
        if fid is not None:
            droot = os.path.join(droot, fid)
        for dirpath, _, flist in os.walk(droot):
            dirpath = os.path.abspath(dirpath)
            for fname in flist:
                fname = os.path.join(dirpath, fname)
                if FLAG_SUC in fname:
                    transfedfile = fname.replace(FLAG_SUC, FLAG_TRANSFERED_FILE)
                    # print transfedfile
                    if os.path.exists(transfedfile) == False:
                        print fname.replace(FLAG_SUC, '.post').replace('\\', '/')
        return
    if None == postfile:
        print 'Error: need a .post file to perform syncronization.'
        exit(0)
    pid = os.path.basename(postfile).split('.')[0]
    fid, yyyymm, dd = get_info_from_path(postfile)
    fpostfile = open(postfile)
    obj = json.loads(fpostfile.read())
    fpostfile.close()
    # The ftp directory layout is:
    # /public_html/datas/fid/yyyy/mm/dd/
    print 'loginftp...'
    # NOTE(review): plaintext FTP credentials hard-coded in source --
    # move them to a config file or environment variables.
    ftp = FTP(ftphost, 'ihamster', '123z123')
    print 'welcome.'
    print 'switching dir...'
    ftp.cwd('public_html/datas')
    # Change directories - create if it doesn't exist
    def chdir(dir):
        if directory_exists(dir) is False: # (or negate, whatever you prefer for readability)
            try:
                ftp.mkd(dir)
            except Exception,ex:
                print ex
                pass
        ftp.cwd(dir)
    # Check if directory exists (in current location)
    def directory_exists(dir):
        filelist = []
        ftp.retrlines('LIST',filelist.append)
        for f in filelist:
            if f.split()[-1] == dir and f.upper().startswith('D'):
                return True
        return False
    # Descend into fid/yyyymm/dd, creating levels as needed.
    chdir(fid)
    chdir(yyyymm)
    chdir(dd)
    print 'cwd done.'
    imgs = obj['content'][0].get('localimages', {})
    total = len(imgs)
    print '%s has %d images to transfer... ' % (pid, total)
    # Upload in index order; each image may carry crop_ / cropsmall_
    # thumbnail siblings.
    imgs = sorted(imgs.iteritems(), key=lambda a: int(a[0]))
    transfered = []
    for i, fname in enumerate(imgs):
        fname = fname[1]['filename']
        cropname = 'crop_'+fname
        cropname1 = 'cropsmall_'+fname
        fullname = os.path.join(os.path.dirname(postfile), 'resource', fname)
        fullname_crop = os.path.join(os.path.dirname(postfile), 'resource', cropname)
        fullname_crop1 = os.path.join(os.path.dirname(postfile), 'resource', cropname1)
        if not os.path.exists(fullname):continue
        print '%s => remote' % fullname
        fp = open(fullname, 'rb')
        ftp.storbinary('STOR ' + fname, fp)
        transfered.append(fullname)
        fp.close()
        if os.path.exists(fullname_crop):
            fp = open(fullname_crop, 'rb')
            ftp.storbinary('STOR ' + cropname, fp)
            transfered.append(fullname_crop)
            fp.close()
        if os.path.exists(fullname_crop1):
            fp = open(fullname_crop1, 'rb')
            ftp.storbinary('STOR ' + cropname1, fp)
            transfered.append(fullname_crop1)
            fp.close()
        print 'ftp: uploading %d/%d' % (i, total)
        i += 1
    # Torrent files ride along in the same remote directory.
    if 'torrents' in obj:
        for torrentfile in obj['torrents'].keys():
            fullname = os.path.join(os.path.dirname(postfile), 'resource', torrentfile)
            if not os.path.exists(fullname):continue
            imgs.append(torrentfile)
            fp = open(fullname, 'rb')
            ftp.storbinary('STOR ' + torrentfile, fp)
            fp.close()
            transfered.append(fullname)
    ftp.close()
    # Delete the local copies that were uploaded.
    for f in transfered:
        print 'removing' ,f
        try:
            os.remove(f)
        except Exception,ex:
            print ex
            pass
    # Mark the post as transferred.
    with open(postfile.replace('.post', FLAG_TRANSFERED_FILE), 'w') as transfredfile:
        transfredfile.write('ok')
    print 'post# %s done.' % pid
def import_data(fids=None, islist=False):
    ''' Import collected posts into the front-end database.

    fids   -- comma-separated forum id string, e.g. '230, 64'.
    islist -- when True only print the pending .post paths; no insert.

    Scans dataroot/<fid>/<yyyymm>/<dd>/ for posts that carry the
    success flag but not yet the FLAG_TRANSFERED_DATA marker, inserts
    post/reply/image/torrent rows, then writes the marker files.
    '''
    import MySQLdb
    from datetime import date
    # Walk the data tree collecting successfully fetched posts.
    postfiles = []
    posts = []
    for fid in fids.split(','):
        droot = dataroot
        fid = fid.strip()
        droot = os.path.join(droot, fid)
        if not os.path.exists(droot):continue
        for yyyymm in os.listdir(droot):
            yyyymm = os.path.join(droot, yyyymm)
            for dd in os.listdir(yyyymm):
                dd = os.path.join(yyyymm, dd)
                for fname in os.listdir(dd):
                    if FLAG_SUC in fname:
                        fname = os.path.join(dd, fname)
                        postfile = fname.replace(FLAG_SUC, '.post')
                        transferedfile = postfile.replace('.post', FLAG_TRANSFERED_DATA)
                        # skip posts already imported or blacklisted
                        if os.path.exists(transferedfile) == False:
                            if os.path.basename(postfile) in POSTSPAMS: continue
                            postfiles.append(postfile)
                            if islist == True:
                                print postfile
                            with open(postfile, 'r') as pf:
                                pftext = pf.read()
                                posts.append(json.loads(pftext))
    if True == islist:return
    print 'total %d posts to import... ' % len(posts)
    # Insert templates for each target table.
    posttpl = 'insert into post values (%(pid)s,%(titleSimple)s,%(postDate)s,%(forumId)s,%(catid)s,%(author)s,%(content)s,%(code)s,%(postDetailUrl)s,%(preview)s,%(tw)s,%(th)s )'
    replytpl = 'insert into reply values (%(rid)s,%(pid)s,%(author)s,%(content)s,%(iorder)s,%(fid)s )'
    imgtpl = 'insert into image values (%(iid)s,%(filename)s,%(pid)s,%(width)s,%(height)s,%(twidth)s,%(theight)s,%(size)s )'
    torrenttpl = 'insert into torrent values (%(tid)s,%(pid)s,%(fid)s,%(filename)s,%(size)s )'
    torrentfiletpl = 'insert into torrent_files values (%(tfid)s,%(filename)s,%(suffix)s,%(pid)s ,%(fid)s )'
    postobjs = []
    replyobjs = []
    imageobjs = []
    torrentobjs = []
    torrentfileobjs = []
    for i, o in enumerate(posts):
        # post
        obj = {}
        pid = os.path.basename(postfiles[i]).split('.')[0]
        obj['pid'] = pid
        obj['forumId'] = o['forumId']
        # NOTE(review): special-cased id -- presumably the same post id
        # appears in more than one forum; verify before removing
        if pid =='sis_4806670':
            pid = pid + '_' + obj['forumId']
            obj['pid'] = pid
        y, m, d = map(lambda a:int(a), o['postDate'].split('-'))
        obj['postDate'] = date(y, m, d)
        obj['titleSimple'] = o['titleSimple']
        obj['catid'] = o.get('tag', '')
        obj['author'] = o['content'][0]['author']
        obj['content'] = o['content'][0]['content']
        obj['code'] = get_post_id_code(pid)
        obj['postDetailUrl'] = o['postDetailUrl']
        # Preview: the largest local image of the post.
        fn, tw, th = getpreviewimge(o)
        obj['preview'] = fn
        obj['tw'] = tw
        obj['th'] = th
        postobjs.append(obj)
        # replys -- every content entry after the first is a reply
        if len(o['content']) > 1:
            iorder = 0
            for reply in o['content'][1:]:
                robj = {'rid': None, 'pid': pid, 'iorder': iorder, 'fid': o['forumId']}
                robj['author'] = reply['author']
                robj['content'] = reply['content']
                iorder += 1
                replyobjs.append(robj)
        # image
        # Images must be inserted in their original order.
        resourceroot = os.path.dirname(postfiles[i])
        resourceroot = os.path.join(resourceroot, 'resource')
        if 'localimages' in o['content'][0] :
            # sort by the numeric key to restore the original order
            imgs = sorted(o['content'][0]['localimages'].items(), key=lambda a:int(a[0]))
            for _img in imgs:
                img = _img[1]
                ifile = os.path.join(resourceroot, img['filename'])
                # img['size'] = os.path.getsize(ifile)
                img['iid'] = None # auto increment
                img['pid'] = pid
                # store the path relative to dataroot, '/'-separated
                img['filename'] = ifile.replace(dataroot, '').replace('\\', '/')
                imageobjs.append(img)
        # torrent files attached to the post
        if 'torrents' in o :
            for torrentfile, tfiles in o['torrents'].items():
                tobj = {'tid': None, 'fid': o['forumId'], 'pid': pid}
                tfile = os.path.join(resourceroot, torrentfile)
                tobj['filename'] = tfile.replace(dataroot, '').replace('\\', '/')
                tobj['size'] = tfiles['size']
                if tobj['size'] == None:
                    tobj['size'] = ''
                torrentobjs.append(tobj)
                if tfiles['files'] == None: continue
                for fname in tfiles['files']:
                    # drop spam file names advertised inside torrents
                    isspam = False
                    for spam in TFSPAM:
                        if spam in fname:
                            isspam = True
                            break
                    if isspam:continue
                    fobj = {'tfid': None, 'fid': o['forumId'], 'pid': pid}
                    fobj['filename'] = fname
                    fobj['suffix'] = fname.split('.')[-1]
                    torrentfileobjs.append(fobj)
    print 'total %d replies to import...' % len(replyobjs)
    print 'total %d images to import...' % len(imageobjs)
    print 'total %d torrents to import...' % len(torrentobjs)
    print 'total %d torrent_files to import...' % len(torrentfileobjs)
    db = MySQLdb.connect(sync_db_host, sync_db_user, sync_db_pwd, sync_db_name)
    db.set_character_set('utf8')
    # dbc.execute('SET NAMES utf8;')
    # dbc.execute('SET CHARACTER SET utf8;')
    # dbc.execute('SET character_set_connection=utf8;')
    cur = db.cursor()
    print 'transfering data...'
    # Posts are inserted one by one so a duplicate-key error on a
    # single post does not abort the whole batch.
    for pobj in postobjs:
        try:
            cur.executemany(posttpl, [pobj, ])
        except Exception, ex:
            print ex
            continue
    cur.executemany(replytpl, replyobjs)
    cur.executemany(imgtpl, imageobjs)
    cur.executemany(torrenttpl, torrentobjs)
    cur.executemany(torrentfiletpl, torrentfileobjs)
    # Mark every scanned post as imported.
    for pf in postfiles:
        with open(pf.replace('.post', FLAG_TRANSFERED_DATA), 'w') as ff:
            ff.write('ok')
    db.commit()
    cur.close()
    db.close()
    print 'sync data done.'
def getpreviewimge(o):
    '''Pick the largest local image of a post as its preview.

    Returns (path, thumb_width, thumb_height) where path is
    /<fid>/<yyyymm>/<dd>/crop_<filename>, or ('', 0, 0) when the post
    has no local images.
    '''
    first = o['content'][0]
    images = first.get('localimages')
    if not images:
        return '', 0, 0
    # the image with the biggest file size wins
    _, best = max(images.items(), key=lambda item: int(item[1]['size']))
    ymd = o['postDate'].split('-')
    month_dir = ymd[0] + '%02d' % int(ymd[1])
    day_dir = '%02d' % int(ymd[2])
    preview = 'crop_' + best['filename']
    path = '/' + '/'.join((o['forumId'], month_dir, day_dir, preview))
    return path, best['twidth'], best['theight']
def get_post_id_code(pid):
    '''Derive the obfuscated access code of a post: md5(pid + salt).'''
    salted = pid + '___hash__str'
    return gethash(salted)
#def printimage(postfile):
# postfile = postfile.replace(FLAG_ERROR, '.post')
# with open(postfile) as f:
# post = json.loads(f.read())
# imgs = post['content'][0].get('localimages', {})
# for img in imgs.values():
# print os.path.join(os.path.dirname(postfile), 'resource', img['filename'])\
# , img['width'], img['height'], img['size']
#
#def checkftpdeletefailimage(imgpath):
# # /home/datas/fid/yyyymm/dd/resource/basename
# filename = os.path.basename(imgpath)
# obj = {}
# dirroot = os.path.join(os.path.dirname(imgpath), '..')
# for f in os.listdir(dirroot):
# if '.post' in f:
# f = os.path.join(dirroot ,f)
# with open(f, 'r') as ff:
# obj = json.loads(ff.read())
# for imginfo in obj['content'][0].get('localimages', {}).values():
# if filename == imginfo['filename']:
# print f
# break
# if obj != {}:
# break
# pprint.pprint(obj)
def exportrss(fids, num):
import PyRSS2Gen
if None == num:
num = -1
CAT_AISA_CEN = u'亚洲有码影片'
CAT_AISA_GALL = u'亚洲图片'
CAT_AISA_SIS = u'亚洲ss'
CATS = {230: CAT_AISA_CEN, 4: CAT_AISA_CEN, 64: CAT_AISA_GALL, 230:CAT_AISA_SIS, 58: CAT_AISA_SIS}
rssfile = 'export.xml'
droot = dataroot
# 遍历数据目录收集采集成功的post
postfiles = []
posts = []
for fid in fids.split(','):
fid = fid.strip()
droot = os.path.join(droot, fid)
if not os.path.exists(droot):continue
for yyyymm in os.listdir(droot):
yyyymm = os.path.join(droot, yyyymm)
for dd in os.listdir(yyyymm):
dd = os.path.join(yyyymm, dd)
for fname in os.listdir(dd):
if FLAG_SUC in fname:
fname = os.path.join(dd, fname)
postfile = fname.replace(FLAG_SUC, '.post')
transferedfile = postfile.replace('.post', FLAG_TRANSFERED_FILE)
# ftp完成的post
if os.path.exists(transferedfile) == True:
rssfile_ = postfile.replace('.post', FLAG_TRANSFERED_RSS)
if os.path.exists(rssfile_) == True: continue
postfiles.append(postfile)
print postfile
with open(postfile, 'r') as pf:
pftext = pf.read()
posts.append(json.loads(pftext))
rss = PyRSS2Gen.RSS2(
title = "Andrew's PyRSS2Gen feed",
link = "#",
description = "The latest news about PyRSS2Gen, a "
"Python library for generating RSS2 feeds",
lastBuildDate = datetime.datetime.now(),
items = [])
for ii, o in enumerate(posts[:int(num)]):
if ii % 3000 == 0 and 0 != ii:
rss.write_xml(open(rssfile+'.'+str(ii), "w"))
del rss.items[:]
fid, yyyymm, dd = get_info_from_path(postfiles[ii])
yyyy = yyyymm[:4]
mm = yyyymm[4:]
fid = o['forumId']
# categories = [ PyRSS2Gen.Category(CATS[int(fid)], 'fuckavmm.com'), ]
categories = [ CATS[int(fid)], ]
title = o['titleSimple']
ct = o['content'][0]['content']
previewfn, _, _ = getpreviewimge(o)
if 'localimages' in o['content'][0]:
images = o['content'][0]['localimages']
imgs = []
for i in images.values():
tpl = '<img src="http://img2.fuckavmm.com/%s" />'
fullname = '/datas/%s/%s/%s/%s' % (fid, yyyymm, dd, i['filename'])
imgs.append(tpl % fullname)
imgtext = '\n<br/>'.join(imgs)
ct = ct + '\n<br/>' + imgtext
# fullfilename, filename, size
ttext = '<br/><br/><br/><a href="http://img3.fuckavmm.com/%s">%s(%s)</a>'
if 'torrents' in o :
fname, finfo = o['torrents'].items()[0]
fullname = '/datas/%s/%s/%s/%s' % (fid, yyyymm, dd, fname)
size = finfo['size']
ttext = ttext % (fullname, u'点击下载种子文件', size)
ct += ttext
item = PyRSS2Gen.RSSItem(
title = title,
link = "#",
categories = categories,
description = ct,
enclosure = PyRSS2Gen.Image('http://img2.fuckavmm.com/datas/'+previewfn, '', ''),
guid = PyRSS2Gen.Guid("http://www.dalkescientific.com/news/030906-PyRSS2Gen.html"),
pubDate = datetime.datetime(int(yyyy), int(mm), int(dd), 0, 0))
rss.items.append(item)
pfile = postfiles[ii]
rssfile_ = pfile.replace('.post', FLAG_TRANSFERED_RSS)
with open(rssfile_, 'w') as rf:
rf.write('ok')
rss.write_xml(open(rssfile, "w"))
if __name__ == '__main__':
    # CLI dispatch. Subcommands:
    #   scan            -- print posts whose resources still need downloading
    #   download -i F   -- download the resources of one .post file
    #   sync list -f N  -- print posts pending FTP upload
    #   sync data [ls]  -- import collected posts into the database
    #   sync -i F       -- FTP-upload one post's resources
    #   rss -f N -n K   -- export synced posts as RSS
    #   (default)       -- fetch forum listing pages
#    print isdownloadfailed('K:\\home\\datas\\230\\201302\\14\\sis_4790708.post')
#    exit(0)
    usage = "usage: %prog [options] scan [-f] | download -i| sync -i {data [ls] -f| list}|rss -f -n"
    parser = OptionParser(usage)
    parser.add_option("-f", "--forumid", dest="fid",
                      help="forumid")
    parser.add_option("-p", "--page", dest="page",
                      help="page index starts from 1")
    parser.add_option("-n", "--num", dest="num",
                      help="top n for rss export")
    parser.add_option("-i", "--input", dest="input",
                      help="E:/datas/64/201301/23/4761542.post")
    (options, args) = parser.parse_args()
    # download resource
    if 'scan' in args:
        gen_download_resource(options.fid)
        exit(0)
    if 'download' in args:
        postfile = options.input
        if postfile == None:
            print 'need postfile '
            exit(0)
        download_resource(postfile)
        exit(0)
    # upload resource
    if 'sync' in args:
        # print the list of posts pending upload
        if 'list' in args :
            genlist = True
            sync(genlist, fid=options.fid)
            exit(0)
        # import collected data into the database
        if 'data' in args:
            fid = options.fid
            islist = False
            if 'ls' in args:
                islist = True
            import_data(fids=fid, islist=islist)
            exit(0)
        postfile = options.input
        sync(postfile=postfile)
        exit(0)
    # rss
    if 'rss' in args:
        fid = options.fid
        num = options.num
        exportrss(fid, num)
        exit(0)
#    if 'images' in args:
#        printimage(options.input)
#        exit(0)
#    if 'showpost' in args:
#        checkftpdeletefailimage(options.input)
#        exit(0)
    # Default: fetch the listing page of every requested forum.
    fids = options.fid
    page = options.page
    if page == None:
        page = '1'
    for fid in fids.split(','):
        fid = fid.strip()
        # these forum ids live on aisex.com; everything else on sexinsex
        aifids = (16, 4, 5, 11, 6)
        btype = 'sis'
        furl = 'http://sexinsex.net/bbs/forum-%s-%s.html'
        if int(fid) in aifids:
            btype = 'aisex'
            furl = 'http://www.aisex.com/bt/thread.php?fid=%s&page=%s'
            # furl = 'http://sexinsex.net/bbs/forumdisplay.php?fid=%s&page=%s'
        furl = furl % (fid, page)
        fetchpost(furl, btype, fid)
# time python fetch.py scan |awk 'BEGIN{10000*srand();} {printf "%s %s\n", rand(), $0}' | sort -k1n | awk '{gsub($1FS,""); print $0}'| xargs -n1 -P15 python fetch.py download -i
# download -i K:\\home\\datas\\64\\201301\\23\\sis_4761542.post
# 309 手机综合区
'''
jieba:中出, 无毛
'''
| Python |
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
Here, have some legalese:
Copyright (c) 2004-2010, Leonard Richardson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the the Beautiful Soup Consortium and All
Night Kosher Bakery nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.2.0"
__copyright__ = "Copyright (c) 2004-2010 Leonard Richardson"
__license__ = "New-style BSD"
from sgmllib import SGMLParser, SGMLParseError
import codecs
import markupbase
import types
import re
import sgmllib
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
try:
set
except NameError:
from sets import Set as set
#These hacks make Beautiful Soup able to parse XML with namespaces
# (stock sgmllib/markupbase only accept plain name characters; the
# widened patterns also allow ':', '-' and '.' inside tag and
# declaration names).
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
# Encoding used by default when rendering the parse tree back to byte strings.
DEFAULT_OUTPUT_ENCODING = "utf-8"
def _match_css_class(str):
"""Build a RE to match the given CSS class."""
return re.compile(r"(^|.*\s)%s($|\s)" % str)
# First, the classes that represent markup elements.
class PageElement(object):
    """Contains the navigational information for some part of the page
    (either a tag or a piece of text).

    Every element carries five links: parent, next/previous (document
    order, depth-first) and nextSibling/previousSibling (same parent).
    The mutating methods below (insert, extract, replaceWith) keep all
    five consistent."""

    def setup(self, parent=None, previous=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        self.previous = previous
        self.next = None
        self.previousSibling = None
        self.nextSibling = None
        # If the parent already has children, this element becomes the
        # new last sibling.
        if self.parent and self.parent.contents:
            self.previousSibling = self.parent.contents[-1]
            self.previousSibling.nextSibling = self

    def replaceWith(self, replaceWith):
        # Swap this element for replaceWith at the same position under
        # the same parent.
        oldParent = self.parent
        myIndex = self.parent.index(self)
        if hasattr(replaceWith, "parent")\
           and replaceWith.parent is self.parent:
            # We're replacing this element with one of its siblings.
            index = replaceWith.parent.index(replaceWith)
            if index and index < myIndex:
                # Furthermore, it comes before this element. That
                # means that when we extract it, the index of this
                # element will change.
                myIndex = myIndex - 1
        self.extract()
        oldParent.insert(myIndex, replaceWith)

    def replaceWithChildren(self):
        # Replace this element by its own children (i.e. unwrap it).
        myParent = self.parent
        myIndex = self.parent.index(self)
        self.extract()
        # Insert in reverse so repeated inserts at myIndex preserve the
        # original child order.
        reversedChildren = list(self.contents)
        reversedChildren.reverse()
        for child in reversedChildren:
            myParent.insert(myIndex, child)

    def extract(self):
        """Destructively rips this element out of the tree."""
        if self.parent:
            try:
                del self.parent.contents[self.parent.index(self)]
            except ValueError:
                pass

        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        lastChild = self._lastRecursiveChild()
        nextElement = lastChild.next

        if self.previous:
            self.previous.next = nextElement
        if nextElement:
            nextElement.previous = self.previous
        self.previous = None
        lastChild.next = None

        # Detach the sibling links as well; the element becomes the
        # root of its own standalone subtree.
        self.parent = None
        if self.previousSibling:
            self.previousSibling.nextSibling = self.nextSibling
        if self.nextSibling:
            self.nextSibling.previousSibling = self.previousSibling
        self.previousSibling = self.nextSibling = None
        return self

    def _lastRecursiveChild(self):
        "Finds the last element beneath this object to be parsed."
        lastChild = self
        while hasattr(lastChild, 'contents') and lastChild.contents:
            lastChild = lastChild.contents[-1]
        return lastChild

    def insert(self, position, newChild):
        # Plain strings are promoted to NavigableString so they carry
        # navigation pointers like any other node.
        if isinstance(newChild, basestring) \
            and not isinstance(newChild, NavigableString):
            newChild = NavigableString(newChild)

        # Clamp position so an out-of-range index appends.
        position =  min(position, len(self.contents))
        if hasattr(newChild, 'parent') and newChild.parent is not None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if newChild.parent is self:
                index = self.index(newChild)
                if index > position:
                    # Furthermore we're moving it further down the
                    # list of this object's children. That means that
                    # when we extract this element, our target index
                    # will jump down one.
                    position = position - 1
            newChild.extract()

        newChild.parent = self
        previousChild = None
        if position == 0:
            newChild.previousSibling = None
            newChild.previous = self
        else:
            previousChild = self.contents[position-1]
            newChild.previousSibling = previousChild
            newChild.previousSibling.nextSibling = newChild
            # document-order predecessor is the deepest last child of
            # the preceding sibling
            newChild.previous = previousChild._lastRecursiveChild()
        if newChild.previous:
            newChild.previous.next = newChild

        newChildsLastElement = newChild._lastRecursiveChild()

        if position >= len(self.contents):
            newChild.nextSibling = None

            # Appended at the end: the document-order successor is the
            # next sibling of the nearest ancestor that has one.
            parent = self
            parentsNextSibling = None
            while not parentsNextSibling:
                parentsNextSibling = parent.nextSibling
                parent = parent.parent
                if not parent: # This is the last element in the document.
                    break
            if parentsNextSibling:
                newChildsLastElement.next = parentsNextSibling
            else:
                newChildsLastElement.next = None
        else:
            nextChild = self.contents[position]
            newChild.nextSibling = nextChild
            if newChild.nextSibling:
                newChild.nextSibling.previousSibling = newChild
            newChildsLastElement.next = nextChild

        if newChildsLastElement.next:
            newChildsLastElement.next.previous = newChildsLastElement
        self.contents.insert(position, newChild)

    def append(self, tag):
        """Appends the given tag to the contents of this tag."""
        self.insert(len(self.contents), tag)

    def findNext(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        return self._findOne(self.findAllNext, name, attrs, text, **kwargs)

    def findAllNext(self, name=None, attrs={}, text=None, limit=None,
                    **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.nextGenerator,
                             **kwargs)

    def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        return self._findOne(self.findNextSiblings, name, attrs, text,
                             **kwargs)

    def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
                         **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.nextSiblingGenerator, **kwargs)
    fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x

    def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)

    def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
                        **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.previousGenerator,
                           **kwargs)
    fetchPrevious = findAllPrevious # Compatibility with pre-3.x

    def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears before this Tag in the document."""
        return self._findOne(self.findPreviousSiblings, name, attrs, text,
                             **kwargs)

    def findPreviousSiblings(self, name=None, attrs={}, text=None,
                             limit=None, **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear before this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.previousSiblingGenerator, **kwargs)
    fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x

    def findParent(self, name=None, attrs={}, **kwargs):
        """Returns the closest parent of this Tag that matches the given
        criteria."""
        # NOTE: We can't use _findOne because findParents takes a different
        # set of arguments.
        r = None
        l = self.findParents(name, attrs, 1)
        if l:
            r = l[0]
        return r

    def findParents(self, name=None, attrs={}, limit=None, **kwargs):
        """Returns the parents of this Tag that match the given
        criteria."""

        return self._findAll(name, attrs, None, limit, self.parentGenerator,
                             **kwargs)
    fetchParents = findParents # Compatibility with pre-3.x

    #These methods do the real heavy lifting.

    def _findOne(self, method, name, attrs, text, **kwargs):
        # Delegate to the matching findAll* with limit=1 and unwrap.
        r = None
        l = method(name, attrs, text, 1, **kwargs)
        if l:
            r = l[0]
        return r

    def _findAll(self, name, attrs, text, limit, generator, **kwargs):
        "Iterates over a generator looking for things that match."

        if isinstance(name, SoupStrainer):
            strainer = name
        # (Possibly) special case some findAll*(...) searches
        elif text is None and not limit and not attrs and not kwargs:
            # findAll*(True) -- fast path: every Tag, no strainer needed
            if name is True:
                return [element for element in generator()
                        if isinstance(element, Tag)]
            # findAll*('tag-name') -- fast path: match by name only
            elif isinstance(name, basestring):
                return [element for element in generator()
                        if isinstance(element, Tag) and
                        element.name == name]
            else:
                strainer = SoupStrainer(name, attrs, text, **kwargs)
        # Build a SoupStrainer
        else:
            strainer = SoupStrainer(name, attrs, text, **kwargs)
        results = ResultSet(strainer)
        g = generator()
        while True:
            try:
                i = g.next()
            except StopIteration:
                break
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                    if limit and len(results) >= limit:
                        break
        return results

    #These Generators can be used to navigate starting from both
    #NavigableStrings and Tags.
    # NOTE: each generator yields its final None before terminating;
    # _findAll's "if i:" check filters it out.
    def nextGenerator(self):
        i = self
        while i is not None:
            i = i.next
            yield i

    def nextSiblingGenerator(self):
        i = self
        while i is not None:
            i = i.nextSibling
            yield i

    def previousGenerator(self):
        i = self
        while i is not None:
            i = i.previous
            yield i

    def previousSiblingGenerator(self):
        i = self
        while i is not None:
            i = i.previousSibling
            yield i

    def parentGenerator(self):
        i = self
        while i is not None:
            i = i.parent
            yield i

    # Utility methods
    def substituteEncoding(self, str, encoding=None):
        # Replace the %SOUP-ENCODING% placeholder (used in <meta> and
        # processing-instruction content) with the actual encoding.
        encoding = encoding or "utf-8"
        return str.replace("%SOUP-ENCODING%", encoding)

    def toEncoding(self, s, encoding=None):
        """Encodes an object to a string in some encoding, or to Unicode.
        ."""
        if isinstance(s, unicode):
            if encoding:
                s = s.encode(encoding)
        elif isinstance(s, str):
            if encoding:
                s = s.encode(encoding)
            else:
                s = unicode(s)
        else:
            # Not a string at all: stringify first, then recurse.
            if encoding:
                s  = self.toEncoding(str(s), encoding)
            else:
                s = unicode(s)
        return s
class NavigableString(unicode, PageElement):
    """A text node: a unicode string that also carries the
    PageElement navigation pointers (parent, next, siblings)."""

    def __new__(cls, value):
        """Create a new NavigableString.

        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, unicode):
            return unicode.__new__(cls, value)
        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)

    def __getnewargs__(self):
        # Pickle support: reconstruct via __new__ from the encoded form.
        return (NavigableString.__str__(self),)

    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)

    def __unicode__(self):
        return str(self).decode(DEFAULT_OUTPUT_ENCODING)

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        # Pass encoding=None to get the unicode value back unencoded.
        if encoding:
            return self.encode(encoding)
        else:
            return self
class CData(NavigableString):
    """A CDATA section; renders wrapped in <![CDATA[...]]>."""

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        inner = NavigableString.__str__(self, encoding)
        return "<![CDATA[%s]]>" % inner
class ProcessingInstruction(NavigableString):
    """A processing instruction; renders as <?...?>, substituting the
    %SOUP-ENCODING% placeholder when present."""

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        text = self
        if "%SOUP-ENCODING%" in text:
            text = self.substituteEncoding(text, encoding)
        return "<?%s?>" % self.toEncoding(text, encoding)
class Comment(NavigableString):
    """An HTML/XML comment; renders as <!--...-->."""

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        body = NavigableString.__str__(self, encoding)
        return "<!--" + body + "-->"
class Declaration(NavigableString):
    """A declaration (e.g. a DOCTYPE); renders as <!...>."""

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        body = NavigableString.__str__(self, encoding)
        return "<!%s>" % body
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def _invert(h):
"Cheap function to invert a hash."
i = {}
for k,v in h.items():
i[v] = k
return i
XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
"quot" : '"',
"amp" : "&",
"lt" : "<",
"gt" : ">" }
XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
def _convertEntities(self, match):
"""Used in a call to re.sub to replace HTML, XML, and numeric
entities with the appropriate Unicode characters. If HTML
entities are being converted, any unrecognized entities are
escaped."""
x = match.group(1)
if self.convertHTMLEntities and x in name2codepoint:
return unichr(name2codepoint[x])
elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
if self.convertXMLEntities:
return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
else:
return u'&%s;' % x
elif len(x) > 0 and x[0] == '#':
# Handle numeric entities
if len(x) > 1 and x[1] == 'x':
return unichr(int(x[2:], 16))
else:
return unichr(int(x[1:]))
elif self.escapeUnrecognizedEntities:
return u'&%s;' % x
else:
return u'&%s;' % x
def __init__(self, parser, name, attrs=None, parent=None,
previous=None):
"Basic constructor."
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected
self.parserClass = parser.__class__
self.isSelfClosing = parser.isSelfClosingTag(name)
self.name = name
if attrs is None:
attrs = []
elif isinstance(attrs, dict):
attrs = attrs.items()
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
self.containsSubstitutions = False
self.convertHTMLEntities = parser.convertHTMLEntities
self.convertXMLEntities = parser.convertXMLEntities
self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
# Convert any HTML, XML, or numeric entities in the attribute values.
convert = lambda(k, val): (k,
re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
self._convertEntities,
val))
self.attrs = map(convert, self.attrs)
def getString(self):
if (len(self.contents) == 1
and isinstance(self.contents[0], NavigableString)):
return self.contents[0]
def setString(self, string):
"""Replace the contents of the tag with a string"""
self.clear()
self.append(string)
string = property(getString, setString)
def getText(self, separator=u""):
if not len(self.contents):
return u""
stopNode = self._lastRecursiveChild().next
strings = []
current = self.contents[0]
while current is not stopNode:
if isinstance(current, NavigableString):
strings.append(current.strip())
current = current.next
return separator.join(strings)
text = property(getText)
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self._getAttrMap().get(key, default)
def clear(self):
"""Extract all children."""
for child in self.contents[:]:
child.extract()
def index(self, element):
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def has_key(self, key):
return self._getAttrMap().has_key(key)
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self._getAttrMap()[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self._getAttrMap()
self.attrMap[key] = value
found = False
for i in range(0, len(self.attrs)):
if self.attrs[i][0] == key:
self.attrs[i] = (key, value)
found = True
if not found:
self.attrs.append((key, value))
self._getAttrMap()[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
for item in self.attrs:
if item[0] == key:
self.attrs.remove(item)
#We don't break because bad HTML can define the same
#attribute multiple times.
self._getAttrMap()
if self.attrMap.has_key(key):
del self.attrMap[key]
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
findAll() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return apply(self.findAll, args, kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
return self.find(tag[:-3])
elif tag.find('__') != 0:
return self.find(tag)
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag.
NOTE: right now this will return false if two tags have the
same attributes in a different order. Should this be fixed?"""
if other is self:
return True
if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
return False
for i in range(0, len(self.contents)):
if self.contents[i] != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
    def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Renders this tag as a string."""
        # Nonstandard signature: Python itself always calls __repr__
        # with no arguments, so the default encoding applies then.
        return self.__str__(encoding)
    def __unicode__(self):
        # Passing None asks __str__ for a Unicode string (see __str__).
        return self.__str__(None)
BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
+ "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
+ ")")
def _sub_entity(self, x):
"""Used with a regular expression to substitute the
appropriate XML entity for an XML special character."""
return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
                prettyPrint=False, indentLevel=0):
        """Returns a string or Unicode representation of this tag and
        its contents. To get Unicode, pass None for encoding.
        NOTE: since Python's HTML parser consumes whitespace, this
        method is not certain to reproduce the whitespace present in
        the original string."""
        encodedName = self.toEncoding(self.name, encoding)
        attrs = []
        if self.attrs:
            for key, val in self.attrs:
                fmt = '%s="%s"'
                if isinstance(val, basestring):
                    if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
                        val = self.substituteEncoding(val, encoding)
                    # The attribute value either:
                    #
                    # * Contains no embedded double quotes or single quotes.
                    #   No problem: we enclose it in double quotes.
                    # * Contains embedded single quotes. No problem:
                    #   double quotes work here too.
                    # * Contains embedded double quotes. No problem:
                    #   we enclose it in single quotes.
                    # * Embeds both single _and_ double quotes. This
                    #   can't happen naturally, but it can happen if
                    #   you modify an attribute value after parsing
                    #   the document. Now we have a bit of a
                    #   problem. We solve it by enclosing the
                    #   attribute in single quotes, and escaping any
                    #   embedded single quotes to XML entities.
                    if '"' in val:
                        fmt = "%s='%s'"
                        if "'" in val:
                            # TODO: replace with apos when
                            # appropriate.
                            val = val.replace("'", "&squot;")
                    # Now we're okay w/r/t quotes. But the attribute
                    # value might also contain angle brackets, or
                    # ampersands that aren't part of entities. We need
                    # to escape those to XML entities too.
                    val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
                attrs.append(fmt % (self.toEncoding(key, encoding),
                                    self.toEncoding(val, encoding)))
        close = ''
        closeTag = ''
        if self.isSelfClosing:
            close = ' /'
        else:
            closeTag = '</%s>' % encodedName
        # Compute the indentation for pretty-printed output; plain
        # output leaves both at zero.
        indentTag, indentContents = 0, 0
        if prettyPrint:
            indentTag = indentLevel
            space = (' ' * (indentTag-1))
            indentContents = indentTag + 1
        contents = self.renderContents(encoding, prettyPrint, indentContents)
        if self.hidden:
            # Hidden tags (e.g. the root document tag) render only
            # their contents, with no enclosing markup.
            s = contents
        else:
            s = []
            attributeString = ''
            if attrs:
                attributeString = ' ' + ' '.join(attrs)
            if prettyPrint:
                s.append(space)
            s.append('<%s%s%s>' % (encodedName, attributeString, close))
            if prettyPrint:
                s.append("\n")
            s.append(contents)
            if prettyPrint and contents and contents[-1] != "\n":
                s.append("\n")
            if prettyPrint and closeTag:
                s.append(space)
            s.append(closeTag)
            if prettyPrint and closeTag and self.nextSibling:
                s.append("\n")
            s = ''.join(s)
        return s
    def decompose(self):
        """Recursively destroys the contents of this tree."""
        self.extract()
        if len(self.contents) == 0:
            return
        current = self.contents[0]
        while current is not None:
            # Walk the node chain, severing every reference each node
            # holds so the whole subtree can be garbage-collected.
            next = current.next
            if isinstance(current, Tag):
                del current.contents[:]
            current.parent = None
            current.previous = None
            current.previousSibling = None
            current.next = None
            current.nextSibling = None
            current = next
def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
return self.__str__(encoding, True)
    def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                       prettyPrint=False, indentLevel=0):
        """Renders the contents of this tag as a string in the given
        encoding. If encoding is None, returns a Unicode string.."""
        s=[]
        for c in self:
            text = None
            if isinstance(c, NavigableString):
                text = c.__str__(encoding)
            elif isinstance(c, Tag):
                # Child tags render themselves (recursively).
                s.append(c.__str__(encoding, prettyPrint, indentLevel))
            if text and prettyPrint:
                text = text.strip()
            if text:
                if prettyPrint:
                    s.append(" " * (indentLevel-1))
                s.append(text)
                if prettyPrint:
                    s.append("\n")
        return ''.join(s)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def findAll(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.recursiveChildGenerator
if not recursive:
generator = self.childGenerator
return self._findAll(name, attrs, text, limit, generator, **kwargs)
findChildren = findAll
# Pre-3.x compatibility methods
first = find
fetch = findAll
    def fetchText(self, text=None, recursive=True, limit=None):
        """Wrapper around findAll that searches text nodes only."""
        return self.findAll(text=text, recursive=recursive, limit=limit)
    def firstText(self, text=None, recursive=True):
        """Wrapper around find that searches text nodes only."""
        return self.find(text=text, recursive=recursive)
#Private methods
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
if not getattr(self, 'attrMap'):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap
#Generator methods
    def childGenerator(self):
        """Iterate over this tag's direct children only."""
        # Just use the iterator from the contents
        return iter(self.contents)
def recursiveChildGenerator(self):
if not len(self.contents):
raise StopIteration
stopNode = self._lastRecursiveChild().next
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
    """Encapsulates a number of ways of matching a markup element (tag or
    text)."""
    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        self.name = name
        # A bare string for 'attrs' is shorthand for matching the CSS
        # 'class' attribute.
        if isinstance(attrs, basestring):
            kwargs['class'] = _match_css_class(attrs)
            attrs = None
        if kwargs:
            if attrs:
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text
    def __str__(self):
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)
    def searchTag(self, markupName=None, markupAttrs={}):
        """Match this strainer against a tag, given either a Tag object
        (passed as markupName) or a name string plus an attribute list.
        Returns the matched tag (or name), or None on no match."""
        found = None
        markup = None
        if isinstance(markupName, Tag):
            markup = markupName
            markupAttrs = markup
        callFunctionWithTagData = callable(self.name) \
                                  and not isinstance(markupName, Tag)
        if (not self.name) \
               or callFunctionWithTagData \
               or (markup and self._matches(markup, self.name)) \
               or (not markup and self._matches(markupName, self.name)):
            if callFunctionWithTagData:
                match = self.name(markupName, markupAttrs)
            else:
                match = True
            markupAttrMap = None
            for attr, matchAgainst in self.attrs.items():
                if not markupAttrMap:
                    if hasattr(markupAttrs, 'get'):
                        markupAttrMap = markupAttrs
                    else:
                        # Build a dict view of the (key, value) list,
                        # lazily, the first time an attribute is tested.
                        markupAttrMap = {}
                        for k,v in markupAttrs:
                            markupAttrMap[k] = v
                attrValue = markupAttrMap.get(attr)
                if not self._matches(attrValue, matchAgainst):
                    match = False
                    break
            if match:
                if markup:
                    found = markup
                else:
                    found = markupName
        return found
    def search(self, markup):
        """Match this strainer against an arbitrary piece of markup: a
        list of elements, a Tag, or a string/NavigableString."""
        #print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if hasattr(markup, "__iter__") \
                and not isinstance(markup, Tag):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text:
                found = self.searchTag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isinstance(markup, basestring):
            if self._matches(markup, self.text):
                found = markup
        else:
            raise Exception, "I don't know how to match against a %s" \
                  % markup.__class__
        return found
    def _matches(self, markup, matchAgainst):
        """Core matcher: test 'markup' (a Tag, string, or None) against
        'matchAgainst', which may be True, a callable, a regexp, a
        list, a dict, or a string."""
        #print "Matching %s against %s" % (markup, matchAgainst)
        result = False
        if matchAgainst is True:
            result = markup is not None
        elif callable(matchAgainst):
            result = matchAgainst(markup)
        else:
            #Custom match methods take the tag as an argument, but all
            #other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup and not isinstance(markup, basestring):
                markup = unicode(markup)
            #Now we know that chunk is either a string, or None.
            if hasattr(matchAgainst, 'match'):
                # It's a regexp object.
                result = markup and matchAgainst.search(markup)
            elif hasattr(matchAgainst, '__iter__'): # list-like
                result = markup in matchAgainst
            elif hasattr(matchAgainst, 'items'):
                # NOTE(review): 'markup' is a string or None at this
                # point, and strings have no has_key() -- this branch
                # looks like it would raise AttributeError; confirm
                # whether it is ever reached before changing it.
                result = markup.has_key(matchAgainst)
            elif matchAgainst and isinstance(markup, basestring):
                if isinstance(markup, unicode):
                    matchAgainst = unicode(matchAgainst)
                else:
                    matchAgainst = str(matchAgainst)
            if not result:
                result = matchAgainst == markup
        return result
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        # Initialize *this* instance; the original called
        # list.__init__([]), which initialized a throwaway temporary
        # instead of self.
        list.__init__(self)
        self.source = source
# Now, some helper functions.
def buildTagMap(default, *args):
    """Turns a list of maps, lists, or scalars into a single map.
    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
    NESTING_RESET_TAGS maps out of lists and partial maps."""
    built = {}
    for portion in args:
        if hasattr(portion, 'items'):
            # Mapping: copy its entries verbatim.
            built.update(portion)
        elif hasattr(portion, '__iter__'): # is a list
            # Sequence: every member maps to the default value.
            for member in portion:
                built[member] = default
        else:
            # Scalar: a single key mapped to the default value.
            built[portion] = default
    return built
# Now, the parser classes.
class BeautifulStoneSoup(Tag, SGMLParser):
    """This class contains the basic parser and search code. It defines
    a parser that knows nothing about tag behavior except for the
    following:
    You can't close a tag without closing all the tags it encloses.
    That is, "<foo><bar></foo>" actually means
    "<foo><bar></bar></foo>".
    [Another possible explanation is "<foo><bar /></foo>", but since
    this class defines no SELF_CLOSING_TAGS, it will never use that
    explanation.]
    This class is useful for parsing XML or made-up markup languages,
    or when BeautifulSoup makes an assumption counter to what you were
    expecting."""
    # Parsing-behavior tables; subclasses override these to describe a
    # specific markup dialect (see BeautifulSoup below).
    SELF_CLOSING_TAGS = {}
    NESTABLE_TAGS = {}
    RESET_NESTING_TAGS = {}
    QUOTE_TAGS = {}
    PRESERVE_WHITESPACE_TAGS = []
    # Regex fixups applied to the raw markup before parsing (see the
    # __init__ docstring for the rationale).
    MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
                       lambda x: x.group(1) + ' />'),
                      (re.compile('<!\s+([^<>]*)>'),
                       lambda x: '<!' + x.group(1) + '>')
                      ]
    ROOT_TAG_NAME = u'[document]'
    # Accepted values for the convertEntities constructor argument.
    HTML_ENTITIES = "html"
    XML_ENTITIES = "xml"
    XHTML_ENTITIES = "xhtml"
    # TODO: This only exists for backwards-compatibility
    ALL_ENTITIES = XHTML_ENTITIES
    # Used when determining whether a text node is all whitespace and
    # can be replaced with a single space. A text node that contains
    # fancy Unicode spaces (usually non-breaking) should be left
    # alone.
    STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }
    def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
                 markupMassage=True, smartQuotesTo=XML_ENTITIES,
                 convertEntities=None, selfClosingTags=None, isHTML=False):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser.
        sgmllib will process most bad HTML, and the BeautifulSoup
        class has some tricks for dealing with some HTML that kills
        sgmllib, but Beautiful Soup can nonetheless choke or lose data
        if your data uses self-closing tags or declarations
        incorrectly.
        By default, Beautiful Soup uses regexes to sanitize input,
        avoiding the vast majority of these problems. If the problems
        don't apply to you, pass in False for markupMassage, and
        you'll get better performance.
        The default parser massage techniques fix the two most common
        instances of invalid HTML that choke sgmllib:
        <br/> (No space between name of closing tag and tag close)
        <! --Comment--> (Extraneous whitespace in declaration)
        You can pass in a custom list of (RE object, replace method)
        tuples to get Beautiful Soup to scrub your input the way you
        want."""
        self.parseOnlyThese = parseOnlyThese
        self.fromEncoding = fromEncoding
        self.smartQuotesTo = smartQuotesTo
        self.convertEntities = convertEntities
        # Set the rules for how we'll deal with the entities we
        # encounter
        if self.convertEntities:
            # It doesn't make sense to convert encoded characters to
            # entities even while you're converting entities to Unicode.
            # Just convert it all to Unicode.
            self.smartQuotesTo = None
            if convertEntities == self.HTML_ENTITIES:
                self.convertXMLEntities = False
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = True
            elif convertEntities == self.XHTML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = False
            elif convertEntities == self.XML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = False
                self.escapeUnrecognizedEntities = False
        else:
            self.convertXMLEntities = False
            self.convertHTMLEntities = False
            self.escapeUnrecognizedEntities = False
        self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
        SGMLParser.__init__(self)
        if hasattr(markup, 'read'):        # It's a file-type object.
            markup = markup.read()
        self.markup = markup
        self.markupMassage = markupMassage
        try:
            self._feed(isHTML=isHTML)
        except StopParsing:
            pass
        self.markup = None                 # The markup can now be GCed
    def convert_charref(self, name):
        """This method fixes a bug in Python's SGMLParser."""
        try:
            n = int(name)
        except ValueError:
            return
        if not 0 <= n <= 127 : # ASCII ends at 127, not 255
            return
        return self.convert_codepoint(n)
    def _feed(self, inDocumentEncoding=None, isHTML=False):
        """Convert the markup to Unicode, apply the massage fixups, and
        run it through the SGML parser."""
        # Convert the document to Unicode.
        markup = self.markup
        if isinstance(markup, unicode):
            if not hasattr(self, 'originalEncoding'):
                self.originalEncoding = None
        else:
            dammit = UnicodeDammit\
                     (markup, [self.fromEncoding, inDocumentEncoding],
                      smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
            markup = dammit.unicode
            self.originalEncoding = dammit.originalEncoding
            self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
        if markup:
            if self.markupMassage:
                if not hasattr(self.markupMassage, "__iter__"):
                    self.markupMassage = self.MARKUP_MASSAGE
                for fix, m in self.markupMassage:
                    markup = fix.sub(m, markup)
                # TODO: We get rid of markupMassage so that the
                # soup object can be deepcopied later on. Some
                # Python installations can't copy regexes. If anyone
                # was relying on the existence of markupMassage, this
                # might cause problems.
                del(self.markupMassage)
        self.reset()
        SGMLParser.feed(self, markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()
    def __getattr__(self, methodName):
        """This method routes method call requests to either the SGMLParser
        superclass or the Tag superclass, depending on the method name."""
        #print "__getattr__ called on %s.%s" % (self.__class__, methodName)
        if methodName.startswith('start_') or methodName.startswith('end_') \
               or methodName.startswith('do_'):
            return SGMLParser.__getattr__(self, methodName)
        elif not methodName.startswith('__'):
            return Tag.__getattr__(self, methodName)
        else:
            raise AttributeError
    def isSelfClosingTag(self, name):
        """Returns true iff the given string is the name of a
        self-closing tag according to this parser."""
        return self.SELF_CLOSING_TAGS.has_key(name) \
               or self.instanceSelfClosingTags.has_key(name)
    def reset(self):
        """Reset all parser state and install this soup object itself
        as the hidden root tag."""
        Tag.__init__(self, self, self.ROOT_TAG_NAME)
        self.hidden = 1
        SGMLParser.reset(self)
        self.currentData = []
        self.currentTag = None
        self.tagStack = []
        self.quoteStack = []
        self.pushTag(self)
    def popTag(self):
        """Pop the top tag off the stack and make its parent current."""
        tag = self.tagStack.pop()
        #print "Pop", tag.name
        if self.tagStack:
            self.currentTag = self.tagStack[-1]
        return self.currentTag
    def pushTag(self, tag):
        """Append 'tag' to the current tag's contents and make it the
        new current tag."""
        #print "Push", tag.name
        if self.currentTag:
            self.currentTag.contents.append(tag)
        self.tagStack.append(tag)
        self.currentTag = self.tagStack[-1]
    def endData(self, containerClass=NavigableString):
        """Flush accumulated character data into the tree as an
        instance of 'containerClass'."""
        if self.currentData:
            currentData = u''.join(self.currentData)
            # Collapse all-ASCII-whitespace text to one char, unless we
            # are inside a whitespace-preserving tag (e.g. <pre>).
            if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
                not set([tag.name for tag in self.tagStack]).intersection(
                    self.PRESERVE_WHITESPACE_TAGS)):
                if '\n' in currentData:
                    currentData = '\n'
                else:
                    currentData = ' '
            self.currentData = []
            if self.parseOnlyThese and len(self.tagStack) <= 1 and \
                   (not self.parseOnlyThese.text or \
                    not self.parseOnlyThese.search(currentData)):
                return
            o = containerClass(currentData)
            o.setup(self.currentTag, self.previous)
            if self.previous:
                self.previous.next = o
            self.previous = o
            self.currentTag.contents.append(o)
    def _popToTag(self, name, inclusivePop=True):
        """Pops the tag stack up to and including the most recent
        instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
        the given tag."""
        #print "Popping to %s" % name
        if name == self.ROOT_TAG_NAME:
            return
        numPops = 0
        mostRecentTag = None
        for i in range(len(self.tagStack)-1, 0, -1):
            if name == self.tagStack[i].name:
                numPops = len(self.tagStack)-i
                break
        if not inclusivePop:
            numPops = numPops - 1
        for i in range(0, numPops):
            mostRecentTag = self.popTag()
        return mostRecentTag
    def _smartPop(self, name):
        """We need to pop up to the previous tag of this type, unless
        one of this tag's nesting reset triggers comes between this
        tag and the previous tag of this type, OR unless this tag is a
        generic nesting trigger and another generic nesting trigger
        comes between this tag and the previous tag of this type.
        Examples:
         <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
         <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
         <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.
         <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
         <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
         <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
        """
        nestingResetTriggers = self.NESTABLE_TAGS.get(name)
        isNestable = nestingResetTriggers != None
        isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
        popTo = None
        inclusive = True
        for i in range(len(self.tagStack)-1, 0, -1):
            p = self.tagStack[i]
            if (not p or p.name == name) and not isNestable:
                #Non-nestable tags get popped to the top or to their
                #last occurrence.
                popTo = name
                break
            if (nestingResetTriggers is not None
                and p.name in nestingResetTriggers) \
                or (nestingResetTriggers is None and isResetNesting
                    and self.RESET_NESTING_TAGS.has_key(p.name)):
                #If we encounter one of the nesting reset triggers
                #peculiar to this tag, or we encounter another tag
                #that causes nesting to reset, pop up to but not
                #including that tag.
                popTo = p.name
                inclusive = False
                break
            p = p.parent
        if popTo:
            self._popToTag(popTo, inclusive)
    def unknown_starttag(self, name, attrs, selfClosing=0):
        """sgmllib callback: build a Tag for an opening tag and push it
        onto the stack (popping it straight away if self-closing)."""
        #print "Start tag %s: %s" % (name, attrs)
        if self.quoteStack:
            #This is not a real tag.
            #print "<%s> is not real!" % name
            attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs])
            self.handle_data('<%s%s>' % (name, attrs))
            return
        self.endData()
        if not self.isSelfClosingTag(name) and not selfClosing:
            self._smartPop(name)
        if self.parseOnlyThese and len(self.tagStack) <= 1 \
               and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
            return
        tag = Tag(self, name, attrs, self.currentTag, self.previous)
        if self.previous:
            self.previous.next = tag
        self.previous = tag
        self.pushTag(tag)
        if selfClosing or self.isSelfClosingTag(name):
            self.popTag()
        if name in self.QUOTE_TAGS:
            #print "Beginning quote (%s)" % name
            self.quoteStack.append(name)
            self.literal = 1
        return tag
    def unknown_endtag(self, name):
        """sgmllib callback: close the most recent tag of this name."""
        #print "End tag %s" % name
        if self.quoteStack and self.quoteStack[-1] != name:
            #This is not a real end tag.
            #print "</%s> is not real!" % name
            self.handle_data('</%s>' % name)
            return
        self.endData()
        self._popToTag(name)
        if self.quoteStack and self.quoteStack[-1] == name:
            self.quoteStack.pop()
            self.literal = (len(self.quoteStack) > 0)
    def handle_data(self, data):
        # Character data is buffered until endData() flushes it.
        self.currentData.append(data)
    def _toStringSubclass(self, text, subclass):
        """Adds a certain piece of text to the tree as a NavigableString
        subclass."""
        self.endData()
        self.handle_data(text)
        self.endData(subclass)
    def handle_pi(self, text):
        """Handle a processing instruction as a ProcessingInstruction
        object, possibly one with a %SOUP-ENCODING% slot into which an
        encoding will be plugged later."""
        if text[:3] == "xml":
            text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
        self._toStringSubclass(text, ProcessingInstruction)
    def handle_comment(self, text):
        "Handle comments as Comment objects."
        self._toStringSubclass(text, Comment)
    def handle_charref(self, ref):
        "Handle character references as data."
        if self.convertEntities:
            data = unichr(int(ref))
        else:
            data = '&#%s;' % ref
        self.handle_data(data)
    def handle_entityref(self, ref):
        """Handle entity references as data, possibly converting known
        HTML and/or XML entity references to the corresponding Unicode
        characters."""
        data = None
        if self.convertHTMLEntities:
            try:
                data = unichr(name2codepoint[ref])
            except KeyError:
                pass
        if not data and self.convertXMLEntities:
                data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)
        if not data and self.convertHTMLEntities and \
            not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
                # TODO: We've got a problem here. We're told this is
                # an entity reference, but it's not an XML entity
                # reference or an HTML entity reference. Nonetheless,
                # the logical thing to do is to pass it through as an
                # unrecognized entity reference.
                #
                # Except: when the input is "&carol;" this function
                # will be called with input "carol". When the input is
                # "AT&T", this function will be called with input
                # "T". We have no way of knowing whether a semicolon
                # was present originally, so we don't know whether
                # this is an unknown entity or just a misplaced
                # ampersand.
                #
                # The more common case is a misplaced ampersand, so I
                # escape the ampersand and omit the trailing semicolon.
                data = "&amp;%s" % ref
        if not data:
            # This case is different from the one above, because we
            # haven't already gone through a supposedly comprehensive
            # mapping of entities to Unicode characters. We might not
            # have gone through any mapping at all. So the chances are
            # very high that this is a real entity, and not a
            # misplaced ampersand.
            data = "&%s;" % ref
        self.handle_data(data)
    def handle_decl(self, data):
        "Handle DOCTYPEs and the like as Declaration objects."
        self._toStringSubclass(data, Declaration)
    def parse_declaration(self, i):
        """Treat a bogus SGML declaration as raw data. Treat a CDATA
        declaration as a CData object."""
        j = None
        if self.rawdata[i:i+9] == '<![CDATA[':
             k = self.rawdata.find(']]>', i)
             if k == -1:
                 k = len(self.rawdata)
             data = self.rawdata[i+9:k]
             j = k + 3
             self._toStringSubclass(data, CData)
        else:
            try:
                j = SGMLParser.parse_declaration(self, i)
            except SGMLParseError:
                toHandle = self.rawdata[i:]
                self.handle_data(toHandle)
                j = i + len(toHandle)
        return j
class BeautifulSoup(BeautifulStoneSoup):
    """This parser knows the following facts about HTML:
    * Some tags have no closing tag and should be interpreted as being
      closed as soon as they are encountered.
    * The text inside some tags (ie. 'script') may contain tags which
      are not really part of the document and which should be parsed
      as text, not tags. If you want to parse the text as tags, you can
      always fetch it and parse it explicitly.
    * Tag nesting rules:
      Most tags can't be nested at all. For instance, the occurrence of
      a <p> tag should implicitly close the previous <p> tag.
       <p>Para1<p>Para2
        should be transformed into:
       <p>Para1</p><p>Para2
      Some tags can be nested arbitrarily. For instance, the occurrence
      of a <blockquote> tag should _not_ implicitly close the previous
      <blockquote> tag.
       Alice said: <blockquote>Bob said: <blockquote>Blah
        should NOT be transformed into:
       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah
      Some tags can be nested, but the nesting is reset by the
      interposition of other tags. For instance, a <tr> tag should
      implicitly close the previous <tr> tag within the same <table>,
      but not close a <tr> tag in another table.
       <table><tr>Blah<tr>Blah
        should be transformed into:
       <table><tr>Blah</tr><tr>Blah
      but,
       <tr>Blah<table><tr>Blah
        should NOT be transformed into
       <tr>Blah<table></tr><tr>Blah
    Differing assumptions about tag nesting rules are a major source
    of problems with the BeautifulSoup class. If BeautifulSoup is not
    treating as nestable a tag your page author treats as nestable,
    try ICantBelieveItsBeautifulSoup, MinimalSoup, or
    BeautifulStoneSoup before writing your own subclass."""
    def __init__(self, *args, **kwargs):
        # HTML input: convert smart quotes to HTML entities by default.
        if not kwargs.has_key('smartQuotesTo'):
            kwargs['smartQuotesTo'] = self.HTML_ENTITIES
        kwargs['isHTML'] = True
        BeautifulStoneSoup.__init__(self, *args, **kwargs)
    SELF_CLOSING_TAGS = buildTagMap(None,
                                    ('br' , 'hr', 'input', 'img', 'meta',
                                    'spacer', 'link', 'frame', 'base', 'col'))
    PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])
    QUOTE_TAGS = {'script' : None, 'textarea' : None}
    #According to the HTML standard, each of these inline tags can
    #contain another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
                            'center')
    #According to the HTML standard, these block tags can contain
    #another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del')
    #Lists can contain other lists, but there are restrictions.
    NESTABLE_LIST_TAGS = { 'ol' : [],
                           'ul' : [],
                           'li' : ['ul', 'ol'],
                           'dl' : [],
                           'dd' : ['dl'],
                           'dt' : ['dl'] }
    #Tables can contain other tables, but there are restrictions.
    NESTABLE_TABLE_TAGS = {'table' : [],
                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],
                           'td' : ['tr'],
                           'th' : ['tr'],
                           'thead' : ['table'],
                           'tbody' : ['table'],
                           'tfoot' : ['table'],
                           }
    NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre')
    #If one of these tags is encountered, all tags up to the next tag of
    #this type are popped.
    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
                                     NON_NESTABLE_BLOCK_TAGS,
                                     NESTABLE_LIST_TAGS,
                                     NESTABLE_TABLE_TAGS)
    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
    # Used to detect the charset in a META tag; see start_meta
    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
    def start_meta(self, attrs):
        """Beautiful Soup can detect a charset included in a META tag,
        try to convert the document to that charset, and re-parse the
        document from the beginning."""
        httpEquiv = None
        contentType = None
        contentTypeIndex = None
        tagNeedsEncodingSubstitution = False
        for i in range(0, len(attrs)):
            key, value = attrs[i]
            key = key.lower()
            if key == 'http-equiv':
                httpEquiv = value
            elif key == 'content':
                contentType = value
                contentTypeIndex = i
        if httpEquiv and contentType: # It's an interesting meta tag.
            match = self.CHARSET_RE.search(contentType)
            if match:
                if (self.declaredHTMLEncoding is not None or
                    self.originalEncoding == self.fromEncoding):
                    # An HTML encoding was sniffed while converting
                    # the document to Unicode, or an HTML encoding was
                    # sniffed during a previous pass through the
                    # document, or an encoding was specified
                    # explicitly and it worked. Rewrite the meta tag.
                    def rewrite(match):
                        return match.group(1) + "%SOUP-ENCODING%"
                    newAttr = self.CHARSET_RE.sub(rewrite, contentType)
                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
                                               newAttr)
                    tagNeedsEncodingSubstitution = True
                else:
                    # This is our first pass through the document.
                    # Go through it again with the encoding information.
                    newCharset = match.group(3)
                    if newCharset and newCharset != self.originalEncoding:
                        self.declaredHTMLEncoding = newCharset
                        self._feed(self.declaredHTMLEncoding)
                        raise StopParsing
                pass
        tag = self.unknown_starttag("meta", attrs)
        if tag and tagNeedsEncodingSubstitution:
            tag.containsSubstitutions = True
class StopParsing(Exception):
    # Raised internally to abort the current parse; see start_meta
    # (re-parse with a discovered encoding) and __init__, which
    # catches it.
    pass
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
    """The BeautifulSoup class is oriented towards skipping over
    common HTML errors like unclosed tags. However, sometimes it makes
    errors of its own. For instance, consider this fragment:
     <b>Foo<b>Bar</b></b>
    This is perfectly valid (if bizarre) HTML. However, the
    BeautifulSoup class will implicitly close the first b tag when it
    encounters the second 'b'. It will think the author wrote
    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
    there's no real-world reason to bold something that's already
    bold. When it encounters '</b></b>' it will close two more 'b'
    tags, for a grand total of three tags closed instead of two. This
    can throw off the rest of your document structure. The same is
    true of a number of other tags, listed below.
    It's much more common for someone to forget to close a 'b' tag
    than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case. This class handles the not-so-common
    case: where you can't believe someone wrote what they did, but
    it's valid HTML and BeautifulSoup screwed up by assuming it
    wouldn't be."""
    # NOTE(review): 'strong' and 'big' each appear twice in this tuple;
    # harmless, since buildTagMap deduplicates the keys.
    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
     ('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
      'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
      'big')
    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript',)
    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
    """The MinimalSoup class is for parsing HTML that contains
    pathologically bad markup. It makes no assumptions about tag
    nesting, but it does know which tags are self-closing, that
    <script> tags contain Javascript and should not be parsed, that
    META tags may contain encoding information, and so on.
    This also makes it better for subclassing than BeautifulStoneSoup
    or BeautifulSoup."""
    # NOTE(review): buildTagMap('noscript') passes no *args, so this
    # evaluates to an empty dict (no nesting resets at all). Possibly
    # buildTagMap(None, 'noscript') was intended -- confirm before
    # changing, since "no resets" also fits this class's philosophy.
    RESET_NESTING_TAGS = buildTagMap('noscript')
    NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
    """This class will push a tag with only a single string child into
    the tag's parent as an attribute. The attribute's name is the tag
    name, and the value is the string child. An example should give
    the flavor of the change:
    <foo><bar>baz</bar></foo>
     =>
    <foo bar="baz"><bar>baz</bar></foo>
    You can then access fooTag['bar'] instead of fooTag.barTag.string.
    This is, of course, useful for scraping structures that tend to
    use subelements instead of attributes, such as SOAP messages. Note
    that it modifies its input, so don't print the modified version
    out.
    I'm not sure how many people really want to use this class; let me
    know if you do. Mainly I like the name."""
    def popTag(self):
        # Before popping: if the closing tag's only child is a string
        # and the parent has no attribute of the same name, copy the
        # string onto the parent as an attribute.
        if len(self.tagStack) > 1:
            tag = self.tagStack[-1]
            parent = self.tagStack[-2]
            parent._getAttrMap()
            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
                isinstance(tag.contents[0], NavigableString) and
                not parent.attrMap.has_key(tag.name)):
                parent[tag.name] = tag.contents[0]
        BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisiness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
    """Enterprise-friendly alias for BeautifulStoneSoup."""
    pass
class RobustHTMLParser(BeautifulSoup):
    """Enterprise-friendly alias for BeautifulSoup."""
    pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
    """Enterprise-friendly alias for ICantBelieveItsBeautifulSoup."""
    pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
    """Enterprise-friendly alias for MinimalSoup."""
    pass
class SimplifyingSOAPParser(BeautifulSOAP):
    """Enterprise-friendly alias for BeautifulSOAP."""
    pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except ImportError:
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
class UnicodeDammit:
    """A class for detecting the encoding of a *ML document and
    converting it to a Unicode string. If the source encoding is
    windows-1252, can replace MS smart quotes with their HTML or XML
    equivalents."""

    # This dictionary maps commonly seen values for "charset" in HTML
    # meta tags to the corresponding Python codec names. It only covers
    # values that aren't in Python's aliases and can't be determined
    # by the heuristics in find_codec.
    CHARSET_ALIASES = { "macintosh" : "mac-roman",
                        "x-sjis" : "shift-jis" }

    def __init__(self, markup, overrideEncodings=[],
                 smartQuotesTo='xml', isHTML=False):
        # Candidate encodings are tried in order: caller overrides, then
        # the encoding declared in the document, then the BOM-sniffed
        # one, then chardet's guess (if installed), and finally utf-8
        # and windows-1252 as a last resort.
        # NOTE(review): the mutable default overrideEncodings=[] is only
        # read, never mutated, so the shared-default pitfall is benign.
        self.declaredHTMLEncoding = None
        self.markup, documentEncoding, sniffedEncoding = \
                     self._detectEncoding(markup, isHTML)
        self.smartQuotesTo = smartQuotesTo
        self.triedEncodings = []
        if markup == '' or isinstance(markup, unicode):
            # Already unicode (or empty): nothing to detect or convert.
            self.originalEncoding = None
            self.unicode = unicode(markup)
            return

        u = None
        for proposedEncoding in overrideEncodings:
            u = self._convertFrom(proposedEncoding)
            if u: break
        if not u:
            for proposedEncoding in (documentEncoding, sniffedEncoding):
                u = self._convertFrom(proposedEncoding)
                if u: break

        # If no luck and we have auto-detection library, try that:
        if not u and chardet and not isinstance(self.markup, unicode):
            u = self._convertFrom(chardet.detect(self.markup)['encoding'])

        # As a last resort, try utf-8 and windows-1252:
        if not u:
            for proposed_encoding in ("utf-8", "windows-1252"):
                u = self._convertFrom(proposed_encoding)
                if u: break

        # self.unicode stays None when every candidate encoding failed.
        self.unicode = u
        if not u: self.originalEncoding = None

    def _subMSChar(self, orig):
        """Changes a MS smart quote character to an XML or HTML
        entity."""
        sub = self.MS_CHARS.get(orig)
        if isinstance(sub, tuple):
            # A (named-entity, hex-codepoint) pair: choose the form
            # matching the requested output flavor.
            if self.smartQuotesTo == 'xml':
                sub = '&#x%s;' % sub[1]
            else:
                sub = '&%s;' % sub[0]
        return sub

    def _convertFrom(self, proposed):
        """Try to decode the markup as *proposed*.

        Returns the decoded markup on success, or None when the codec
        is unknown, was already tried, or the decode raised."""
        proposed = self.find_codec(proposed)
        if not proposed or proposed in self.triedEncodings:
            return None
        self.triedEncodings.append(proposed)
        markup = self.markup

        # Convert smart quotes to HTML if coming from an encoding
        # that might have them.
        if self.smartQuotesTo and proposed.lower() in("windows-1252",
                                                      "iso-8859-1",
                                                      "iso-8859-2"):
            markup = re.compile("([\x80-\x9f])").sub \
                     (lambda(x): self._subMSChar(x.group(1)),
                      markup)

        try:
            # print "Trying to convert document to %s" % proposed
            u = self._toUnicode(markup, proposed)
            self.markup = u
            self.originalEncoding = proposed
        except Exception, e:
            # print "That didn't work!"
            # print e
            return None
        #print "Correct encoding: %s" % proposed
        return self.markup

    def _toUnicode(self, data, encoding):
        '''Given a string and its encoding, decodes the string into Unicode.
        %encoding is a string recognized by encodings.aliases'''

        # strip Byte Order Mark (if present); a BOM also overrides the
        # caller-supplied encoding.
        if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
               and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16be'
            data = data[2:]
        elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
                 and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16le'
            data = data[2:]
        elif data[:3] == '\xef\xbb\xbf':
            encoding = 'utf-8'
            data = data[3:]
        elif data[:4] == '\x00\x00\xfe\xff':
            encoding = 'utf-32be'
            data = data[4:]
        elif data[:4] == '\xff\xfe\x00\x00':
            encoding = 'utf-32le'
            data = data[4:]
        newdata = unicode(data, encoding)
        return newdata

    def _detectEncoding(self, xml_data, isHTML=False):
        """Given a document, tries to detect its XML encoding."""
        # Strategy: first sniff a BOM / byte pattern (re-encoding the
        # data to utf-8 when one is found), then look for an explicit
        # declaration (<?xml ... encoding=?> or, for HTML, a meta
        # charset attribute).
        xml_encoding = sniffed_xml_encoding = None
        try:
            if xml_data[:4] == '\x4c\x6f\xa7\x94':
                # EBCDIC
                xml_data = self._ebcdic_to_ascii(xml_data)
            elif xml_data[:4] == '\x00\x3c\x00\x3f':
                # UTF-16BE
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
                     and (xml_data[2:4] != '\x00\x00'):
                # UTF-16BE with BOM
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x3f\x00':
                # UTF-16LE
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
                     (xml_data[2:4] != '\x00\x00'):
                # UTF-16LE with BOM
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\x00\x3c':
                # UTF-32BE
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x00\x00':
                # UTF-32LE
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\xfe\xff':
                # UTF-32BE with BOM
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\xff\xfe\x00\x00':
                # UTF-32LE with BOM
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
            elif xml_data[:3] == '\xef\xbb\xbf':
                # UTF-8 with BOM
                sniffed_xml_encoding = 'utf-8'
                xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
            else:
                sniffed_xml_encoding = 'ascii'
                pass
        except:
            # NOTE(review): bare except silently discards decode errors
            # and falls through with the raw data.
            xml_encoding_match = None
        xml_encoding_match = re.compile(
            '^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
        if not xml_encoding_match and isHTML:
            regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
            xml_encoding_match = regexp.search(xml_data)
        if xml_encoding_match is not None:
            xml_encoding = xml_encoding_match.groups()[0].lower()
            if isHTML:
                self.declaredHTMLEncoding = xml_encoding
        if sniffed_xml_encoding and \
           (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
                             'iso-10646-ucs-4', 'ucs-4', 'csucs4',
                             'utf-16', 'utf-32', 'utf_16', 'utf_32',
                             'utf16', 'u16')):
            # The declared encoding is a generic UTF family name; prefer
            # the byte-order-specific variant we sniffed.
            xml_encoding = sniffed_xml_encoding
        return xml_data, xml_encoding, sniffed_xml_encoding

    def find_codec(self, charset):
        """Map *charset* to a codec Python knows, trying the alias table
        and common respellings before returning it unchanged."""
        return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
               or (charset and self._codec(charset.replace("-", ""))) \
               or (charset and self._codec(charset.replace("-", "_"))) \
               or charset

    def _codec(self, charset):
        # Return *charset* if Python has a codec for it, else None.
        if not charset: return charset
        codec = None
        try:
            codecs.lookup(charset)
            codec = charset
        except (LookupError, ValueError):
            pass
        return codec

    # Translation table from EBCDIC bytes to ASCII, built lazily on
    # first use and cached on the class.
    EBCDIC_TO_ASCII_MAP = None
    def _ebcdic_to_ascii(self, s):
        """Translate an EBCDIC-encoded byte string to ASCII."""
        c = self.__class__
        if not c.EBCDIC_TO_ASCII_MAP:
            emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
                    16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
                    128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
                    144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
                    32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
                    38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
                    45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
                    186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
                    195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
                    201,202,106,107,108,109,110,111,112,113,114,203,204,205,
                    206,207,208,209,126,115,116,117,118,119,120,121,122,210,
                    211,212,213,214,215,216,217,218,219,220,221,222,223,224,
                    225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
                    73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
                    82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
                    90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
                    250,251,252,253,254,255)
            import string
            c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
        return s.translate(c.EBCDIC_TO_ASCII_MAP)

    # Windows-1252 "smart" characters, mapped to (HTML entity name,
    # hex code point) pairs — or to a plain replacement string where
    # no entity applies.
    MS_CHARS = { '\x80' : ('euro', '20AC'),
                 '\x81' : ' ',
                 '\x82' : ('sbquo', '201A'),
                 '\x83' : ('fnof', '192'),
                 '\x84' : ('bdquo', '201E'),
                 '\x85' : ('hellip', '2026'),
                 '\x86' : ('dagger', '2020'),
                 '\x87' : ('Dagger', '2021'),
                 '\x88' : ('circ', '2C6'),
                 '\x89' : ('permil', '2030'),
                 '\x8A' : ('Scaron', '160'),
                 '\x8B' : ('lsaquo', '2039'),
                 '\x8C' : ('OElig', '152'),
                 '\x8D' : '?',
                 '\x8E' : ('#x17D', '17D'),
                 '\x8F' : '?',
                 '\x90' : '?',
                 '\x91' : ('lsquo', '2018'),
                 '\x92' : ('rsquo', '2019'),
                 '\x93' : ('ldquo', '201C'),
                 '\x94' : ('rdquo', '201D'),
                 '\x95' : ('bull', '2022'),
                 '\x96' : ('ndash', '2013'),
                 '\x97' : ('mdash', '2014'),
                 '\x98' : ('tilde', '2DC'),
                 '\x99' : ('trade', '2122'),
                 '\x9a' : ('scaron', '161'),
                 '\x9b' : ('rsaquo', '203A'),
                 '\x9c' : ('oelig', '153'),
                 '\x9d' : '?',
                 '\x9e' : ('#x17E', '17E'),
                 '\x9f' : ('Yuml', ''),}
#######################################################################
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
    # Read HTML from stdin and emit the pretty-printed parse tree.
    import sys
    soup = BeautifulSoup(sys.stdin)
    print soup.prettify()
| Python |
#coding=utf-8
'''
Created on 2011-8-30
@author: chris
'''
import sys
import urllib2
import urllib
import cookielib
from ripper.core import EncodingConvert
from ripper.core.Utils import enable_proxy #@UnresolvedImport
from ripper.core.Utils import disable_proxy #@UnresolvedImport
import json
import urlparse
from ripper.parser import BeautifulSoup
import os
sys.path.append('/harvester/ripper/source/src/')
from ripper.parser.Parser import Parser
from ripper.handler.converter.langconv import Converter
import re
import datetime
from ripper.core.Utils import clear_url #@UnresolvedImport
# Placeholder for shared HTTP headers (populated by the parsers).
gheaders = None
# Force the process-wide default encoding to UTF-8 (Python 2 only;
# reload() is needed because site.py removes setdefaultencoding).
reload(sys)
sys.setdefaultencoding('utf-8') #@UndefinedVariable
# Forum ids whose posts get an extra category tag attached.
NEED_TAGS = (60 , 249, 6)
# Base URL prefix for attachment links on the SIS bulletin board.
SIS_BBS_PREFIX = 'http://sexinsex.net/bbs/'
class SisParser(Parser):
    """Parser for the sexinsex (SIS) forum."""

    # Number of listing pages to scan.
    POST_PAGES = 5
    # Regex for forum boards on the index page.
    FORUM_PATTERN = r"<a href='(thread\.php\?fid=[0-9]+)' class='fnamecolor'>[\n.]*<b>(.*?)</b></a><br>"
    # Regex for rows in a thread listing.
    POST_PATTERN = r"<a target=_blank href='(htm_data.*?)'>(.*?)</a>.*?(\n.*?)*?</td>\n<td class=smalltxt>\n(.*?)\n<br>(.*?)</td>"
    # Regex for the body of a post.
    THREAD_CONTENT = r"<span class='tpc_content'>(.*)"
    # Regex for torrent link addresses.
    TORRENT_URL_PATTERN = r'h\s*?t\s*?t\s*?p\s*?:\s*?/\s*?/.*?/.*?\s' # not enough yet, sth like ',' may appears in the tail
    # Regexes for image URLs (single- and double-quoted src attributes).
    IMAGE_URL_PATTERN1 = r"<img(.*?)src='(.*?)'\s*.*?>"
    IMAGE_URL_PATTERN2 = r'<img(.*?)src="(.*?)"\s*.*?>'

    # Downloading attachments requires a login; this is the callback
    # handed to the Engine.
    def relogin(self, isFinishHtml=False):
        """Log in again once HTML fetching has finished.

        Retries up to 10 times and exits the process on repeated
        failure. NOTE(review): self.isLogin is never set True here, so
        every call with isFinishHtml=True re-attempts login — confirm
        intended.
        """
        if False == isFinishHtml:
            return
        if self.isLogin == False:
            for i in range(0, 10):
                try:
                    self.login_sis()
                    return
                except Exception, ex:
                    print ex, 'retry...'
                    continue
            print 'login failed.'
            sys.exit()

    def goto_page(self, pageUrl, num=1):
        # Build the URL of listing page *num*: '<pageUrl><num>.html'.
        p = str(num) + '.html'
        return pageUrl + p

    # Parse a thread listing page.
    def parse_obj_list(self, pageIndex, fid=0):
        """Yield one dict per thread found on listing page *pageIndex*.

        Each dict carries the detail URL, title (plus a simplified-
        Chinese copy), forum id, post date and category tag. Logs in
        on first use. NOTE(review): the fid parameter is overwritten
        from the URL below, so the caller's value is ignored.
        """
        # pageIndex.url = pageIndex.url.replace('www.sexinsex.net', '67.220.90.30')
        c = Converter('zh-hans')
        # Template configuration (disabled; kept for reference):
#        itemName = self.item.get_property('name')
#        # Maps item names to board ids in the original database
#        forumMap = {
#                    u'sis_60' : 60,
#                    u'sis_64' : 64,
#                    u'sis_62' : 64,
#                    u'sis_68' : 68,
#                    u'sis_sw' : 249,
#                    u'sis_sm' : 249,
#                    }
#        fid = forumMap[itemName]
        # The forum id is embedded in the URL path: .../forum-<fid>-<page>.html
        fid = urlparse.urlparse(pageIndex)[2].split('-')[1]
        # Log in once per parser instance.
        if self.isLogin == False:
            self.login_sis()
            self.isLogin = True
        # text = self.get_content('http://www.sexinsex.net/forum/forumdisplay.php?fid=64')
        text = self.get_content(pageIndex)
        sp = self.get_soup(text=text)
        tables = sp.findAll('table')
        # Threads live in the second-to-last table (the others are
        # stickies or ads).
        table = list(tables)[-2]
        soup = table
        spans = table.findAll('span')
        ths = table.findAll('th')
        posts = [] # url, name
        # Title and URL per row.
        for th in ths:
            spans = list(th.findAll('span'))
            if len(spans) == 0 : continue
            # Optional category label.
            em = ''
            try:
                em = th.findAll('em')[0].findAll('a')[0].contents[0].__str__()
            except Exception:
                pass
            span = spans[0]
            if not span.has_key('id'): continue
            sid = span['id']
            if sid == None or 'thread_' not in sid : continue
            a = span.findAll('a')
            if len(a) == 0 : continue
            a = a[0]
            name = a.contents[0].__str__()
            href = a['href']
            posts.append([href, name, em])
        # Post dates: collect every <em> whose text looks like a date.
        ems = soup.findAll('em')
        pt = r'[0-9]+-[0-9]+-[0-9]+'
        bems = []
        for a in ems:
            if a.contents and a.contents[0].__str__() and re.match(pt, a.contents[0].__str__()):
                bems.append(a.contents[0].__str__())
        bems = list(p for p in bems if p != None)
        # When there are more dates than posts, drop the leading ones —
        # presumably they belong to stickies; TODO confirm.
        if len(bems) > len(posts):
            bems = bems[1:]
        print len(bems), len(posts)
        ct = 0
        for p in posts:
            p.append(bems[ct])
            ct = ct + 1
        # Yield the thread records.
        for p in posts:
            postDetailUrl = p[0]
            title = p[1]
            titleSimple = title
            # convert title to simple chinese
            try:
                titleSimple = c.convert(title)
            except Exception, ex:
                pass
            tag = p[2]
            postDate = p[3]
            postDateTime = datetime.datetime.strptime(postDate, '%Y-%m-%d')
            obj = {'postDetailUrl' : 'http://www.sexinsex.net/forum/' + postDetailUrl,
                   'title' : title,
                   'titleSimple' : titleSimple,
#                   'author' : 'harvester',
                   'forumId' : fid,
                   'postDate' : postDate,
                   'tag' : tag
                   }
            # Tag decoration for NEED_TAGS forums (disabled):
#            if fid[0] in NEED_TAGS:
#                if itemName == 'sis_sw':
#                    tag = u'丝袜-' + tag
#                if itemName == 'sis_sm':
#                    tag = u'SM-' + tag
#                obj['tag'] = tag
            yield obj

    def getpid(self, purl):
        # Unique post id: 'sis_' + thread id from .../thread-<tid>-....
        return 'sis_'+ purl.split('/')[-1].split('-')[1]

    # Property resolver
    def get_collectDate(self, url, prop, obj):
        # Collection timestamp (UTC, as a string).
        return str(datetime.datetime.utcnow())

    # Property resolver
    def get_rawContent(self, url, prop, obj):
        # Raw page content is deliberately not stored.
        return 'not available'
        # return self.get_content(url)

    # Property resolver
    def get_ccontent(self, url, prop=None, obj=None):
        # First-floor content of the thread.
        ct = self.__get_post_content(url)
        return ct

    # Property resolver
    def get_urls(self, url, prop, obj):
        # No external URLs are extracted for SIS threads.
        return []

    def get_torrents(self, url, pid, destdir):
        """Download the thread's torrent attachments into *destdir*.

        Returns the saved file names ('<basename>_<pid>.torrent').
        """
        names = []
        soup = self.get_soup(url=url)
        attdl = soup.findAll('dl', {'class' : 't_attachlist'})
        if attdl != None:
            for dl in attdl:
                if u'查看BT种子信息' in dl.text:
                    a = dl.findAll('a')[0]
                    # Torrent file name from the link text.
                    fname = a.text
                    fname = '%s_%s.torrent' % (fname.split('.')[0], pid)
                    turl = SIS_BBS_PREFIX + a['href']
                    self.get_file(turl, destdir, fname)
                    names.append(fname)
        return names

    # Property resolver
    def get_images(self, text):
        """Extract image URLs from *text*, excluding GIFs."""
        # url = url.replace('www.sexinsex.net', '67.220.90.30')
        list_imgs1 = []
        list_imgs2 = []
        for m in re.findall(SisParser.IMAGE_URL_PATTERN1, text):
            list_imgs1.append(clear_url(m[1]))
        for m in re.findall(SisParser.IMAGE_URL_PATTERN2, text):
            # NOTE(review): this appends to list_imgs1, so list_imgs2
            # stays empty and the extend below is a no-op; the final
            # result is the same either way.
            list_imgs1.append(clear_url(m[1]))
        list_imgs1.extend(list_imgs2)
        list_imgs1 = [i for i in list_imgs1 if '.gif' not in i.lower()]
        return list_imgs1

    def __get_post_content(self, url):
        # Return the first floor's content; the author is only printed
        # for debugging.
        contents = self.get_all_content(url)
        ct = ''
#        try:
#            ct = ''.join(map(lambda a : a.__str__(), contentDivs[0].contents))
#        except Exception, err:
#            print 'SisParser.__get_post_content => %s, %s fails' %(str(err), url)
        print contents[0]['author']
        return contents[0]['content']

    def get_all_content(self, url):
        """Fetch thread *url* and return a list of per-floor dicts.

        Each dict has 'author' and 'content'; the first floor also
        carries 'images'. Text is converted to simplified Chinese when
        the converter succeeds.
        """
        if self.isLogin == False:
            self.login_sis()
            self.isLogin = True
        c = Converter('zh-hans')
        soup = self.get_soup(url=url)
        authors = []
        contents = []
        authorTds = soup.findAll('td', {'class' : 'postauthor'})
        contentDivs = soup.findAll('div', {'class' : 't_msgfont'})
        # Authors: plain <cite> text, or the first link's text.
        for i in range(0, len(authorTds)):
            td = authorTds[i]
            cites = td.findAll('cite')
            if len(cites) == 0 : continue
            if len(cites[0].findAll('a')) == 0 :
                authors.append(cites[0].contents[0].__str__().strip())
            else:
                a = cites[0].findAll('a')[0]
                authors.append(a.contents[0].__str__())
        # Contents, one entry per floor.
        # for i in range(1, len(contentDivs), 2):
        for i in range(0, len(contentDivs)):
            div = contentDivs[i]
            if len(div.contents) != 0:
                contents.append(''.join(map(lambda a: a.__str__(), div.text)))
            else:
                contents.append('no content for this')
        if len(contents) == 0 : # one floor:
            # Arena-style board: different markup, single floor.
            if len(contentDivs) == 0:
                contentDivs = soup.findAll('td', {'class' : 'postcontent'})
                content = contentDivs[0].text
                images = self.get_images(contentDivs[0].__str__())
                return [{'author': None, 'content': content, 'images': images}, ]
            # else
            contents = [contentDivs[0].__str__(), ]
            try:
                contents[0] = c.convert(contents[0])
            except Exception, ex:
                pass
            return [ {'author': authors[0], 'content': contents[0]} ]
        content_list = []
        ###
        ### Author/content counts can differ:
        ### if a floor was muted by a moderator its content is empty —
        ### the t_msgfont div is replaced by class="notice" — so pad the
        ### contents list with blanks to realign it with authors.
        lct, lau = len(contents) , len(authors)
        if lct < lau:
            gap = lau - lct
            for i in range(0, gap):
                contents.append(' ')
        assert len(contents) == len(authors)
        for i, text in enumerate(contents):
            ctdict = {}
            ctdict['author'] = authors[i]
            try:
                text = c.convert(text)
            except Exception, ex:
                pass
            ctdict['content'] = text
            content_list.append(ctdict)
        # First floor: attach images and use the raw div text.
        content_list[0]['images'] = self.get_images(contentDivs[0].__str__())
        content_list[0]['content'] = contentDivs[0].text
        ct = content_list[0]['content']
        ### Strip junk
        # Download links: images + torrents section gets cut off.
        if 'HTTP 免空下載' in ct:
            content_list[0]['content'] = ct.split('HTTP 免空下載')[0]
        # NOTE(review): debug dump of the first floor to a local file.
        with open('1.txt', 'w') as f:
            f.write(content_list[0]['content'])
        return content_list
def testDetail(url):
a = SisParser(None, needProxy=True)
# a.login_sis()
# with open('json.txt', 'w') as f:
# for obj in a.parse_obj_list('http://sexinsex.net/bbs/forum-64-2.html'):
# ct = json.dumps(obj, ensure_ascii=False)
# f.write(ct + '\n')
ct = None
error_log = 'errors.txt'
try:
ct = a.get_all_content(url)
except Exception, err:
# 解析时出现问题
if not os.path.exists(error_log):
with open(error_log, 'w') as f:
f.write('%s, %s \n' % (err, url) )
else:
with open(error_log, 'a') as f:
f.write('%s, %s \n' % (err, url) )
a.get_torrents(url, 'sis_123', 'e:/')
# cc = '\n'.join(map(lambda x: x['content'], ct))
# print cc
# with open('11.txt', 'w') as f:
# f.write(cc)
def test_text():
fn = 'torrenttest.html'
ct = open(fn).read()
soup = BeautifulSoup.BeautifulSoup(ct)
attdl = soup.findAll('dl', {'class' : 't_attachlist'})
dl = attdl[0]
if u'查看BT种子信息' in dl.text:
a = dl.findAll('a')[0]
# 种子文件名
torrentfilename = a.text
turl = SIS_BBS_PREFIX + a['href']
print turl
if __name__ == '__main__':
    # Ad-hoc manual tests; uncomment a line to exercise it.
#    parser = SisParser(None, needProxy=True)
#    parser.parse_obj_list(None, None, None, None)
#    testDetail('http://www.aisex.com/bt/htm_data/4/1109/485578.html')
#    testDetail('http://www.sexinsex.net/bbs/thread-1307060-5-1.html')
#    testDetail('http://sexinsex.net/bbs/thread-4776774-1-1.html')
#    a = open('json.txt').read().split('\n')[0]
#    posts = json.loads(a)
#    print posts['title']
#    testDetail('http://sexinsex.net/bbs/thread-4536351-1-1.html')
#    testDetail('http://sexinsex.net/bbs/viewthread.php?tid=3940994&highlight=HAVD')
#    testDetail('http://sexinsex.net/bbs/thread-4774084-1-2.html') # page with insufficient permissions
    testDetail('http://sexinsex.net/bbs/thread-4784055-1-3.html') # junk-stripping case
#    test_text()
| Python |
#coding=utf-8
'''
Created on 2011-8-30
http相关, dom解析相关
@author: chris
'''
from ripper.core.Exceptions import ParseException, DownloadException
import urllib, os
import cookielib
from ripper.core.EncodingConvert import zh2gbk, zh2utf8, zh218030
from cookielib import FileCookieJar
try:
from bs4 import BeautifulSoup
except Exception:
from BeautifulSoup import BeautifulSoup
#from bs4 import BeautifulSoup
from ripper.core.Utils import enable_proxy, disable_proxy #@UnresolvedImport
import HTMLParser
import re
from ripper.parser import BeautifulSoup as bs32
#from bs4 import BeautifulSoup
import urllib2
import time
from ripper.core import EncodingConvert
# Monkey-patch HTMLParser's attribute-matching regex — presumably to
# tolerate the sloppy markup these forums emit (the unquoted-value
# alternative additionally excludes [, ], {, }, | and quote characters).
# NOTE(review): this mutates the HTMLParser module globally for the
# whole process — confirm no other consumers rely on the stock regex.
HTMLParser.attrfind = re.compile(
    r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
    r'(\'[^\']*\'|"[^"]*"|[^\s>^\[\]{}\|\'\"]*))?')
class Parser(object):
    '''
    Web page/text parser: HTTP fetching, login helpers and DOM parsing.
    '''
    # Old constructor (disabled; kept for reference):
#    def __init__(self, url='', text=''):
#        '''
#        Constructor
#        '''
#        if '' != text:
#            self.content = text
#        elif '' != url and '' == text:
#            self.content = self.__request_content(url)
#        else :
#            raise ParseException('解析异常, 没有可解析的内容或URL')

    def __init__(self, item=None, needProxy=False, needConvert=True):
        """Set up cookie handling and the per-instance content cache.

        item        -- project item descriptor (opaque here)
        needProxy   -- route requests through the proxy helpers
        needConvert -- convert fetched bytes to UTF-8
        """
        self.item = item
        self.curUrl = ''
        self.isLogin = False
        self.needProxy = needProxy
        self.cache = {} # url:content
        cookieJar = cookielib.CookieJar()
        cookie_support = urllib2.HTTPCookieProcessor(cookieJar)
        opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
        # NOTE(review): installs a process-global opener, so every
        # urllib2 user in this process shares this cookie jar.
        urllib2.install_opener(opener)
        self.needConvert = needConvert

    def login_aisex(self):
        """Log in to aisex.com (proxy enabled around the request)."""
        #login
        postdata = urllib.urlencode({
            'loginuser':'outlookxp',
            'loginpwd':'123123',
            'hideid':'0',
            'cktime':'31536000',
            'jumpurl':'http://www.aisex.com/bt/thread.php?fid=4',
            'loginpwd':'123123',
            'step':'2',
        })
        gheaders = {
            'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
            'referer':'http://www.aisex.com/bt'
        }
        req = urllib2.Request(
            url = 'http://www.aisex.com/bt/login.php',
            data = postdata,
            headers = gheaders
        )
        enable_proxy()
        r = urllib2.urlopen(req)
        text = r.read()
        text = EncodingConvert.zh2utf8(text)[1]
        disable_proxy()
        return

    def login_mySite(self):
        """Log in to the internal mirror site."""
        #login
        postdata = urllib.urlencode({
            'user':'dajiji',
            'pass':'1',
        })
        gheaders = {
            'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
        }
        req = urllib2.Request(
            url = 'http://h.pphime.com/z/mlist/',
            data = postdata,
            headers = gheaders
        )
        r = urllib2.urlopen(req)
        text = r.read()
        text = EncodingConvert.zh2utf8(text)[1]
        return

    def login_sis(self):
        """Log in to sexinsex, scraping the formhash token first."""
        #login
        pattern = re.compile("<input type=\"hidden\" name=\"formhash\" value=\"\w*\" \/>")
        content = self.get_content('http://sexinsex.net/forum/logging.php?action=login')
        formhash = pattern.findall(content)
        # The 8-character token sits just before the closing '" />'.
        formhash = formhash[0][-12:-4]
        postdata = urllib.urlencode({
            'loginfield':'username',
            'username':'outlookxx123',
            'password':'bjitsm123456',
            'referer': 'http://sexinsex.net/forum',
            'formhash':formhash,
            'questionid':'0',
            'answer':''
        })
        self.gheaders = {
            'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
            'referer':'http://sexinsex.net'
        }
        req = urllib2.Request(
            url = 'http://sexinsex.net/forum/logging.php?action=login&loginsubmit=yes&inajax=1',
            data = postdata,
            headers = self.gheaders
        )
        if self.needProxy == True:
            enable_proxy()
        # Persistent-cookie-jar variant (disabled):
#        filename = 'cookie.txt'
#        ckjar = cookielib.MozillaCookieJar(filename)
#        ckproc = urllib2.HTTPCookieProcessor(ckjar)
#        opener = urllib2.build_opener(ckproc)
        r = urllib2.urlopen(req)
        # r = opener.open(req)
        text = r.read()
        # print zh2utf8(text)[1]
        # ckjar.save(ignore_discard=True, ignore_expires=True)
        return

    def login_dg(self):
        """Log in to bbs.dg2012.com and mark this parser as logged in."""
        print 'loggin in dg2012...'
        #login
        pattern = re.compile("<input type=\"hidden\" name=\"formhash\" value=\"\w*\" \/>")
        content = self.get_content('http://bbs.dg2012.com/logging.php?action=login')
        formhash = pattern.findall(content)
        formhash = formhash[0][-12:-4]
        postdata = urllib.urlencode({
            'loginfield':'username',
            'username': zh2gbk('牙疼小狐狸')[1],
            'password':'123',
            'referer': 'index.php',
            'cookietime': '2592000',
            'formhash':formhash,
            'questionid':'0',
            'answer':'',
            'styleid':'',
            'loginmode':'',
            'loginsubmit':'true',
        })
        self.gheaders = {
            'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
            'referer':'http://bbs.dg2012.com'
        }
        req = urllib2.Request(
            url = 'http://bbs.dg2012.com/logging.php?action=login',
            data = postdata,
            headers = self.gheaders
        )
        r = urllib2.urlopen(req)
        text = r.read()
        self.isLogin = True
        return

    def get_soup_attr(self, obj, idx1=None, idx2=None, name=None):
        # Fetch a tag attribute by index pair, falling back to name
        # lookup — presumably to tolerate both BeautifulSoup attr
        # layouts; TODO confirm.
        try:
            return obj.attrs[idx1][idx2]
        except Exception, ex:
            return obj.attrs[name]

    # needUrl: return [content, actual URL after a possible 302 redirect]
    def get_content(self, url, needException=False, needUrl=False, gheaders=None, needContent=True):
        """Fetch *url* through the cache and return its content
        (optionally with the post-redirect URL)."""
        # Cache to avoid repeated requests.
        if self.cache.has_key(url):
            # NOTE(review): a cache hit ignores needUrl and never
            # returns the redirected URL — confirm callers cope.
            return self.cache[url]
        try:
            # NOTE(review): needException is passed positionally into
            # get_page_html's *retrys* parameter (second positional),
            # so it does not reach that method's needException —
            # confirm intended.
            ct, burl = self.get_page_html(url, needException, needUrl=True, gheaders=gheaders, needContent=needContent)
            self.cache[url] = ct
            if needUrl:
                return ct, burl
            else:
                return ct
        except ParseException, ex:
            print ex
            raise ex
        except DownloadException, ex: # needed by uHentai
            raise ex
        # NOTE(review): everything below is unreachable — the try block
        # returns or raises on every path.
        self.curUrl = url
        self.ct = self.ct.replace('\r\n','\n')
        return self.ct

    def get_soup(self, text=None, url=None):
        """Build a soup from *text*, or from the content fetched at *url*."""
        soup = None
        if text != None :
            soup = self.__get_soup(text)
            return soup
        if url != None:
            text = self.get_content(url)
            soup = self.__get_soup(text)
        return soup

    def __get_soup(self, text):
        # Parse with the primary BeautifulSoup; fall back to the
        # bundled 3.2 parser when it chokes on the markup.
        soup = None
        try:
            soup = BeautifulSoup(text)
        except HTMLParser.HTMLParseError, ex:
            print ex
            print 'switch to 3.2'
            soup = bs32.BeautifulSoup(text)
        return soup

    # Build the URL of a given listing page; pageUrl carries no paging
    # parameter. Base implementation is a no-op; subclasses override.
    def goto_page(self, pageUrl, num=1):
        return pageUrl

    # Produce the entity list and each entity's unique key (name, url,
    # etc...). Implemented by subclasses.
    def parse_obj_list(self, pageIndex, keyProp, otherProps, needProxy=False):
        pass

    # Parse a single property. Implemented by subclasses.
    def parse_property(self, property):
        pass

    # Request a web page.
    def get_page_html(self, uri, retrys=10, needException=False, needUrl=False, gheaders=None, needContent=True):
        """Fetch *uri*, retrying up to *retrys* times (5s apart) before
        raising ParseException."""
        #i_lock.acquire()
        #time.sleep(WORK_INTERVAL)
        if self.needProxy == True:
            # Enable the proxy server.
            enable_proxy()
        try:
            # NOTE(review): gheaders=None is hard-coded here, so any
            # caller-supplied headers are dropped — confirm intended.
            ct, url = self._get_page_html(uri, gheaders=None, needContent=needContent)
            if needUrl:
                return ct, url
            else:
                return ct
        except Exception, ex:
            if needException==True:
                raise DownloadException('needException')
            print 'error getting', uri
            print ex
            if retrys > 0:
                print 'retry'
                time.sleep(5)
                return self.get_page_html(uri, retrys=retrys-1)
            else :
                msg = 'already retry %d times, unable to fetch.' % retrys
                print msg
                raise ParseException(msg)
        finally:
            pass
        # NOTE(review): unreachable — the try block returns or raises on
        # every path, so the proxy is never disabled here.
        if self.needProxy == True:
            disable_proxy()
        #i_lock.release()

    def _get_page_html(self, uri, gheaders=None, needContent=True):
        """Single fetch attempt; returns (content, actual_url)."""
        print 'fetching ', uri
        # NOTE(review): the gheaders parameter is overwritten here, so
        # callers can never supply custom headers via this path.
        gheaders = {
            'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
        }
        req = None
        # Cookie-file opener (disabled; kept for reference):
#        urlOpener = urllib2.build_opener \
#        (urllib2.HTTPCookieProcessor(FileCookieJar('cookie.txt')))
        if gheaders != None:
            try:
                req = urllib2.Request(url=uri, headers=gheaders)
            except AttributeError, err:
                raise ParseException(str(err))
        else:
            try:
                req = urllib2.Request(uri)
            except AttributeError, err:
                raise ParseException(str(err))
        if needContent == False:
            return None, None
        breq = urllib2.urlopen(req)
        # breq = urlOpener.open(req)
        actualUrl = breq.url
        ct = breq.read()
        if self.needConvert == True:
            ct = zh2utf8(ct)[1]
        return ct, actualUrl

    def get_file(self, uri, dir, fname):
        """Download *uri* into dir/fname; returns (path, byte count)."""
        if self.needProxy:
            enable_proxy()
        print 'fetching ', uri
        gheaders = {
            'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
        }
        req = None
        if gheaders != None:
            try:
                req = urllib2.Request(url=uri, headers=gheaders)
            except AttributeError, err:
                raise ParseException(str(err))
        else:
            try:
                req = urllib2.Request(uri)
            except AttributeError, err:
                raise ParseException(str(err))
        breq = urllib2.urlopen(req)
        ct = breq.read()
        fn = os.path.join(dir, fname)
        f = open(fn, 'wb')
        f.write(ct)
        f.close()
        if self.needProxy == True:
            disable_proxy()
        return fn, len(ct)

    def get_cache(self, key):
        # Return the cached content for *key*, or None when absent.
        if self.cache.has_key(key):
            return self.cache[key]
        else:
            return None
| Python |
#coding=utf-8
'''
Created on 2011-8-30
@author: chris
'''
import sys
sys.path.append('/harvester/newRipper/ripper/src/')
import urllib2
import urllib
import cookielib
from ripper.core import EncodingConvert
from ripper.core.Utils import enable_proxy #@UnresolvedImport
from ripper.core.Utils import disable_proxy #@UnresolvedImport
from ripper.parser.Parser import Parser
import os.path
from ripper.handler.converter.langconv import Converter
import re
import datetime
import codecs
from ripper.handler.TorrentDownload import canDownload, TorrentDownloader
from ripper.core.Utils import clear_url #@UnresolvedImport
# Module-level placeholder for shared HTTP headers.
gheaders = None
class AisexParser(Parser):
    """Parser for the aisex.com forum."""

    # Number of listing pages to scan.
    POST_PAGES = 5
    # Regex for forum boards on the index page.
    FORUM_PATTERN = r"<a href='(thread\.php\?fid=[0-9]+)' class='fnamecolor'>[\n.]*<b>(.*?)</b></a><br>"
    # Regex for rows in a thread listing.
    POST_PATTERN = r"<a target=_blank href='(htm_data.*?)'>(.*?)</a>.*?(\n.*?)*?</td>\n<td class=smalltxt>\n(.*?)\n<br>(.*?)</td>"
    # Regex for the body of a post.
    THREAD_CONTENT = r"<span class='tpc_content'>(.*)"
    # Regex for torrent link addresses.
    TORRENT_URL_PATTERN = r'h\s*?t\s*?t\s*?p\s*?:\s*?/\s*?/.*?/.*?\s' # not enough yet, sth like ',' may appears in the tail
    # Regex for image URLs.
    IMAGE_URL_PATTERN = "<img(.*?)src='(.*?)'\s*.*?>"

    def goto_page(self, pageUrl, num=1):
        # Build the URL of listing page *num*: '<pageUrl>&page=<num>'.
        p = '&page=' + str(num)
        return pageUrl + p

    # Parse a thread listing page.
    def parse_obj_list(self, pageIndex, fid):
        """Yield one dict per thread on listing page *pageIndex*."""
        c = Converter('zh-hans')
        # Template configuration.
        # Maps item names to board ids in the original database.
        # NOTE(review): forumMap is never used; fid comes from the
        # parameter instead.
        forumMap = {
            'aisex_asia' : 1,
            'aisex_asia_uncen' : 1,
            'aisex_euro' : 2,
            'aisex_hentai' : 3,
            'aisex_game' : 4,
            'aisex_pic_euro' : 5,
            'aisex_pic_asia' : 6,
            'aisex_film' : 12,
        }
        # self.login_aisex()
        text = self.get_content(pageIndex)
        sp = self.get_soup(text=text)
        trs = sp.findAll('tr', {'class':'tr3 t_one'})
        for tr in trs:
            postDateTime = ''
            titleSimple = ''
            tds = tr.findAll('td')
            td1 = tds[1]
            postDetailUrl = os.path.join('http://www.aisex.com/bt/', td1.findAll('a')[0]['href'])
            title = td1.findAll('a')[0].text
            # Author and date are fused in one cell:
            b = tds[2].text # xxxx2009-01-02
            s = b.split('-')
            author = s[0][:-4]
            postDate = '-'.join([ s[0][-4:], s[1], s[2] ])
            author = author.replace('\r\n', '')
            postDate = postDate.replace('\r\n', '')
            # convert title to simple chinese
            if title :
                titleSimple = c.convert(title)
            # convert old str date col to new datime col
            postDateTime = None
            try:
                postDateTime = datetime.datetime.strptime(postDate, '%Y-%m-%d')
            except Exception, ex:
                pass
            # NOTE(review): postDateTime is computed above but an empty
            # string is yielded — confirm intended.
            yield {'postDetailUrl' : postDetailUrl,
                   'title' : title,
                   'titleSimple' : titleSimple,
                   'author' : author,
                   'forumId' : fid,
                   'postDate' : postDate,
                   'postDateTime' : ''}

    def getpid(self, purl):
        # Unique post id: 'ai_' + numeric page name from .../<id>.html.
        return 'ai_' + purl.split('/')[-1].split('.')[0]

    # Property resolver
    def get_collectDate(self, url, prop, obj):
        # Collection timestamp (UTC, as a string).
        return str(datetime.datetime.utcnow())

    # Property resolver
    def get_forumId(self, url, prop, obj):
        # NOTE(review): AisexParser._pageIndex is not defined anywhere
        # in this module — calling this raises AttributeError.
        return int(re.findall(r'fid=([0-9])', AisexParser._pageIndex)[0])

    # Property resolver
    def get_rawContent(self, url, prop, obj):
        # Raw page content is deliberately not stored.
        return 'not available'
        # return self.get_content(url)

    # Property resolver
    def get_ccontent(self, url):
        """Return the text of the thread's first floor."""
        soup = None
        try:
            soup = self.get_soup(url=url)
            floor1 = soup.findAll('div', {'class' : 'tpc_content'})[0]
            ct = floor1.text
            # NOTE(review): str.replace treats the pattern as a literal
            # string, not a regex, so this removes nothing — presumably
            # re.sub was intended; confirm.
            ct = ct.replace(AisexParser.TORRENT_URL_PATTERN, '')
            return ct
        except Exception , ex:
            print ex
            return u'Error getting content data'
        except RuntimeError, err:
            # NOTE(review): unreachable — RuntimeError is already caught
            # by the Exception clause above.
            return u'Error getting content data'
        finally:
            pass

    # Property resolver
    def get_urls(self, url):
        """Return the torrent/page URLs mentioned in the thread text,
        excluding image and stylesheet links."""
        text = self.get_content(url)
        images = self.get_images(url)
        pageUrls = []
        text = text.replace('@', '')
        for m in re.findall(AisexParser.TORRENT_URL_PATTERN, text):
            x = m.strip()
            # Do not duplicate image addresses.
            if x not in images\
                and '.css' not in x.lower() :
                pageUrls.append(clear_url(x))
        return list(set(pageUrls))

    # Property resolver
    def get_torrents(self, url, pid, fdir):
        """Download every downloadable torrent linked from *url* into
        *fdir*; returns the saved file names."""
        filenames = []
        urls = self.get_urls(url)
        for url in urls:
            if canDownload(url) == True:
                url = clear_url(url)
                t = TorrentDownloader(url)
                fname, _ = t.download(fdir)
                filenames.append(fname)
        return filenames

    # Property resolver
    def get_images(self, url):
        """Return the distinct image URLs in the thread's first floor."""
        soup = self.get_soup(url=url)
        floor1 = soup.findAll('div', {'class' : 'tpc_content'})[0]
        list_imgs = []
        for i in floor1.findAll('img'):
            list_imgs.append(i['src'])
        return list(set(list_imgs))

    def get_all_content(self, url):
        """Return [{'content', 'author', 'images'}] for the first floor.
        The author is always empty for this forum."""
        imgs = self.get_images(url)
        ct = self.get_ccontent(url)
        c = Converter('zh-hans')
        clist = []
        author = ''
        return [{'content': ct, 'author': author, 'images': imgs}, ]

    def parse_index(self):
        """Manual test: log in and print the threads of one listing page."""
        # text = open('h:/aisexlist.htm').read()
        enable_proxy()
        loginTarget = 'http://www.aisex.com/bt/login.php'
        SITE_HEADER = 'http://www.aisex.com/'
        loginText = self.get_content(loginTarget)
        #cookie
        cookieJar = cookielib.CookieJar()
        cookie_support = urllib2.HTTPCookieProcessor(cookieJar)
        opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
        urllib2.install_opener(opener)
        #login
        postdata = urllib.urlencode({
            'loginuser':'outlookxp',
            'loginpwd':'123123',
            'hideid':'0',
            'cktime':'31536000',
            'jumpurl':'http://www.aisex.com/bt/thread.php?fid=4',
            'loginpwd':'123123',
            'step':'2',
        })
        gheaders = {
            'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
            'referer':'http://www.aisex.com/bt'
        }
        req = urllib2.Request(
            url = 'http://www.aisex.com/bt/login.php',
            data = postdata,
            headers = gheaders
        )
        text = urllib2.urlopen(req).read()
        text = EncodingConvert.zh2utf8(text)[1]
        text = self.get_content('http://70.85.48.252/bt/thread.php?fid=4&search=&page=2')
        c = Converter('zh-hans')
        for m in re.findall(AisexParser.POST_PATTERN, text):
            postDateTime = ''
            titleSimple = ''
            pageUrl, title, author, postDate \
                = os.path.join('http://www.aisex.com/bt/', m[0]), m[1], m[-2], m[-1]
            # convert title to simple chinese
            if title :
                titleSimple = c.convert(codecs.getdecoder('utf-8')(title)[0])
            # convert old str date col to new datime col
            postDateTime = datetime.datetime.strptime(postDate, '%Y-%m-%d')
            print pageUrl, title, titleSimple, author, postDate, postDateTime

    def parse_detail(self):
        """Manual test: scrape rating/price fields from a saved page."""
        text = open('h:/Best of Five Dynatec Ti Reviews.htm').read()
        soup = self.get_soup(text=text)
        ratings = soup.findAll('td', {'class' : 'rating'})
        fratings = soup.findAll('td', {'class' : 'rating firstrating'})
        td = ratings[0]
        img = soup.findAll('meta', {'property' : 'og:image'})
        print img[0]['content']
        priceSpan = soup.findAll('span', {'id' : 'actual_price'})
        price = priceSpan[0].text
        print price
def testDetail(url):
a = AisexParser(None, True)
for k in a.parse_obj_list(url, 1):
print k
# t = a.get_torrents(url, None)
# print t
# print a.get_ccontent(url)
# print a.get_images(url)
# print a.get_torrents(url, '', 'e:/')
if __name__ == '__main__':
    # Ad-hoc entry point: run one live index-page smoke test.
    # parser = AisexParser(None, needProxy=True)
    # parser.parse_index()
    # testDetail('http://www.aisex.com/bt/htm_data/4/1204/519889.html')
    # testDetail('http://www.aisex.com/bt/htm_data/4/1302/558736.html')
    # testDetail('http://www.aisex.com/bt/htm_data/4/1302/558689.html')
    testDetail('http://www.aisex.com/bt/thread.php?fid=4&page=1')
| Python |
# | Python |
# coding=utf-8
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Petru Paler
from ripper.core import EncodingConvert
class BTFailure(Exception):
    """Raised when bdecoding encounters malformed data."""
def decode_int(x, f):
    """Decode a bencoded integer ('i<digits>e') starting at offset *f*.

    Returns ``(value, offset_past_terminator)``.  Rejects '-0' and any
    other leading zero, per the bencoding spec.
    """
    f += 1
    end = x.index('e', f)
    value = int(x[f:end])
    first = x[f]
    if first == '-':
        if x[f + 1] == '0':
            raise ValueError
    elif first == '0' and end != f + 1:
        raise ValueError
    return (value, end + 1)
def decode_string(x, f):
    """Decode a bencoded string ('<len>:<bytes>') starting at offset *f*.

    Returns ``(string, offset_past_string)``.  A multi-digit length may
    not start with '0'.
    """
    sep = x.index(':', f)
    length = int(x[f:sep])
    if x[f] == '0' and sep != f + 1:
        raise ValueError
    start = sep + 1
    return (x[start:start + length], start + length)
def decode_list(x, f):
    """Decode a bencoded list ('l...e') starting at offset *f*; return
    ``(list, next_offset)``.  Elements dispatch through ``decode_func``."""
    items, f = [], f + 1
    while x[f] != 'e':
        value, f = decode_func[x[f]](x, f)
        items.append(value)
    return (items, f + 1)
def decode_dict(x, f):
    # Decode a bencoded dict ('d...e') starting at offset f; returns
    # (dict, next_offset).  Keys must be strings in strictly ascending order.
    r, f = {}, f+1
    lastkey = None
    while x[f] != 'e':
        k, f = decode_string(x, f)
        # NOTE: the first comparison relies on Python 2 ordering where
        # None < any str, so None >= k is always False initially.
        if lastkey >= k:
            raise ValueError
        lastkey = k
        r[k], f = decode_func[x[f]](x, f)
    return (r, f + 1)
# Dispatch table: first character of a bencoded value -> decoder.
decode_func = {}
decode_func['l'] = decode_list
decode_func['d'] = decode_dict
decode_func['i'] = decode_int
for _digit in '0123456789':
    # any digit starts a length-prefixed string
    decode_func[_digit] = decode_string
def bdecode(x):
    """Decode a complete bencoded value; raise BTFailure on malformed input.

    Trailing bytes after a valid prefix are deliberately tolerated (some
    .torrent files carry junk after the dictionary).
    """
    try:
        value, consumed = decode_func[x[0]](x, 0)
    except (IndexError, KeyError, ValueError):
        raise BTFailure("not a valid bencoded string")
    if consumed != len(x):
        # lenient by design: do not reject data after a valid prefix
        pass
    return value
from types import StringType, IntType, LongType, DictType, ListType, TupleType
class Bencached(object):
    """Wrapper marking a value as already bencoded; emitted verbatim."""
    __slots__ = ['bencoded']

    def __init__(self, s):
        self.bencoded = s
def encode_bencached(x, r):
    """Append the pre-encoded payload of a Bencached wrapper to *r*."""
    r.append(x.bencoded)
def encode_int(x, r):
    """Append the bencoding of integer *x* ('i<x>e') to *r*."""
    r += ['i', str(x), 'e']
def encode_bool(x, r):
    """Encode a bool as the bencoded integer 1 or 0."""
    encode_int(1 if x else 0, r)
def encode_string(x, r):
    """Append the bencoding of string *x* ('<len>:<x>') to *r*."""
    r += [str(len(x)), ':', x]
def encode_list(x, r):
    """Append the bencoding of a list/tuple ('l' + items + 'e') to *r*."""
    r.append('l')
    for item in x:
        encode_func[type(item)](item, r)
    r.append('e')
def encode_dict(x, r):
    """Append the bencoding of a dict ('d' + sorted key/value pairs + 'e').

    Keys are emitted inline as strings; values dispatch through
    ``encode_func``.
    """
    r.append('d')
    for key, value in sorted(x.items()):
        r += [str(len(key)), ':', key]
        encode_func[type(value)](value, r)
    r.append('e')
# Dispatch table: Python type -> encoder function.
encode_func = {}
encode_func[Bencached] = encode_bencached
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict
try:
    # bool support only where the runtime provides BooleanType
    from types import BooleanType
except ImportError:
    pass
else:
    encode_func[BooleanType] = encode_bool
def bencode(x):
    """Return the bencoded representation of *x* as a single string."""
    pieces = []
    encode_func[type(x)](x, pieces)
    return ''.join(pieces)
def parseFile(filename):
    """Return the 'files' list from a .torrent file's info dict."""
    f = open(filename, 'rb')
    try:
        return bdecode(f.read())['info']['files']
    finally:
        f.close()
def getTorrentInfo(filename):
    # Parse a .torrent file; returns (member_filenames, human_size_string)
    # where size is already formatted as '<n> MB' / '<n> GB', or
    # (None, None) when the file is missing or unreadable.
    if filename == None :
        return None, None
    if '.torrent' not in filename:
        filename = filename + '.torrent'
    ''' in bytes '''
    r = None
    try:
        r = parseFile(filename)
    except Exception:
        return None,None
    totalSize = 0
    files = []
    for f in r:
        totalSize += f['length']
        name = f['path'][0]
        # name = name.decode('big5').encode('utf-8'
        try:
            # best-effort re-encode of the member name to utf-8
            name = EncodingConvert.zh2utf8(name)[1]
        except Exception :
            pass
        files.append(name)
    mbSz = totalSize/1024/1024
    if mbSz < 1024 :
        mbSz = '%s MB' % str(mbSz)
    else:
        mbSz = '%s GB' % str(round((mbSz /1024.0),2))
    return files, mbSz
def printTorrentInfo(filename):
    """Return the human-readable total size of a torrent ('<n> MB'/'<n> GB').

    Bug fix: ``getTorrentInfo`` already returns a formatted size string,
    but this function re-divided that string by 1024 (a TypeError) and
    crashed on unreadable torrents (size ``None``).
    """
    _files, size = getTorrentInfo(filename)
    if size is None:
        return 'unknown'
    return size
if __name__=='__main__':
    # Manual check against a local torrent file.
    print printTorrentInfo('c:/DaDuanTui.torrent')
| Python |
# coding=UTF-8
import sys
import os
try:
import Image # request PIL 1.1.6
import ImageOps
except ImportError:
print 'PIL 1.1.6 required'
class Thumbnail:
def __init__(self,orgImg,destDir,destName=None): # orginal img filename with full-path
self.org = orgImg
self.destDir = destDir
self.destName = destName
self.scaleWidth = 225.0
self.scaleHeigth = -1.0
self.dest = self.getDestImg(self.org,self.destDir)
self.file = Image.open(self.org)
self.quality = Image.ANTIALIAS
def getDestImg(self,org,destDir):
filename = os.path.basename(org)
if self.destName != None:
return destDir+'/'+self.destName
return destDir+'/'+filename
def create(self, crop=False):
'''call pil to resize the img to fixed width and heigth'''
cropHeigth = 300
orgWidth, orgHeigth = self.getOrginalSize()
if orgWidth < self.scaleWidth:
self.scaleWidth = orgWidth
self.scaleHeigth = orgHeigth
else:
ratio = orgWidth / self.scaleWidth
self.scaleHeigth = orgHeigth / ratio
# call pil to resize the image
size = int(self.scaleWidth), int(self.scaleHeigth)
self.file.thumbnail(size,self.quality)
# crop
if crop == True:
if cropHeigth > self.scaleHeigth:
cropHeigth = self.scaleHeigth
box = (0,0,int(self.scaleWidth),int(cropHeigth))
region = self.file.crop(box)
#region = region.transpose(Image.ROTATE_180)
img = Image.new(self.file.mode,box[2:])
img.paste(region, box)
# print 'crop>>',box
try:
img.save(self.dest,'JPEG')
except IOError, err:
if img.mode != "RGB":
img = img.convert("RGB")
img.save(self.dest,'JPEG')
# 120x120
i2 = ImageOps.fit(self.file, (120, 120))
smallcrop = self.dest.replace('crop_', 'cropsmall_')
i2.save(smallcrop)
return int(self.scaleWidth),int(cropHeigth)
self.file.save(self.dest,'JPEG')
return int(self.scaleWidth), int(cropHeigth)
def reduce(self, scaleWidth=500):
'''call pil to resize the img to fixed width and heigth'''
orgWidth, orgHeigth = self.getOrginalSize()
if orgWidth < scaleWidth:
return
else:
ratio = orgWidth / self.scaleWidth
self.scaleHeigth = orgHeigth / ratio
# call pil to resize the image
size = int(self.scaleWidth), int(self.scaleHeigth)
self.file.thumbnail(size,self.quality)
self.file.save(self.dest,'JPEG')
def getOrginalSize(self):
''' call pil to read the width and heigth '''
return self.file.size
def createThumbnail(fname):
# print 'creating thumbnail for',fname
thumbnailPath = os.path.abspath(os.path.dirname(fname))
destName = os.path.basename(fname)
try:
thumbnail = Thumbnail(fname, thumbnailPath, destName)
thumbnail.create(False)
except Exception, ex:
print ex
# print 'thumbnail created.'
def createThumbnailForDir(dirname):
    """Run createThumbnail over every entry in *dirname*."""
    for entry in os.listdir(dirname):
        createThumbnail(os.path.join(dirname, entry))
def test():
    """Manual check: build a non-cropped thumbnail of a local file."""
    Thumbnail('c:/ray.jpg', 'c:/bea/', 'crop2_ray.jpg').create(False)
def test2():
    # Scratch test: re-saves the source over itself; `dest` is unused.
    org = 'c:/111.jpg'
    dest = 'c:/222.jpg'
    f = Image.open(org).save(org)
if __name__ == '__main__':
    # Manual check: build a cropped thumbnail for one local image.
    a = Thumbnail('c:/dx/mpl-studios-ava-cup-of-tea-72.jpg', 'C:/dx/', 'crop_mpl-studios-ava-cup-of-tea-72.jpg')
    a.create(True)
    # test2()
    # createThumbnailForDir('c:/images')
    # i = Image.open('c:/mpl-studios-ava-cup-of-tea-7.jpg')
    # i2 = ImageOps.fit(i, (120, 120))
    # i2.save('c:/mpl-studios-ava-cup-of-tea-73.jpg')
    ## print i.size
    ## i = i.resize(i.size(), )
    # i.save('c:/mpl-studios-ava-cup-of-tea-72.jpg')
#coding=utf-8
from ripper.core import EncodingConvert
from ripper.core.Exceptions import DownloadException
import uuid
import urllib,sys,os,re,time
from ripper.core.Utils import enable_proxy, disable_proxy #@UnresolvedImport
from ripper.core.Utils import clear_url #@UnresolvedImport
import socket
import urlparse
import md5
try:
import Image # request PIL 1.1.6
except ImportError:
print 'PIL 1.1.6 required'
# Sockets give up after 35s so a dead mirror can't hang the downloader.
socket.setdefaulttimeout(35)
NAME_MAP = ('a b c d e f g h i j k l m n o p q r s t u v w x y z').split()
__doc__='''
http下载相关
'''
class HttpHandler:
''' http下载类 '''
def __init__(self, baseDir, useProxy=False):
self.baseDir = baseDir
self.defaultUseProxy = useProxy
self.useProxy = useProxy
self.proxies = {}
if useProxy == True:
self.enableProxy()
def enableProxy(self):
self.useProxy = True
# for url in Utils.getConfig('httpproxy').split(','):
# self.proxies['http'] = url
enable_proxy()
def disableProxy(self):
self.useProxy = False
self.proxies = {}
disable_proxy()
# 下载html页面
def getContent(self,burl,needConvert=False):
burl = clear_url(burl)
conn = urllib.urlopen(burl,proxies=self.proxies)
tstart = time.time()
content = conn.read()
tcost = str(time.time() - tstart)
noticeText = 'I have parsed '+burl+',It costs'+tcost+' seconds'+ (self.useProxy == True and '(throught proxy)' or '')
encc,content = EncodingConvert.zh2utf8(content)
print 'page encoding:',encc
if float(tcost) > 30.0 :
noticeText = noticeText + ', What the fuck, why takes so long...'
elif float(tcost) > 100.0 :
noticeText = noticeText + 'dude, you may consult 10000 :)'
# 文本格式转换
if needConvert == True:
content = content.replace('\r\n','\n')
return content
def getImage_safe(self,imgUrl, retrys=3):
for i in range(0, retrys):
try:
vals = self.getImage(imgUrl)
except DownloadException, ex:
print '%s, retry %d times remaining...' % (imgUrl, retrys-i)
time.sleep(3)
continue
return vals
return '404.jpg', -1, (0, 0)
# 通过url下载图片,返回保存在本地的filename
def getImage(self,imgUrl,preAssignFilename=None, fixed=False):
# imgUrl = fixurl(imgUrl)
filename = None
if None == preAssignFilename :
filename = get_file_name(imgUrl)
else:
filename = preAssignFilename
try:
opener = urllib.FancyURLopener(self.proxies)
imgDir = self.baseDir
tstart = time.time()
fn,headers = opener.retrieve(imgUrl)
tp = str(headers.gettype())
# 根据header的type判断文件类型并添加扩展名
if re.match('.*?jp[e]*g.*',tp):
filename = filename + '.jpg'
elif re.match('.*?gif.*',tp):
filename = filename + '.gif'
elif re.match('.*?bmp.*',tp):
filename = filename + '.bmp'
elif re.match('.*?png.*',tp):
filename = filename + '.bmp'
elif tp == 'application/octet-stream':
filename = filename + os.path.basename(fn)
elif 'image' not in tp:
# 非图片内容
if fixed == False:
return self.getImage(fixurl(imgUrl),fixed=True)
else:
raise DownloadException(u'%s not a images: %s' %( imgUrl ,tp) )
# 保存文件
absName = os.path.join(imgDir, filename)
ct = open(fn,'rb').read()
if len(ct) < 10:
raise DownloadException('image too small')
f = open(absName,'wb')
f.write(ct)
f.close()
tcost = str(time.time() - tstart)[0:5]
notice = 'Download finished:'+filename+',costs'+tcost+' seconds.'+ (self.useProxy == True and '(throught proxy)' or '')
fl = -1
# compress img
image_size = (0, 0)
# 最宽图片大小
maxwidth = 900
try:
img = Image.open(os.path.join(imgDir, filename))
image_size = img.size
if image_size[0] > maxwidth:
resizeimage(os.path.join(imgDir, filename), maxwidth)
else:
img.save(os.path.join(imgDir, filename))
img.close()
except Exception:
pass
# get file size
try:
fl = os.path.getsize(os.path.join(imgDir, filename))
except Exception:
pass
return filename,fl,image_size
except UnicodeError, err:
print err
if fixed == False:
return self.getImage(fixurl(imgUrl),fixed=True)
if self.useProxy == True:
self.disableProxy()
raise DownloadException(u'Download failed:' + imgUrl)
else:
self.enableProxy()
return self.getImage(imgUrl,filename)
except Exception, ex:
print ex
raise DownloadException(u'Download failed:' + imgUrl)
finally:
if self.useProxy == True:
self.disableProxy()
def getFile(self,imgUrl,preAssignFilename=None, fixed=False):
filename = None
if None == preAssignFilename :
filename = get_file_name(imgUrl)
else:
filename = preAssignFilename
try:
opener = urllib.FancyURLopener(self.proxies)
imgDir = self.baseDir
tstart = time.time()
fn,headers = opener.retrieve(imgUrl)
tp = str(headers.gettype())
# 根据header的type判断文件类型并添加扩展名
if re.match('.*?jp[e]*g.*',tp):
filename = filename + '.jpg'
elif re.match('.*?gif.*',tp):
filename = filename + '.gif'
elif re.match('.*?bmp.*',tp):
filename = filename + '.bmp'
elif re.match('.*?png.*',tp):
filename = filename + '.bmp'
elif re.match('.*?torrent.*',tp):
filename = filename + '.torrent'
elif tp == 'application/octet-stream':
filename = filename + os.path.basename(fn)
elif 'image' not in tp:
# 非图片内容
if fixed == False:
return self.getImage(fixurl(imgUrl),fixed=True)
else:
raise DownloadException(u'not a images:' + imgUrl)
# 保存文件
absName = os.path.join(imgDir, filename)
ct = open(fn,'rb').read()
if len(ct) < 10:
raise DownloadException('image too small')
f = open(absName,'wb')
f.write(ct)
f.close()
tcost = str(time.time() - tstart)[0:5]
notice = 'Download finished:'+filename+',costs'+tcost+' seconds.'+ (self.useProxy == True and '(throught proxy)' or '')
fl = -1
# compress img
try:
Image.open(os.path.join(imgDir, filename)).save(os.path.join(imgDir, filename))
except Exception:
pass
# get file size
try:
fl = os.path.getsize(os.path.join(imgDir, filename))
except Exception:
pass
return filename,fl
except UnicodeError, err:
print err
if fixed == False:
return self.getImage(fixurl(imgUrl),fixed=True)
if self.useProxy == True:
self.disableProxy()
raise DownloadException(u'Download failed:' + imgUrl)
else:
self.enableProxy()
return self.getImage(imgUrl,filename)
except Exception, ex:
print ex
raise DownloadException(u'Download failed:' + imgUrl)
finally:
if self.useProxy == True:
self.disableProxy()
def fixurl(url):
    # IRI -> URI: percent-encode each component of a (possibly unicode)
    # url so the stdlib openers accept it.
    # turn string into unicode
    if not isinstance(url,unicode):
        url = url.decode('utf8')
    # parse it
    parsed = urlparse.urlsplit(url)
    # divide the netloc further
    userpass,at,hostport = parsed.netloc.partition('@')
    user,colon1,pass_ = userpass.partition(':')
    host,colon2,port = hostport.partition(':')
    # encode each component
    scheme = parsed.scheme.encode('utf8')
    user = urllib.quote(user.encode('utf8'))
    colon1 = colon1.encode('utf8')
    pass_ = urllib.quote(pass_.encode('utf8'))
    at = at.encode('utf8')
    host = host.encode('idna')  # international domain names -> punycode
    colon2 = colon2.encode('utf8')
    port = port.encode('utf8')
    path = '/'.join( # could be encoded slashes!
        urllib.quote(urllib.unquote(pce).encode('utf8'),'')
        for pce in parsed.path.split('/')
    )
    query = urllib.quote(urllib.unquote(parsed.query).encode('utf8'),'=&?/')
    fragment = urllib.quote(urllib.unquote(parsed.fragment).encode('utf8'))
    # put it back together
    netloc = ''.join((user,colon1,pass_,at,host,colon2,port))
    return urlparse.urlunsplit((scheme,netloc,path,query,fragment))
# Deterministic local file name derived from the url (md5 hex digest),
# so the same url always maps to the same file.
def get_file_name(url):
    digest = md5.new()
    digest.update(url)
    return str(digest.hexdigest())
def getId():
    """Return a unique id: hex of the current epoch digits + a uuid1."""
    stamp = str(hex(int(str(time.time()).replace('.', ''))))
    return stamp + '-' + str(uuid.uuid1())
def resizeimage(imgpath, scaleWidth):
    # Shrink the image in place so its width is at most scaleWidth,
    # keeping the aspect ratio; images already narrow enough are untouched.
    img = Image.open(imgpath)
    orgWidth, orgHeigth = img.size
    if orgWidth > scaleWidth:
        # NOTE(review): with two int operands this is Python 2 floor
        # division, so the target height is rounded accordingly.
        ratio = orgWidth / scaleWidth
        scaleHeigth = orgHeigth / ratio
        # call pil to resize the image
        size = (int(scaleWidth), int(scaleHeigth))
        img.thumbnail(size, Image.ANTIALIAS)
        img.save(imgpath)
if __name__ == '__main__':
    # Manual check: shrink a local image to a maximum width of 500px.
    # uri = 'http://img3.douban.com/pview/event_p2ster/large/public/0e46182405722ce.jpg'
    # dd = HttpHandler('e:/datas')
    # print dd.getImage(uri)
    # print get_file_name('http://www.asd.dd.d3/dss.jpg')
    resizeimage('c:/dx/mpl-studios-ava-cup-of-tea-72.jpg', 500)
| Python |
#coding=utf-8
import urllib,urllib2,cookielib,re,datetime,socket
import EncodingConvert as ec
import time, threading, traceback, os, sys, uuid
from ripper.handler import socks
'''
Discuz论坛爬虫,带附件下载
原作http://www.colordancer.net/blog/?p=325
修改chris
2010-09-18
'''
gheaders = None                    # request headers; populated by login()
SITE_DOMAIN = ''
SITE_HEADER = SITE_DOMAIN + '/forum/'
rootpath = '.\\downloads\\'        # relative download folder (date-stamped)
ZXBASE = 'e:/zx2/'                 # base folder for saved attachments
i_lock = threading.Lock()
WORK_INTERVAL = 1
# timeout in seconds
timeout = 60
#socket.setdefaulttimeout(timeout)
def clear_url(torrentUrl):
    """Normalize a scraped url: strip spaces and '@' obfuscation, then
    truncate at the first '<', single quote or double quote."""
    cleaned = torrentUrl.replace(' ', '').replace('@', '')
    for stop in ('<', "'", '"'):
        cleaned = cleaned.split(stop)[0]
    return cleaned
# return the fragments matched by a regular expression
def getRegFromText(pt, ct):
    """findall wrapper: None when nothing matched, the single match when
    there is exactly one, otherwise the full match list."""
    matches = re.findall(pt, ct)
    if not matches:
        return None
    if len(matches) == 1:
        return matches[0]
    return matches
def getPageHtml(uri, retry=5):
    # Fetch a page with up to *retry* retries and growing back-off
    # (sleeps 8-retry seconds per attempt); returns None when all fail.
    #i_lock.acquire()
    #time.sleep(WORK_INTERVAL)
    try:
        return _getPageHtml(uri)
    except Exception, ex:
        print 'error getting', uri
        print ex
        if retry > 0:
            print 'retry'
            time.sleep(8-retry)
            return getPageHtml(uri, retry-1)
        else :
            print 'already retry %d times, game over.' % retry
    finally:
        pass
        #i_lock.release()
def _getPageHtml(uri):
    # Single fetch attempt: use the logged-in headers when available,
    # read the body and normalize it to utf-8.
    print 'fetching ', uri
    req = None
    if gheaders != None:
        req = urllib2.Request(url=uri, headers=gheaders)
    else:
        req = urllib2.Request(uri)
    ct = urllib2.urlopen(req).read()
    ct = ec.zh2utf8(ct)[1]
    return ct
    #return urllib.urlopen(uri).read()
# download a forum attachment
def getAttachment(uri, fname, retry=15):
    # Download *uri* to *fname* with up to *retry* retries and growing
    # back-off; returns fname, or None once the retries are exhausted.
    print 'downloading attach to ', fname
    try:
        #name2 = os.path.join(os.path.dirname(sys.argv[0]), fname)
        name2 = fname
        __getAttachment(uri, name2)
        # crop/compress image attachments
        # if isImageFile(name2):
        #     createThumbnail(os.path.join(ZXBASE, name2))
        return fname
    except Exception, ex:
        traceback.print_exc(file=sys.stdout)
        print 'error getting attache', uri
        if retry > 0:
            print 'retry'
            time.sleep(26-retry)
            return getAttachment(uri, fname, retry-1)
        else :
            print 'attachment already retry %d times, game over.' % retry
    finally:
        pass
def __getAttachment(uri, fname):
    # One download attempt: fetch *uri* and write the bytes under ZXBASE.
    print 'fetching ', uri
    req = None
    if gheaders != None:
        req = urllib2.Request(url=uri, headers=gheaders)
    else:
        req = urllib2.Request(uri)
    r = urllib2.urlopen(req)
    fp = open(os.path.join(ZXBASE, fname), 'wb')
    fp.write(r.read())
    fp.close()
    return fname
def login():
    u'''Log in to the forum.
    Set up the cookie jar, scrape the formhash, then POST the credentials.'''
    global gheaders
    if gheaders != None:
        print 'already login.'
        return
    # scrape formhash from the login page
    print 'prepare to login',SITE_HEADER,'...'
    pattern = re.compile("<input type=\"hidden\" name=\"formhash\" value=\"\w*\" \/>")
    content = getPageHtml(SITE_HEADER + 'logging.php?action=login')
    formhash = pattern.findall(content)
    if (len(formhash) > 0):
        formhash = formhash[0]
        formhash = formhash[-12:-4]
    # cookie jar so the session survives across requests
    cookieJar = cookielib.CookieJar()
    cookie_support= urllib2.HTTPCookieProcessor(cookieJar)
    opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
    urllib2.install_opener(opener)
    # login; NOTE(review): credentials are hard-coded
    postdata=urllib.urlencode({
        'loginfield':'username',
        'username':'outlookxx123',
        'password':'bjitsm123456',
        'referer': SITE_DOMAIN,
        'formhash':formhash,
        'questionid':'0',
        'answer':''
    })
    gheaders = {
        'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
        'referer':SITE_DOMAIN
    }
    print 'login.. '
    req = urllib2.Request(
        url = SITE_HEADER + 'logging.php?action=login&loginsubmit=yes&inajax=1',
        data = postdata,
        headers = gheaders
    )
    result = urllib2.urlopen(req).read()
    #f = open('logs/last_login_result.html', 'w')
    #f.write(result)
    #f.close()
    print 'login compelete. '
def getPages():
    """Fetch the seed thread page and return every <link> entry pointing
    at viewthread.php."""
    html = getPageHtml(SITE_HEADER + 'thread-3350257-1-1.html')
    return re.findall("<link>.*viewthread.php.*<\/link>", html)
def tgetLinks(url):
    """Fetch *url* and rip its attachments via getLinks."""
    return getLinks(getPageHtml(url))
def getLinks(pageContent):
#遍历页面
count = 1
names = []
#for url in urls:
#url = url[6:-7]
#print "parsing" + url
#pageContent = getPageHtml(url)
#print pageContent
pattern = re.compile('<a href=\"attachment\.php\?aid=.*>.*<\/a>')
anchors = pattern.findall(pageContent)
#遍历下载节点
for anchor in anchors:
name = ''
try:
#print anchor
linkPattern = re.compile('\"attachment\.php\?aid=[a-zA-Z0-9\%&;=\?-_\B]*\"')
link = linkPattern.findall(anchor)
link = SITE_HEADER + link[0][1:-1]
#print 'attach link',link
namePattern = re.compile('>[^<>].*[^<>]<\/')
name = namePattern.findall(anchor)
name = name[0][1:-2]
name = stripInvalChar(name)
date = GetDateString()
time = GetTimeString()
# name = rootpath + date + "\\" + time + "_" + getId() + '_' + name
#print "download " + link
#print "to" + name
count = count + 1
download(link,name)
except Exception, ex:
traceback.print_exc(file=sys.stdout)
if name != '':
names.append(name)
return names
def stripInvalChar(name):
    """Replace parentheses (unwanted in local filenames) with spaces."""
    return name.replace('(', ' ').replace(')', ' ')
def download(url, filename):
    """Ensure the date folder exists, then fetch the attachment to *filename*."""
    MakeDateFolder(rootpath)
    #urllib.urlretrieve(url, filename)
    getAttachment(url, filename)
def GetTimeString():
    """Return the current local time formatted 'HH_MM_SS'."""
    from datetime import datetime
    return datetime.today().strftime("%H_%M_%S")
def GetDateString():
    """Return the current date formatted 'YYYY_MM' (used as folder name)."""
    from datetime import date
    #todayStr = date.today().strftime("%Y_%m_%d")
    return date.today().strftime("%Y_%m")
def MakeDateFolder( inFolderName ):
    # Create the per-month subfolder under ZXBASE when the base folder
    # exists.  NOTE(review): the existence check tests *inFolderName*
    # relative to the CWD while the folder is created under ZXBASE —
    # confirm this asymmetry is intentional.
    if os.path.isdir( inFolderName ):
        newFolderName = inFolderName + '\\\\' + GetDateString()
        newFolderName = os.path.join(ZXBASE, newFolderName)
        #print 'making new download dir:', newFolderName
        if os.path.isdir( newFolderName ):
            #print(newFolderName," Exists already ")
            pass
        else:
            os.mkdir( newFolderName )
            #print(newFolderName," Create OK ")
def isImageFile(fname):
    """True when *fname* contains a known image extension.

    Case-insensitive substring match, so e.g. 'a.jpg.torrent' also
    matches — kept for compatibility with the original behaviour.
    """
    lowered = fname.lower()
    return any(ext in lowered for ext in ('.jpg', '.jpeg', '.png', '.bmp'))
# generate a UUID used to name image and text files
def getId():
    """Return a unique id: hex of the current epoch digits + a uuid1."""
    stamp = str(hex(int(str(time.time()).replace('.', ''))))
    return stamp + '-' + str(uuid.uuid1())
def enable_proxy():
    # Route all sockets through the local SOCKS5 proxy (tunnel on 7070).
    print 'enabling socket proxy:' , '127.0.0.1:7070'
    socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 7070)
    socket.socket = socks.socksocket
    # socket.create_connection = _create_connection
def disable_proxy():
    # Clear the SOCKS default proxy.  NOTE(review): socket.socket stays
    # patched to socks.socksocket after enable_proxy() — confirm intended.
    socks.setdefaultproxy()
if __name__ == '__main__':
    # Manual smoke test: log in, then rip attachments from one thread.
    login()
    #getAttachment(SITE_HEADER + 'attachment.php?aid=1388706&noupdate=yes', 'attText1.gif')
    #getLinks([SITE_HEADER + 'thread-3269681-1-5.html', ])
    #getLinks(open('raw/attachTorrent.html').read())
    tgetLinks('http://64.120.179.165/forum/thread-3384142-1-1.html')
    #pagelinks = getPages()
    #attlinks = getLinks([SITE_HEADER + 'thread-3350257-1-1.html'])
| Python |
#coding=utf-8
'''
Created on 2011-8-30
@author: chris
'''
class RipperException(Exception):
    '''
    Fatal exception: aborts execution.
    '''
    def __init__(self, msg):
        '''
        Store the message and echo it to stdout.
        '''
        # NOTE(review): printing from a constructor is a side effect kept
        # for backwards compatibility.
        self.msg = msg
        print msg
class ParseException(RipperException):
    def __init__(self, msg):
        """Parsing failure.

        Bug fix: previously called ``RipperException(msg)``, which built a
        throwaway instance and left ``self`` uninitialized (no ``self.msg``).
        """
        RipperException.__init__(self, msg)
# ordinary exception: execution continues
class DownloadException(RipperException):
    def __init__(self, msg):
        """Download failure.

        Bug fix: previously called ``RipperException(msg)`` (a throwaway
        instance) and then set ``self.msg`` by hand; initialize the base
        class properly instead.
        """
        RipperException.__init__(self, msg)
#coding=utf-8
'''
Created on 2013-2-8
@author: Administrator
'''
# Remote sync database (production).
# NOTE(review): credentials are hard-coded and committed; move them to
# environment variables or a secrets store.
sync_db_host = 'fuckavmm.com'
sync_db_user = 'ihamster'
sync_db_pwd = '123z123'
sync_db_name = 'ihamster_harvester'
# Local development alternative:
#sync_db_host = '127.0.0.1'
#sync_db_user = 'root'
#sync_db_pwd = ''
#sync_db_name = 'harvester'
| Python |
#coding=utf-8
'''
分析
~35k posts, costs ~ 45m
'''
#import jieba
#import jieba.posseg as pseg
from optparse import OptionParser
import os, sys
import json
import re
sys.path.append(os.path.abspath(os.path.dirname(os.path.join(__file__, '../../'))))
from ripper.syncconfig import sync_db_host, sync_db_user, sync_db_pwd, sync_db_name
import MySQLdb
from datetime import date
import gc
#gc.disable()
# tag input files -> tag_type (1 = product codes, 2 = person names)
tagsource = {'codes.txt': 1, 'names.txt': 2}
# module-level connection shared by the classes below
print 'connecting...', sync_db_host
db = MySQLdb.connect(sync_db_host, sync_db_user, sync_db_pwd, sync_db_name)
print 'connected'
db.set_character_set('utf8')
class BrandTagging(object):
    '''Count tags against post title or content'''
    def get_conn(self):
        # Open a fresh connection (used for the bulk-insert stage).
        print 'connecting...', sync_db_host
        _db = MySQLdb.connect(sync_db_host, sync_db_user, sync_db_pwd, sync_db_name)
        print 'connected'
        _db.set_character_set('utf8')
        return _db
    def _load_post_titles(self):
        # Load (title, pid) for every post in the tagged forums.
        sql = 'select title, pid from post where fid in (4, 230, 143) '
        all = []
        cur = db.cursor()
        cur.execute(sql)
        # result = cur.fetchone()
        # while result != None:
        #     title, pid = result
        #     yield title, pid
        #     result = cur.fetchone()
        result = cur.fetchall()
        for r in result:
            title, pid = r
            all.append( (title, pid) )
        cur.close()
        return all
    def count_tag(self):
        # Build {tag_type: {tag: [count, set(pids)]}} by scanning every
        # post title, then hand the result to transfer_tag_data().
        tagdict = {}
        print 'loading tags...'
        for sourcefile, tagtype in tagsource.items():
            with open(sourcefile, 'r') as ff:
                for line in ff.read().split('\n'):
                    line = line.strip()
                    if line == '': continue
                    if tagtype == 1:
                        # product code: letters may not adjoin the match
                        pt = '.*?[^a-zA-Z]*?' + line + '[^a-zA-Z]'
                        tagdict.setdefault(tagtype, {})[(re.compile(pt), line)] = [0, set() ]
                    else:
                        tagdict.setdefault(tagtype, {})[line.lower()] = [0, set() ]
        print 'loading post titles....'
        all = self._load_post_titles()
        print 'analyzing ...'
        for post in all:
            title, pid = post
            title = title.upper()
            #print '..........', title
            for tagtype, tagnamedict in tagdict.iteritems():
                for tagname in tagnamedict.iterkeys():
                    if tagtype == 1:
                        # NOTE: compiled regex is currently unused; plain
                        # substring matching won out (see commented line).
                        tagname_re, _tagname = tagname
                        #if None != tagname_re.match(title):
                        if _tagname in title or \
                            _tagname.replace(' ', '') in title:
                            #print '0000000000', title
                            tagnamedict[tagname][0] += 1
                            tagnamedict[tagname][1].add(pid)
                    elif tagtype == 2:
                        # person-name matching
                        if tagname in title or \
                            tagname.replace(' ', '') in title:
                            tagnamedict[tagname][0] += 1
                            tagnamedict[tagname][1].add(pid)
        self.transfer_tag_data(tagdict)
    def transfer_tag_data(self, tagdict):
        '''Upload tag data: rebuild the tag and tag_rela tables.'''
        sqltpl = 'insert into tag (tag_key, tag_name, tag_type, tag_count) values (%s,%s,%s,%s) '
        sqltpl_rela = 'insert into tag_rela (tag_key, pid, tag_type) values (%s,%s,%s) '
        tags = []
        relas = []
        print 'preparing data...'
        for tagtype , tagnamedict in tagdict.iteritems():
            for tagname, vals in tagnamedict.iteritems():
                count, pids = vals
                if count != 0:
                    if tagtype == 1:
                        tagname = tagname[1]
                    tagkey = '%s_%d' % (tagname, tagtype)
                    tags.append([ tagkey,
                            tagname,
                            int(tagtype),
                            int(count)])
                    for pid in pids :
                        rela = [tagkey, pid, tagtype]
                        relas.append(rela)
        print 'inserting...'
        db = self.get_conn()
        cur = db.cursor()
        cur.execute('SET autocommit=0;')
        cur.execute('truncate table tag')
        cur.execute('truncate table tag_rela')
        cur.executemany(sqltpl, tags)
        cur.executemany(sqltpl_rela, relas)
        print 'commiting...'
        db.commit()
        cur.close()
class MenuItemCounter(object):
    # Rebuilds the menu table with per-forum category counts.
    def __init__(self):
        # NOTE(review): all the work happens in the constructor.
        tags = []
        for fid in self.loadAllFids():
            for cat, count in self.gettags(fid):
                data = [fid, cat, count]
                tags.append(data)
        cur = db.cursor()
        cur.execute('SET autocommit=0;')
        cur.execute('truncate table menu')
        sql = 'insert into menu (fid, cat, count) values(%s, %s, %s)'
        cur.executemany(sql, tags)
        print 'commiting...'
        db.commit()
        cur.close()
    def loadAllFids(self):
        # Every distinct forum id that has posts.
        sql = 'select distinct(fid) from post'
        cur = db.cursor()
        cur.execute(sql)
        all = []
        result = cur.fetchall()
        for fid in result:
            all.append(fid[0])
        cur.close()
        return all
    def gettags(self, fid):
        # (category, post_count) pairs for one forum; '' maps to a
        # placeholder label.
        sql = 'select cat, count(1) from post where fid=%s group by cat' %str(fid)
        cur = db.cursor()
        cur.execute(sql)
        all = []
        result = cur.fetchall()
        for result in result:
            cat, ct = result
            if cat == '':
                cat = u'无分类'
            all.append((cat, ct))
        cur.close()
        return all
def stat():
    """Sum all torrent sizes recorded in the DB and return the total in TB.

    Bug fix: the computed total was previously discarded — the function
    had no return statement.  Also reuses one cursor for both queries.
    """
    sql = '''select sum(a.sz) from ( select pid,size, CONVERT(SUBSTRING_INDEX(size,' ',1),UNSIGNED INTEGER) as sz from torrent where size like '%MB%' ) a'''
    cur = db.cursor()
    cur.execute(sql)
    all_mb = cur.fetchone()[0]
    sql = '''select sum(a.sz) from ( select pid,size, CONVERT(SUBSTRING_INDEX(size,' ',1),UNSIGNED INTEGER) as sz from torrent where size like '%GB%' ) a'''
    cur.execute(sql)
    all_gb = cur.fetchone()[0]
    return all_mb/1024/1024 + all_gb/1024
if __name__ == '__main__':
    # CLI: no args -> sync name/code tags; 'menu' -> rebuild the menu table.
    usage = u"usage: %prog 无参数:同步tag, menu:fid->cat to menu"
    parser = OptionParser(usage)
    (options, args) = parser.parse_args()
    if 'menu' in args:
        print 'counting menu items...'
        MenuItemCounter()
    else:
        print 'counting name and codes...'
        BrandTagging().count_tag()
| Python |
# coding=UTF-8
import sys
import os
try:
import Image # request PIL 1.1.6
import ImageOps
except ImportError:
print 'PIL 1.1.6 required'
class Thumbnail:
def __init__(self,orgImg,destDir,destName=None): # orginal img filename with full-path
self.org = orgImg
self.destDir = destDir
self.destName = destName
self.scaleWidth = 225.0
self.scaleHeigth = -1.0
self.dest = self.getDestImg(self.org,self.destDir)
self.file = Image.open(self.org)
self.quality = Image.ANTIALIAS
def getDestImg(self,org,destDir):
filename = os.path.basename(org)
if self.destName != None:
return destDir+'/'+self.destName
return destDir+'/'+filename
def create(self, crop=False):
'''call pil to resize the img to fixed width and heigth'''
cropHeigth = 300
orgWidth, orgHeigth = self.getOrginalSize()
if orgWidth < self.scaleWidth:
self.scaleWidth = orgWidth
self.scaleHeigth = orgHeigth
else:
ratio = orgWidth / self.scaleWidth
self.scaleHeigth = orgHeigth / ratio
# call pil to resize the image
size = int(self.scaleWidth), int(self.scaleHeigth)
self.file.thumbnail(size,self.quality)
# crop
if crop == True:
if cropHeigth > self.scaleHeigth:
cropHeigth = self.scaleHeigth
box = (0,0,int(self.scaleWidth),int(cropHeigth))
region = self.file.crop(box)
#region = region.transpose(Image.ROTATE_180)
img = Image.new(self.file.mode,box[2:])
img.paste(region, box)
# print 'crop>>',box
try:
img.save(self.dest,'JPEG')
except IOError, err:
if img.mode != "RGB":
img = img.convert("RGB")
img.save(self.dest,'JPEG')
try:
# 120x120
i2 = ImageOps.fit(self.file, (120, 120))
smallcrop = self.dest.replace('crop_', 'cropsmall_')
i2.save(smallcrop)
except Exception, ex:
pass
return int(self.scaleWidth),int(cropHeigth)
self.file.save(self.dest,'JPEG')
return int(self.scaleWidth), int(cropHeigth)
def reduce(self, scaleWidth=500):
'''call pil to resize the img to fixed width and heigth'''
orgWidth, orgHeigth = self.getOrginalSize()
if orgWidth < scaleWidth:
return
else:
ratio = orgWidth / self.scaleWidth
self.scaleHeigth = orgHeigth / ratio
# call pil to resize the image
size = int(self.scaleWidth), int(self.scaleHeigth)
self.file.thumbnail(size,self.quality)
self.file.save(self.dest,'JPEG')
def getOrginalSize(self):
''' call pil to read the width and heigth '''
return self.file.size
def createThumbnail(fname):
# print 'creating thumbnail for',fname
thumbnailPath = os.path.abspath(os.path.dirname(fname))
destName = os.path.basename(fname)
try:
thumbnail = Thumbnail(fname, thumbnailPath, destName)
thumbnail.create(False)
except Exception, ex:
print ex
# print 'thumbnail created.'
def createThumbnailForDir(dirname):
    '''Create a thumbnail for every entry in *dirname* (non-recursive).'''
    for entry in os.listdir(dirname):
        createThumbnail(os.path.join(dirname, entry))
def test():
    '''Manual smoke test: thumbnail a hard-coded local image (no crop).'''
    sourcePath = 'c:/ray.jpg'
    outputDir = 'c:/bea/'
    outputName = 'crop2_ray.jpg'
    Thumbnail(sourcePath, outputDir, outputName).create(False)
def test2():
    '''Manual smoke test: open a local image and re-save it over itself.

    NOTE(review): *dest* is assigned but never used -- the image is saved
    back to *org*, not to *dest*.  Preserved as-is.
    '''
    org = 'c:/111.jpg'
    dest = 'c:/222.jpg'
    Image.open(org).save(org)
if __name__ == '__main__':
    # Ad-hoc manual run: build a cropped thumbnail for one sample image.
    thumb = Thumbnail('c:/dx/mpl-studios-ava-cup-of-tea-72.jpg', 'C:/dx/', 'crop_mpl-studios-ava-cup-of-tea-72.jpg')
    thumb.create(True)
| Python |
# -*- coding: utf-8 -*-
# Simplified-to-Traditional Chinese character/phrase mapping, copied from Wikipedia's conversion tables.
zh2Hant = {
u'呆': u'獃',
u"打印机": u"印表機",
u'帮助文件': u'說明檔案',
u"画": u"畫",
u"板": u"板",
u"表": u"表",
u"才": u"才",
u"丑": u"醜",
u"出": u"出",
u"淀": u"澱",
u"冬": u"冬",
u"范": u"範",
u"丰": u"豐",
u"刮": u"刮",
u"后": u"後",
u"胡": u"胡",
u"回": u"回",
u"伙": u"夥",
u"姜": u"薑",
u"借": u"借",
u"克": u"克",
u"困": u"困",
u"漓": u"漓",
u"里": u"里",
u"帘": u"簾",
u"霉": u"霉",
u"面": u"面",
u"蔑": u"蔑",
u"千": u"千",
u"秋": u"秋",
u"松": u"松",
u"咸": u"咸",
u"向": u"向",
u"余": u"餘",
u"郁": u"鬱",
u"御": u"御",
u"愿": u"願",
u"云": u"雲",
u"芸": u"芸",
u"沄": u"沄",
u"致": u"致",
u"制": u"制",
u"朱": u"朱",
u"筑": u"築",
u"准": u"準",
u"厂": u"廠",
u"广": u"廣",
u"辟": u"闢",
u"别": u"別",
u"卜": u"卜",
u"沈": u"沈",
u"冲": u"沖",
u"种": u"種",
u"虫": u"蟲",
u"担": u"擔",
u"党": u"黨",
u"斗": u"鬥",
u"儿": u"兒",
u"干": u"乾",
u"谷": u"谷",
u"柜": u"櫃",
u"合": u"合",
u"划": u"劃",
u"坏": u"壞",
u"几": u"幾",
u"系": u"系",
u"家": u"家",
u"价": u"價",
u"据": u"據",
u"卷": u"捲",
u"适": u"適",
u"蜡": u"蠟",
u"腊": u"臘",
u"了": u"了",
u"累": u"累",
u"么": u"麽",
u"蒙": u"蒙",
u"万": u"萬",
u"宁": u"寧",
u"朴": u"樸",
u"苹": u"蘋",
u"仆": u"僕",
u"曲": u"曲",
u"确": u"確",
u"舍": u"舍",
u"胜": u"勝",
u"术": u"術",
u"台": u"台",
u"体": u"體",
u"涂": u"塗",
u"叶": u"葉",
u"吁": u"吁",
u"旋": u"旋",
u"佣": u"傭",
u"与": u"與",
u"折": u"折",
u"征": u"徵",
u"症": u"症",
u"恶": u"惡",
u"发": u"發",
u"复": u"復",
u"汇": u"匯",
u"获": u"獲",
u"饥": u"飢",
u"尽": u"盡",
u"历": u"歷",
u"卤": u"滷",
u"弥": u"彌",
u"签": u"簽",
u"纤": u"纖",
u"苏": u"蘇",
u"坛": u"壇",
u"团": u"團",
u"须": u"須",
u"脏": u"臟",
u"只": u"只",
u"钟": u"鐘",
u"药": u"藥",
u"同": u"同",
u"志": u"志",
u"杯": u"杯",
u"岳": u"岳",
u"布": u"布",
u"当": u"當",
u"吊": u"弔",
u"仇": u"仇",
u"蕴": u"蘊",
u"线": u"線",
u"为": u"為",
u"产": u"產",
u"众": u"眾",
u"伪": u"偽",
u"凫": u"鳧",
u"厕": u"廁",
u"启": u"啟",
u"墙": u"牆",
u"壳": u"殼",
u"奖": u"獎",
u"妫": u"媯",
u"并": u"並",
u"录": u"錄",
u"悫": u"愨",
u"极": u"極",
u"沩": u"溈",
u"瘘": u"瘺",
u"硷": u"鹼",
u"竖": u"豎",
u"绝": u"絕",
u"绣": u"繡",
u"绦": u"絛",
u"绱": u"緔",
u"绷": u"綳",
u"绿": u"綠",
u"缰": u"韁",
u"苧": u"苎",
u"莼": u"蒓",
u"说": u"說",
u"谣": u"謠",
u"谫": u"譾",
u"赃": u"贓",
u"赍": u"齎",
u"赝": u"贗",
u"酝": u"醞",
u"采": u"採",
u"钩": u"鉤",
u"钵": u"缽",
u"锈": u"銹",
u"锐": u"銳",
u"锨": u"杴",
u"镌": u"鐫",
u"镢": u"钁",
u"阅": u"閱",
u"颓": u"頹",
u"颜": u"顏",
u"骂": u"罵",
u"鲇": u"鯰",
u"鲞": u"鯗",
u"鳄": u"鱷",
u"鸡": u"雞",
u"鹚": u"鶿",
u"荡": u"盪",
u"锤": u"錘",
u"㟆": u"㠏",
u"㛟": u"𡞵",
u"专": u"專",
u"业": u"業",
u"丛": u"叢",
u"东": u"東",
u"丝": u"絲",
u"丢": u"丟",
u"两": u"兩",
u"严": u"嚴",
u"丧": u"喪",
u"个": u"個",
u"临": u"臨",
u"丽": u"麗",
u"举": u"舉",
u"义": u"義",
u"乌": u"烏",
u"乐": u"樂",
u"乔": u"喬",
u"习": u"習",
u"乡": u"鄉",
u"书": u"書",
u"买": u"買",
u"乱": u"亂",
u"争": u"爭",
u"于": u"於",
u"亏": u"虧",
u"亚": u"亞",
u"亩": u"畝",
u"亲": u"親",
u"亵": u"褻",
u"亸": u"嚲",
u"亿": u"億",
u"仅": u"僅",
u"从": u"從",
u"仑": u"侖",
u"仓": u"倉",
u"仪": u"儀",
u"们": u"們",
u"优": u"優",
u"会": u"會",
u"伛": u"傴",
u"伞": u"傘",
u"伟": u"偉",
u"传": u"傳",
u"伣": u"俔",
u"伤": u"傷",
u"伥": u"倀",
u"伦": u"倫",
u"伧": u"傖",
u"伫": u"佇",
u"佥": u"僉",
u"侠": u"俠",
u"侣": u"侶",
u"侥": u"僥",
u"侦": u"偵",
u"侧": u"側",
u"侨": u"僑",
u"侩": u"儈",
u"侪": u"儕",
u"侬": u"儂",
u"俣": u"俁",
u"俦": u"儔",
u"俨": u"儼",
u"俩": u"倆",
u"俪": u"儷",
u"俫": u"倈",
u"俭": u"儉",
u"债": u"債",
u"倾": u"傾",
u"偬": u"傯",
u"偻": u"僂",
u"偾": u"僨",
u"偿": u"償",
u"傥": u"儻",
u"傧": u"儐",
u"储": u"儲",
u"傩": u"儺",
u"㑩": u"儸",
u"兑": u"兌",
u"兖": u"兗",
u"兰": u"蘭",
u"关": u"關",
u"兴": u"興",
u"兹": u"茲",
u"养": u"養",
u"兽": u"獸",
u"冁": u"囅",
u"内": u"內",
u"冈": u"岡",
u"册": u"冊",
u"写": u"寫",
u"军": u"軍",
u"农": u"農",
u"冯": u"馮",
u"决": u"決",
u"况": u"況",
u"冻": u"凍",
u"净": u"凈",
u"凉": u"涼",
u"减": u"減",
u"凑": u"湊",
u"凛": u"凜",
u"凤": u"鳳",
u"凭": u"憑",
u"凯": u"凱",
u"击": u"擊",
u"凿": u"鑿",
u"刍": u"芻",
u"刘": u"劉",
u"则": u"則",
u"刚": u"剛",
u"创": u"創",
u"删": u"刪",
u"刬": u"剗",
u"刭": u"剄",
u"刹": u"剎",
u"刽": u"劊",
u"刿": u"劌",
u"剀": u"剴",
u"剂": u"劑",
u"剐": u"剮",
u"剑": u"劍",
u"剥": u"剝",
u"剧": u"劇",
u"㓥": u"劏",
u"㔉": u"劚",
u"劝": u"勸",
u"办": u"辦",
u"务": u"務",
u"劢": u"勱",
u"动": u"動",
u"励": u"勵",
u"劲": u"勁",
u"劳": u"勞",
u"势": u"勢",
u"勋": u"勛",
u"勚": u"勩",
u"匀": u"勻",
u"匦": u"匭",
u"匮": u"匱",
u"区": u"區",
u"医": u"醫",
u"华": u"華",
u"协": u"協",
u"单": u"單",
u"卖": u"賣",
u"卢": u"盧",
u"卫": u"衛",
u"却": u"卻",
u"厅": u"廳",
u"厉": u"厲",
u"压": u"壓",
u"厌": u"厭",
u"厍": u"厙",
u"厐": u"龎",
u"厘": u"釐",
u"厢": u"廂",
u"厣": u"厴",
u"厦": u"廈",
u"厨": u"廚",
u"厩": u"廄",
u"厮": u"廝",
u"县": u"縣",
u"叁": u"叄",
u"参": u"參",
u"双": u"雙",
u"变": u"變",
u"叙": u"敘",
u"叠": u"疊",
u"号": u"號",
u"叹": u"嘆",
u"叽": u"嘰",
u"吓": u"嚇",
u"吕": u"呂",
u"吗": u"嗎",
u"吣": u"唚",
u"吨": u"噸",
u"听": u"聽",
u"吴": u"吳",
u"呐": u"吶",
u"呒": u"嘸",
u"呓": u"囈",
u"呕": u"嘔",
u"呖": u"嚦",
u"呗": u"唄",
u"员": u"員",
u"呙": u"咼",
u"呛": u"嗆",
u"呜": u"嗚",
u"咏": u"詠",
u"咙": u"嚨",
u"咛": u"嚀",
u"咝": u"噝",
u"咤": u"吒",
u"响": u"響",
u"哑": u"啞",
u"哒": u"噠",
u"哓": u"嘵",
u"哔": u"嗶",
u"哕": u"噦",
u"哗": u"嘩",
u"哙": u"噲",
u"哜": u"嚌",
u"哝": u"噥",
u"哟": u"喲",
u"唛": u"嘜",
u"唝": u"嗊",
u"唠": u"嘮",
u"唡": u"啢",
u"唢": u"嗩",
u"唤": u"喚",
u"啧": u"嘖",
u"啬": u"嗇",
u"啭": u"囀",
u"啮": u"嚙",
u"啴": u"嘽",
u"啸": u"嘯",
u"㖞": u"喎",
u"喷": u"噴",
u"喽": u"嘍",
u"喾": u"嚳",
u"嗫": u"囁",
u"嗳": u"噯",
u"嘘": u"噓",
u"嘤": u"嚶",
u"嘱": u"囑",
u"㖊": u"噚",
u"噜": u"嚕",
u"嚣": u"囂",
u"园": u"園",
u"囱": u"囪",
u"围": u"圍",
u"囵": u"圇",
u"国": u"國",
u"图": u"圖",
u"圆": u"圓",
u"圣": u"聖",
u"圹": u"壙",
u"场": u"場",
u"坂": u"阪",
u"块": u"塊",
u"坚": u"堅",
u"坜": u"壢",
u"坝": u"壩",
u"坞": u"塢",
u"坟": u"墳",
u"坠": u"墜",
u"垄": u"壟",
u"垅": u"壠",
u"垆": u"壚",
u"垒": u"壘",
u"垦": u"墾",
u"垩": u"堊",
u"垫": u"墊",
u"垭": u"埡",
u"垱": u"壋",
u"垲": u"塏",
u"垴": u"堖",
u"埘": u"塒",
u"埙": u"塤",
u"埚": u"堝",
u"埯": u"垵",
u"堑": u"塹",
u"堕": u"墮",
u"𡒄": u"壈",
u"壮": u"壯",
u"声": u"聲",
u"壶": u"壺",
u"壸": u"壼",
u"处": u"處",
u"备": u"備",
u"够": u"夠",
u"头": u"頭",
u"夸": u"誇",
u"夹": u"夾",
u"夺": u"奪",
u"奁": u"奩",
u"奂": u"奐",
u"奋": u"奮",
u"奥": u"奧",
u"奸": u"姦",
u"妆": u"妝",
u"妇": u"婦",
u"妈": u"媽",
u"妩": u"嫵",
u"妪": u"嫗",
u"姗": u"姍",
u"姹": u"奼",
u"娄": u"婁",
u"娅": u"婭",
u"娆": u"嬈",
u"娇": u"嬌",
u"娈": u"孌",
u"娱": u"娛",
u"娲": u"媧",
u"娴": u"嫻",
u"婳": u"嫿",
u"婴": u"嬰",
u"婵": u"嬋",
u"婶": u"嬸",
u"媪": u"媼",
u"嫒": u"嬡",
u"嫔": u"嬪",
u"嫱": u"嬙",
u"嬷": u"嬤",
u"孙": u"孫",
u"学": u"學",
u"孪": u"孿",
u"宝": u"寶",
u"实": u"實",
u"宠": u"寵",
u"审": u"審",
u"宪": u"憲",
u"宫": u"宮",
u"宽": u"寬",
u"宾": u"賓",
u"寝": u"寢",
u"对": u"對",
u"寻": u"尋",
u"导": u"導",
u"寿": u"壽",
u"将": u"將",
u"尔": u"爾",
u"尘": u"塵",
u"尝": u"嘗",
u"尧": u"堯",
u"尴": u"尷",
u"尸": u"屍",
u"层": u"層",
u"屃": u"屓",
u"屉": u"屜",
u"届": u"屆",
u"属": u"屬",
u"屡": u"屢",
u"屦": u"屨",
u"屿": u"嶼",
u"岁": u"歲",
u"岂": u"豈",
u"岖": u"嶇",
u"岗": u"崗",
u"岘": u"峴",
u"岙": u"嶴",
u"岚": u"嵐",
u"岛": u"島",
u"岭": u"嶺",
u"岽": u"崬",
u"岿": u"巋",
u"峄": u"嶧",
u"峡": u"峽",
u"峣": u"嶢",
u"峤": u"嶠",
u"峥": u"崢",
u"峦": u"巒",
u"崂": u"嶗",
u"崃": u"崍",
u"崄": u"嶮",
u"崭": u"嶄",
u"嵘": u"嶸",
u"嵚": u"嶔",
u"嵝": u"嶁",
u"巅": u"巔",
u"巩": u"鞏",
u"巯": u"巰",
u"币": u"幣",
u"帅": u"帥",
u"师": u"師",
u"帏": u"幃",
u"帐": u"帳",
u"帜": u"幟",
u"带": u"帶",
u"帧": u"幀",
u"帮": u"幫",
u"帱": u"幬",
u"帻": u"幘",
u"帼": u"幗",
u"幂": u"冪",
u"庄": u"莊",
u"庆": u"慶",
u"庐": u"廬",
u"庑": u"廡",
u"库": u"庫",
u"应": u"應",
u"庙": u"廟",
u"庞": u"龐",
u"废": u"廢",
u"廪": u"廩",
u"开": u"開",
u"异": u"異",
u"弃": u"棄",
u"弑": u"弒",
u"张": u"張",
u"弪": u"弳",
u"弯": u"彎",
u"弹": u"彈",
u"强": u"強",
u"归": u"歸",
u"彝": u"彞",
u"彦": u"彥",
u"彻": u"徹",
u"径": u"徑",
u"徕": u"徠",
u"忆": u"憶",
u"忏": u"懺",
u"忧": u"憂",
u"忾": u"愾",
u"怀": u"懷",
u"态": u"態",
u"怂": u"慫",
u"怃": u"憮",
u"怄": u"慪",
u"怅": u"悵",
u"怆": u"愴",
u"怜": u"憐",
u"总": u"總",
u"怼": u"懟",
u"怿": u"懌",
u"恋": u"戀",
u"恒": u"恆",
u"恳": u"懇",
u"恸": u"慟",
u"恹": u"懨",
u"恺": u"愷",
u"恻": u"惻",
u"恼": u"惱",
u"恽": u"惲",
u"悦": u"悅",
u"悬": u"懸",
u"悭": u"慳",
u"悮": u"悞",
u"悯": u"憫",
u"惊": u"驚",
u"惧": u"懼",
u"惨": u"慘",
u"惩": u"懲",
u"惫": u"憊",
u"惬": u"愜",
u"惭": u"慚",
u"惮": u"憚",
u"惯": u"慣",
u"愠": u"慍",
u"愤": u"憤",
u"愦": u"憒",
u"慑": u"懾",
u"懑": u"懣",
u"懒": u"懶",
u"懔": u"懍",
u"戆": u"戇",
u"戋": u"戔",
u"戏": u"戲",
u"戗": u"戧",
u"战": u"戰",
u"戬": u"戩",
u"戯": u"戱",
u"户": u"戶",
u"扑": u"撲",
u"执": u"執",
u"扩": u"擴",
u"扪": u"捫",
u"扫": u"掃",
u"扬": u"揚",
u"扰": u"擾",
u"抚": u"撫",
u"抛": u"拋",
u"抟": u"摶",
u"抠": u"摳",
u"抡": u"掄",
u"抢": u"搶",
u"护": u"護",
u"报": u"報",
u"拟": u"擬",
u"拢": u"攏",
u"拣": u"揀",
u"拥": u"擁",
u"拦": u"攔",
u"拧": u"擰",
u"拨": u"撥",
u"择": u"擇",
u"挂": u"掛",
u"挚": u"摯",
u"挛": u"攣",
u"挜": u"掗",
u"挝": u"撾",
u"挞": u"撻",
u"挟": u"挾",
u"挠": u"撓",
u"挡": u"擋",
u"挢": u"撟",
u"挣": u"掙",
u"挤": u"擠",
u"挥": u"揮",
u"挦": u"撏",
u"挽": u"輓",
u"捝": u"挩",
u"捞": u"撈",
u"损": u"損",
u"捡": u"撿",
u"换": u"換",
u"捣": u"搗",
u"掳": u"擄",
u"掴": u"摑",
u"掷": u"擲",
u"掸": u"撣",
u"掺": u"摻",
u"掼": u"摜",
u"揽": u"攬",
u"揾": u"搵",
u"揿": u"撳",
u"搀": u"攙",
u"搁": u"擱",
u"搂": u"摟",
u"搅": u"攪",
u"携": u"攜",
u"摄": u"攝",
u"摅": u"攄",
u"摆": u"擺",
u"摇": u"搖",
u"摈": u"擯",
u"摊": u"攤",
u"撄": u"攖",
u"撑": u"撐",
u"㧑": u"撝",
u"撵": u"攆",
u"撷": u"擷",
u"撸": u"擼",
u"撺": u"攛",
u"㧟": u"擓",
u"擞": u"擻",
u"攒": u"攢",
u"敌": u"敵",
u"敛": u"斂",
u"数": u"數",
u"斋": u"齋",
u"斓": u"斕",
u"斩": u"斬",
u"断": u"斷",
u"无": u"無",
u"旧": u"舊",
u"时": u"時",
u"旷": u"曠",
u"旸": u"暘",
u"昙": u"曇",
u"昼": u"晝",
u"昽": u"曨",
u"显": u"顯",
u"晋": u"晉",
u"晒": u"曬",
u"晓": u"曉",
u"晔": u"曄",
u"晕": u"暈",
u"晖": u"暉",
u"暂": u"暫",
u"暧": u"曖",
u"机": u"機",
u"杀": u"殺",
u"杂": u"雜",
u"权": u"權",
u"杆": u"桿",
u"条": u"條",
u"来": u"來",
u"杨": u"楊",
u"杩": u"榪",
u"杰": u"傑",
u"构": u"構",
u"枞": u"樅",
u"枢": u"樞",
u"枣": u"棗",
u"枥": u"櫪",
u"枧": u"梘",
u"枨": u"棖",
u"枪": u"槍",
u"枫": u"楓",
u"枭": u"梟",
u"柠": u"檸",
u"柽": u"檉",
u"栀": u"梔",
u"栅": u"柵",
u"标": u"標",
u"栈": u"棧",
u"栉": u"櫛",
u"栊": u"櫳",
u"栋": u"棟",
u"栌": u"櫨",
u"栎": u"櫟",
u"栏": u"欄",
u"树": u"樹",
u"栖": u"棲",
u"栗": u"慄",
u"样": u"樣",
u"栾": u"欒",
u"桠": u"椏",
u"桡": u"橈",
u"桢": u"楨",
u"档": u"檔",
u"桤": u"榿",
u"桥": u"橋",
u"桦": u"樺",
u"桧": u"檜",
u"桨": u"槳",
u"桩": u"樁",
u"梦": u"夢",
u"梼": u"檮",
u"梾": u"棶",
u"梿": u"槤",
u"检": u"檢",
u"棁": u"梲",
u"棂": u"欞",
u"椁": u"槨",
u"椟": u"櫝",
u"椠": u"槧",
u"椤": u"欏",
u"椭": u"橢",
u"楼": u"樓",
u"榄": u"欖",
u"榅": u"榲",
u"榇": u"櫬",
u"榈": u"櫚",
u"榉": u"櫸",
u"槚": u"檟",
u"槛": u"檻",
u"槟": u"檳",
u"槠": u"櫧",
u"横": u"橫",
u"樯": u"檣",
u"樱": u"櫻",
u"橥": u"櫫",
u"橱": u"櫥",
u"橹": u"櫓",
u"橼": u"櫞",
u"檩": u"檁",
u"欢": u"歡",
u"欤": u"歟",
u"欧": u"歐",
u"歼": u"殲",
u"殁": u"歿",
u"殇": u"殤",
u"残": u"殘",
u"殒": u"殞",
u"殓": u"殮",
u"殚": u"殫",
u"殡": u"殯",
u"㱮": u"殨",
u"㱩": u"殰",
u"殴": u"毆",
u"毁": u"毀",
u"毂": u"轂",
u"毕": u"畢",
u"毙": u"斃",
u"毡": u"氈",
u"毵": u"毿",
u"氇": u"氌",
u"气": u"氣",
u"氢": u"氫",
u"氩": u"氬",
u"氲": u"氳",
u"汉": u"漢",
u"汤": u"湯",
u"汹": u"洶",
u"沟": u"溝",
u"没": u"沒",
u"沣": u"灃",
u"沤": u"漚",
u"沥": u"瀝",
u"沦": u"淪",
u"沧": u"滄",
u"沪": u"滬",
u"泞": u"濘",
u"注": u"註",
u"泪": u"淚",
u"泶": u"澩",
u"泷": u"瀧",
u"泸": u"瀘",
u"泺": u"濼",
u"泻": u"瀉",
u"泼": u"潑",
u"泽": u"澤",
u"泾": u"涇",
u"洁": u"潔",
u"洒": u"灑",
u"洼": u"窪",
u"浃": u"浹",
u"浅": u"淺",
u"浆": u"漿",
u"浇": u"澆",
u"浈": u"湞",
u"浊": u"濁",
u"测": u"測",
u"浍": u"澮",
u"济": u"濟",
u"浏": u"瀏",
u"浐": u"滻",
u"浑": u"渾",
u"浒": u"滸",
u"浓": u"濃",
u"浔": u"潯",
u"涛": u"濤",
u"涝": u"澇",
u"涞": u"淶",
u"涟": u"漣",
u"涠": u"潿",
u"涡": u"渦",
u"涣": u"渙",
u"涤": u"滌",
u"润": u"潤",
u"涧": u"澗",
u"涨": u"漲",
u"涩": u"澀",
u"渊": u"淵",
u"渌": u"淥",
u"渍": u"漬",
u"渎": u"瀆",
u"渐": u"漸",
u"渑": u"澠",
u"渔": u"漁",
u"渖": u"瀋",
u"渗": u"滲",
u"温": u"溫",
u"湾": u"灣",
u"湿": u"濕",
u"溃": u"潰",
u"溅": u"濺",
u"溆": u"漵",
u"滗": u"潷",
u"滚": u"滾",
u"滞": u"滯",
u"滟": u"灧",
u"滠": u"灄",
u"满": u"滿",
u"滢": u"瀅",
u"滤": u"濾",
u"滥": u"濫",
u"滦": u"灤",
u"滨": u"濱",
u"滩": u"灘",
u"滪": u"澦",
u"漤": u"灠",
u"潆": u"瀠",
u"潇": u"瀟",
u"潋": u"瀲",
u"潍": u"濰",
u"潜": u"潛",
u"潴": u"瀦",
u"澜": u"瀾",
u"濑": u"瀨",
u"濒": u"瀕",
u"㲿": u"瀇",
u"灏": u"灝",
u"灭": u"滅",
u"灯": u"燈",
u"灵": u"靈",
u"灶": u"竈",
u"灾": u"災",
u"灿": u"燦",
u"炀": u"煬",
u"炉": u"爐",
u"炖": u"燉",
u"炜": u"煒",
u"炝": u"熗",
u"点": u"點",
u"炼": u"煉",
u"炽": u"熾",
u"烁": u"爍",
u"烂": u"爛",
u"烃": u"烴",
u"烛": u"燭",
u"烟": u"煙",
u"烦": u"煩",
u"烧": u"燒",
u"烨": u"燁",
u"烩": u"燴",
u"烫": u"燙",
u"烬": u"燼",
u"热": u"熱",
u"焕": u"煥",
u"焖": u"燜",
u"焘": u"燾",
u"㶽": u"煱",
u"煴": u"熅",
u"㶶": u"燶",
u"爱": u"愛",
u"爷": u"爺",
u"牍": u"牘",
u"牦": u"氂",
u"牵": u"牽",
u"牺": u"犧",
u"犊": u"犢",
u"状": u"狀",
u"犷": u"獷",
u"犸": u"獁",
u"犹": u"猶",
u"狈": u"狽",
u"狝": u"獮",
u"狞": u"獰",
u"独": u"獨",
u"狭": u"狹",
u"狮": u"獅",
u"狯": u"獪",
u"狰": u"猙",
u"狱": u"獄",
u"狲": u"猻",
u"猃": u"獫",
u"猎": u"獵",
u"猕": u"獼",
u"猡": u"玀",
u"猪": u"豬",
u"猫": u"貓",
u"猬": u"蝟",
u"献": u"獻",
u"獭": u"獺",
u"㺍": u"獱",
u"玑": u"璣",
u"玚": u"瑒",
u"玛": u"瑪",
u"玮": u"瑋",
u"环": u"環",
u"现": u"現",
u"玱": u"瑲",
u"玺": u"璽",
u"珐": u"琺",
u"珑": u"瓏",
u"珰": u"璫",
u"珲": u"琿",
u"琏": u"璉",
u"琐": u"瑣",
u"琼": u"瓊",
u"瑶": u"瑤",
u"瑷": u"璦",
u"璎": u"瓔",
u"瓒": u"瓚",
u"瓯": u"甌",
u"电": u"電",
u"画": u"畫",
u"畅": u"暢",
u"畴": u"疇",
u"疖": u"癤",
u"疗": u"療",
u"疟": u"瘧",
u"疠": u"癘",
u"疡": u"瘍",
u"疬": u"癧",
u"疭": u"瘲",
u"疮": u"瘡",
u"疯": u"瘋",
u"疱": u"皰",
u"疴": u"痾",
u"痈": u"癰",
u"痉": u"痙",
u"痒": u"癢",
u"痖": u"瘂",
u"痨": u"癆",
u"痪": u"瘓",
u"痫": u"癇",
u"瘅": u"癉",
u"瘆": u"瘮",
u"瘗": u"瘞",
u"瘪": u"癟",
u"瘫": u"癱",
u"瘾": u"癮",
u"瘿": u"癭",
u"癞": u"癩",
u"癣": u"癬",
u"癫": u"癲",
u"皑": u"皚",
u"皱": u"皺",
u"皲": u"皸",
u"盏": u"盞",
u"盐": u"鹽",
u"监": u"監",
u"盖": u"蓋",
u"盗": u"盜",
u"盘": u"盤",
u"眍": u"瞘",
u"眦": u"眥",
u"眬": u"矓",
u"着": u"著",
u"睁": u"睜",
u"睐": u"睞",
u"睑": u"瞼",
u"瞆": u"瞶",
u"瞒": u"瞞",
u"䁖": u"瞜",
u"瞩": u"矚",
u"矫": u"矯",
u"矶": u"磯",
u"矾": u"礬",
u"矿": u"礦",
u"砀": u"碭",
u"码": u"碼",
u"砖": u"磚",
u"砗": u"硨",
u"砚": u"硯",
u"砜": u"碸",
u"砺": u"礪",
u"砻": u"礱",
u"砾": u"礫",
u"础": u"礎",
u"硁": u"硜",
u"硕": u"碩",
u"硖": u"硤",
u"硗": u"磽",
u"硙": u"磑",
u"碍": u"礙",
u"碛": u"磧",
u"碜": u"磣",
u"碱": u"鹼",
u"礼": u"禮",
u"祃": u"禡",
u"祎": u"禕",
u"祢": u"禰",
u"祯": u"禎",
u"祷": u"禱",
u"祸": u"禍",
u"禀": u"稟",
u"禄": u"祿",
u"禅": u"禪",
u"离": u"離",
u"秃": u"禿",
u"秆": u"稈",
u"积": u"積",
u"称": u"稱",
u"秽": u"穢",
u"秾": u"穠",
u"稆": u"穭",
u"税": u"稅",
u"䅉": u"稏",
u"稣": u"穌",
u"稳": u"穩",
u"穑": u"穡",
u"穷": u"窮",
u"窃": u"竊",
u"窍": u"竅",
u"窎": u"窵",
u"窑": u"窯",
u"窜": u"竄",
u"窝": u"窩",
u"窥": u"窺",
u"窦": u"竇",
u"窭": u"窶",
u"竞": u"競",
u"笃": u"篤",
u"笋": u"筍",
u"笔": u"筆",
u"笕": u"筧",
u"笺": u"箋",
u"笼": u"籠",
u"笾": u"籩",
u"筚": u"篳",
u"筛": u"篩",
u"筜": u"簹",
u"筝": u"箏",
u"䇲": u"筴",
u"筹": u"籌",
u"筼": u"篔",
u"简": u"簡",
u"箓": u"籙",
u"箦": u"簀",
u"箧": u"篋",
u"箨": u"籜",
u"箩": u"籮",
u"箪": u"簞",
u"箫": u"簫",
u"篑": u"簣",
u"篓": u"簍",
u"篮": u"籃",
u"篱": u"籬",
u"簖": u"籪",
u"籁": u"籟",
u"籴": u"糴",
u"类": u"類",
u"籼": u"秈",
u"粜": u"糶",
u"粝": u"糲",
u"粤": u"粵",
u"粪": u"糞",
u"粮": u"糧",
u"糁": u"糝",
u"糇": u"餱",
u"紧": u"緊",
u"䌷": u"紬",
u"䌹": u"絅",
u"絷": u"縶",
u"䌼": u"綐",
u"䌽": u"綵",
u"䌸": u"縳",
u"䍁": u"繸",
u"䍀": u"繿",
u"纟": u"糹",
u"纠": u"糾",
u"纡": u"紆",
u"红": u"紅",
u"纣": u"紂",
u"纥": u"紇",
u"约": u"約",
u"级": u"級",
u"纨": u"紈",
u"纩": u"纊",
u"纪": u"紀",
u"纫": u"紉",
u"纬": u"緯",
u"纭": u"紜",
u"纮": u"紘",
u"纯": u"純",
u"纰": u"紕",
u"纱": u"紗",
u"纲": u"綱",
u"纳": u"納",
u"纴": u"紝",
u"纵": u"縱",
u"纶": u"綸",
u"纷": u"紛",
u"纸": u"紙",
u"纹": u"紋",
u"纺": u"紡",
u"纻": u"紵",
u"纼": u"紖",
u"纽": u"紐",
u"纾": u"紓",
u"绀": u"紺",
u"绁": u"紲",
u"绂": u"紱",
u"练": u"練",
u"组": u"組",
u"绅": u"紳",
u"细": u"細",
u"织": u"織",
u"终": u"終",
u"绉": u"縐",
u"绊": u"絆",
u"绋": u"紼",
u"绌": u"絀",
u"绍": u"紹",
u"绎": u"繹",
u"经": u"經",
u"绐": u"紿",
u"绑": u"綁",
u"绒": u"絨",
u"结": u"結",
u"绔": u"絝",
u"绕": u"繞",
u"绖": u"絰",
u"绗": u"絎",
u"绘": u"繪",
u"给": u"給",
u"绚": u"絢",
u"绛": u"絳",
u"络": u"絡",
u"绞": u"絞",
u"统": u"統",
u"绠": u"綆",
u"绡": u"綃",
u"绢": u"絹",
u"绤": u"綌",
u"绥": u"綏",
u"继": u"繼",
u"绨": u"綈",
u"绩": u"績",
u"绪": u"緒",
u"绫": u"綾",
u"绬": u"緓",
u"续": u"續",
u"绮": u"綺",
u"绯": u"緋",
u"绰": u"綽",
u"绲": u"緄",
u"绳": u"繩",
u"维": u"維",
u"绵": u"綿",
u"绶": u"綬",
u"绸": u"綢",
u"绹": u"綯",
u"绺": u"綹",
u"绻": u"綣",
u"综": u"綜",
u"绽": u"綻",
u"绾": u"綰",
u"缀": u"綴",
u"缁": u"緇",
u"缂": u"緙",
u"缃": u"緗",
u"缄": u"緘",
u"缅": u"緬",
u"缆": u"纜",
u"缇": u"緹",
u"缈": u"緲",
u"缉": u"緝",
u"缊": u"縕",
u"缋": u"繢",
u"缌": u"緦",
u"缍": u"綞",
u"缎": u"緞",
u"缏": u"緶",
u"缑": u"緱",
u"缒": u"縋",
u"缓": u"緩",
u"缔": u"締",
u"缕": u"縷",
u"编": u"編",
u"缗": u"緡",
u"缘": u"緣",
u"缙": u"縉",
u"缚": u"縛",
u"缛": u"縟",
u"缜": u"縝",
u"缝": u"縫",
u"缞": u"縗",
u"缟": u"縞",
u"缠": u"纏",
u"缡": u"縭",
u"缢": u"縊",
u"缣": u"縑",
u"缤": u"繽",
u"缥": u"縹",
u"缦": u"縵",
u"缧": u"縲",
u"缨": u"纓",
u"缩": u"縮",
u"缪": u"繆",
u"缫": u"繅",
u"缬": u"纈",
u"缭": u"繚",
u"缮": u"繕",
u"缯": u"繒",
u"缱": u"繾",
u"缲": u"繰",
u"缳": u"繯",
u"缴": u"繳",
u"缵": u"纘",
u"罂": u"罌",
u"网": u"網",
u"罗": u"羅",
u"罚": u"罰",
u"罢": u"罷",
u"罴": u"羆",
u"羁": u"羈",
u"羟": u"羥",
u"翘": u"翹",
u"耢": u"耮",
u"耧": u"耬",
u"耸": u"聳",
u"耻": u"恥",
u"聂": u"聶",
u"聋": u"聾",
u"职": u"職",
u"聍": u"聹",
u"联": u"聯",
u"聩": u"聵",
u"聪": u"聰",
u"肃": u"肅",
u"肠": u"腸",
u"肤": u"膚",
u"肮": u"骯",
u"肴": u"餚",
u"肾": u"腎",
u"肿": u"腫",
u"胀": u"脹",
u"胁": u"脅",
u"胆": u"膽",
u"胧": u"朧",
u"胨": u"腖",
u"胪": u"臚",
u"胫": u"脛",
u"胶": u"膠",
u"脉": u"脈",
u"脍": u"膾",
u"脐": u"臍",
u"脑": u"腦",
u"脓": u"膿",
u"脔": u"臠",
u"脚": u"腳",
u"脱": u"脫",
u"脶": u"腡",
u"脸": u"臉",
u"腭": u"齶",
u"腻": u"膩",
u"腼": u"靦",
u"腽": u"膃",
u"腾": u"騰",
u"膑": u"臏",
u"臜": u"臢",
u"舆": u"輿",
u"舣": u"艤",
u"舰": u"艦",
u"舱": u"艙",
u"舻": u"艫",
u"艰": u"艱",
u"艳": u"艷",
u"艺": u"藝",
u"节": u"節",
u"芈": u"羋",
u"芗": u"薌",
u"芜": u"蕪",
u"芦": u"蘆",
u"苁": u"蓯",
u"苇": u"葦",
u"苈": u"藶",
u"苋": u"莧",
u"苌": u"萇",
u"苍": u"蒼",
u"苎": u"苧",
u"茎": u"莖",
u"茏": u"蘢",
u"茑": u"蔦",
u"茔": u"塋",
u"茕": u"煢",
u"茧": u"繭",
u"荆": u"荊",
u"荐": u"薦",
u"荙": u"薘",
u"荚": u"莢",
u"荛": u"蕘",
u"荜": u"蓽",
u"荞": u"蕎",
u"荟": u"薈",
u"荠": u"薺",
u"荣": u"榮",
u"荤": u"葷",
u"荥": u"滎",
u"荦": u"犖",
u"荧": u"熒",
u"荨": u"蕁",
u"荩": u"藎",
u"荪": u"蓀",
u"荫": u"蔭",
u"荬": u"蕒",
u"荭": u"葒",
u"荮": u"葤",
u"莅": u"蒞",
u"莱": u"萊",
u"莲": u"蓮",
u"莳": u"蒔",
u"莴": u"萵",
u"莶": u"薟",
u"莸": u"蕕",
u"莹": u"瑩",
u"莺": u"鶯",
u"萝": u"蘿",
u"萤": u"螢",
u"营": u"營",
u"萦": u"縈",
u"萧": u"蕭",
u"萨": u"薩",
u"葱": u"蔥",
u"蒇": u"蕆",
u"蒉": u"蕢",
u"蒋": u"蔣",
u"蒌": u"蔞",
u"蓝": u"藍",
u"蓟": u"薊",
u"蓠": u"蘺",
u"蓣": u"蕷",
u"蓥": u"鎣",
u"蓦": u"驀",
u"蔂": u"虆",
u"蔷": u"薔",
u"蔹": u"蘞",
u"蔺": u"藺",
u"蔼": u"藹",
u"蕰": u"薀",
u"蕲": u"蘄",
u"薮": u"藪",
u"䓕": u"薳",
u"藓": u"蘚",
u"蘖": u"櫱",
u"虏": u"虜",
u"虑": u"慮",
u"虚": u"虛",
u"虬": u"虯",
u"虮": u"蟣",
u"虽": u"雖",
u"虾": u"蝦",
u"虿": u"蠆",
u"蚀": u"蝕",
u"蚁": u"蟻",
u"蚂": u"螞",
u"蚕": u"蠶",
u"蚬": u"蜆",
u"蛊": u"蠱",
u"蛎": u"蠣",
u"蛏": u"蟶",
u"蛮": u"蠻",
u"蛰": u"蟄",
u"蛱": u"蛺",
u"蛲": u"蟯",
u"蛳": u"螄",
u"蛴": u"蠐",
u"蜕": u"蛻",
u"蜗": u"蝸",
u"蝇": u"蠅",
u"蝈": u"蟈",
u"蝉": u"蟬",
u"蝼": u"螻",
u"蝾": u"蠑",
u"螀": u"螿",
u"螨": u"蟎",
u"䗖": u"螮",
u"蟏": u"蠨",
u"衅": u"釁",
u"衔": u"銜",
u"补": u"補",
u"衬": u"襯",
u"衮": u"袞",
u"袄": u"襖",
u"袅": u"裊",
u"袆": u"褘",
u"袜": u"襪",
u"袭": u"襲",
u"袯": u"襏",
u"装": u"裝",
u"裆": u"襠",
u"裈": u"褌",
u"裢": u"褳",
u"裣": u"襝",
u"裤": u"褲",
u"裥": u"襇",
u"褛": u"褸",
u"褴": u"襤",
u"䙓": u"襬",
u"见": u"見",
u"观": u"觀",
u"觃": u"覎",
u"规": u"規",
u"觅": u"覓",
u"视": u"視",
u"觇": u"覘",
u"览": u"覽",
u"觉": u"覺",
u"觊": u"覬",
u"觋": u"覡",
u"觌": u"覿",
u"觍": u"覥",
u"觎": u"覦",
u"觏": u"覯",
u"觐": u"覲",
u"觑": u"覷",
u"觞": u"觴",
u"触": u"觸",
u"觯": u"觶",
u"訚": u"誾",
u"䜣": u"訢",
u"誉": u"譽",
u"誊": u"謄",
u"䜧": u"譅",
u"讠": u"訁",
u"计": u"計",
u"订": u"訂",
u"讣": u"訃",
u"认": u"認",
u"讥": u"譏",
u"讦": u"訐",
u"讧": u"訌",
u"讨": u"討",
u"让": u"讓",
u"讪": u"訕",
u"讫": u"訖",
u"讬": u"託",
u"训": u"訓",
u"议": u"議",
u"讯": u"訊",
u"记": u"記",
u"讱": u"訒",
u"讲": u"講",
u"讳": u"諱",
u"讴": u"謳",
u"讵": u"詎",
u"讶": u"訝",
u"讷": u"訥",
u"许": u"許",
u"讹": u"訛",
u"论": u"論",
u"讻": u"訩",
u"讼": u"訟",
u"讽": u"諷",
u"设": u"設",
u"访": u"訪",
u"诀": u"訣",
u"证": u"證",
u"诂": u"詁",
u"诃": u"訶",
u"评": u"評",
u"诅": u"詛",
u"识": u"識",
u"诇": u"詗",
u"诈": u"詐",
u"诉": u"訴",
u"诊": u"診",
u"诋": u"詆",
u"诌": u"謅",
u"词": u"詞",
u"诎": u"詘",
u"诏": u"詔",
u"诐": u"詖",
u"译": u"譯",
u"诒": u"詒",
u"诓": u"誆",
u"诔": u"誄",
u"试": u"試",
u"诖": u"詿",
u"诗": u"詩",
u"诘": u"詰",
u"诙": u"詼",
u"诚": u"誠",
u"诛": u"誅",
u"诜": u"詵",
u"话": u"話",
u"诞": u"誕",
u"诟": u"詬",
u"诠": u"詮",
u"诡": u"詭",
u"询": u"詢",
u"诣": u"詣",
u"诤": u"諍",
u"该": u"該",
u"详": u"詳",
u"诧": u"詫",
u"诨": u"諢",
u"诩": u"詡",
u"诪": u"譸",
u"诫": u"誡",
u"诬": u"誣",
u"语": u"語",
u"诮": u"誚",
u"误": u"誤",
u"诰": u"誥",
u"诱": u"誘",
u"诲": u"誨",
u"诳": u"誑",
u"诵": u"誦",
u"诶": u"誒",
u"请": u"請",
u"诸": u"諸",
u"诹": u"諏",
u"诺": u"諾",
u"读": u"讀",
u"诼": u"諑",
u"诽": u"誹",
u"课": u"課",
u"诿": u"諉",
u"谀": u"諛",
u"谁": u"誰",
u"谂": u"諗",
u"调": u"調",
u"谄": u"諂",
u"谅": u"諒",
u"谆": u"諄",
u"谇": u"誶",
u"谈": u"談",
u"谊": u"誼",
u"谋": u"謀",
u"谌": u"諶",
u"谍": u"諜",
u"谎": u"謊",
u"谏": u"諫",
u"谐": u"諧",
u"谑": u"謔",
u"谒": u"謁",
u"谓": u"謂",
u"谔": u"諤",
u"谕": u"諭",
u"谖": u"諼",
u"谗": u"讒",
u"谘": u"諮",
u"谙": u"諳",
u"谚": u"諺",
u"谛": u"諦",
u"谜": u"謎",
u"谝": u"諞",
u"谞": u"諝",
u"谟": u"謨",
u"谠": u"讜",
u"谡": u"謖",
u"谢": u"謝",
u"谤": u"謗",
u"谥": u"謚",
u"谦": u"謙",
u"谧": u"謐",
u"谨": u"謹",
u"谩": u"謾",
u"谪": u"謫",
u"谬": u"謬",
u"谭": u"譚",
u"谮": u"譖",
u"谯": u"譙",
u"谰": u"讕",
u"谱": u"譜",
u"谲": u"譎",
u"谳": u"讞",
u"谴": u"譴",
u"谵": u"譫",
u"谶": u"讖",
u"豮": u"豶",
u"䝙": u"貙",
u"䞐": u"賰",
u"贝": u"貝",
u"贞": u"貞",
u"负": u"負",
u"贠": u"貟",
u"贡": u"貢",
u"财": u"財",
u"责": u"責",
u"贤": u"賢",
u"败": u"敗",
u"账": u"賬",
u"货": u"貨",
u"质": u"質",
u"贩": u"販",
u"贪": u"貪",
u"贫": u"貧",
u"贬": u"貶",
u"购": u"購",
u"贮": u"貯",
u"贯": u"貫",
u"贰": u"貳",
u"贱": u"賤",
u"贲": u"賁",
u"贳": u"貰",
u"贴": u"貼",
u"贵": u"貴",
u"贶": u"貺",
u"贷": u"貸",
u"贸": u"貿",
u"费": u"費",
u"贺": u"賀",
u"贻": u"貽",
u"贼": u"賊",
u"贽": u"贄",
u"贾": u"賈",
u"贿": u"賄",
u"赀": u"貲",
u"赁": u"賃",
u"赂": u"賂",
u"资": u"資",
u"赅": u"賅",
u"赆": u"贐",
u"赇": u"賕",
u"赈": u"賑",
u"赉": u"賚",
u"赊": u"賒",
u"赋": u"賦",
u"赌": u"賭",
u"赎": u"贖",
u"赏": u"賞",
u"赐": u"賜",
u"赑": u"贔",
u"赒": u"賙",
u"赓": u"賡",
u"赔": u"賠",
u"赕": u"賧",
u"赖": u"賴",
u"赗": u"賵",
u"赘": u"贅",
u"赙": u"賻",
u"赚": u"賺",
u"赛": u"賽",
u"赜": u"賾",
u"赞": u"贊",
u"赟": u"贇",
u"赠": u"贈",
u"赡": u"贍",
u"赢": u"贏",
u"赣": u"贛",
u"赪": u"赬",
u"赵": u"趙",
u"赶": u"趕",
u"趋": u"趨",
u"趱": u"趲",
u"趸": u"躉",
u"跃": u"躍",
u"跄": u"蹌",
u"跞": u"躒",
u"践": u"踐",
u"跶": u"躂",
u"跷": u"蹺",
u"跸": u"蹕",
u"跹": u"躚",
u"跻": u"躋",
u"踊": u"踴",
u"踌": u"躊",
u"踪": u"蹤",
u"踬": u"躓",
u"踯": u"躑",
u"蹑": u"躡",
u"蹒": u"蹣",
u"蹰": u"躕",
u"蹿": u"躥",
u"躏": u"躪",
u"躜": u"躦",
u"躯": u"軀",
u"车": u"車",
u"轧": u"軋",
u"轨": u"軌",
u"轩": u"軒",
u"轪": u"軑",
u"轫": u"軔",
u"转": u"轉",
u"轭": u"軛",
u"轮": u"輪",
u"软": u"軟",
u"轰": u"轟",
u"轱": u"軲",
u"轲": u"軻",
u"轳": u"轤",
u"轴": u"軸",
u"轵": u"軹",
u"轶": u"軼",
u"轷": u"軤",
u"轸": u"軫",
u"轹": u"轢",
u"轺": u"軺",
u"轻": u"輕",
u"轼": u"軾",
u"载": u"載",
u"轾": u"輊",
u"轿": u"轎",
u"辀": u"輈",
u"辁": u"輇",
u"辂": u"輅",
u"较": u"較",
u"辄": u"輒",
u"辅": u"輔",
u"辆": u"輛",
u"辇": u"輦",
u"辈": u"輩",
u"辉": u"輝",
u"辊": u"輥",
u"辋": u"輞",
u"辌": u"輬",
u"辍": u"輟",
u"辎": u"輜",
u"辏": u"輳",
u"辐": u"輻",
u"辑": u"輯",
u"辒": u"轀",
u"输": u"輸",
u"辔": u"轡",
u"辕": u"轅",
u"辖": u"轄",
u"辗": u"輾",
u"辘": u"轆",
u"辙": u"轍",
u"辚": u"轔",
u"辞": u"辭",
u"辩": u"辯",
u"辫": u"辮",
u"边": u"邊",
u"辽": u"遼",
u"达": u"達",
u"迁": u"遷",
u"过": u"過",
u"迈": u"邁",
u"运": u"運",
u"还": u"還",
u"这": u"這",
u"进": u"進",
u"远": u"遠",
u"违": u"違",
u"连": u"連",
u"迟": u"遲",
u"迩": u"邇",
u"迳": u"逕",
u"迹": u"跡",
u"选": u"選",
u"逊": u"遜",
u"递": u"遞",
u"逦": u"邐",
u"逻": u"邏",
u"遗": u"遺",
u"遥": u"遙",
u"邓": u"鄧",
u"邝": u"鄺",
u"邬": u"鄔",
u"邮": u"郵",
u"邹": u"鄒",
u"邺": u"鄴",
u"邻": u"鄰",
u"郏": u"郟",
u"郐": u"鄶",
u"郑": u"鄭",
u"郓": u"鄆",
u"郦": u"酈",
u"郧": u"鄖",
u"郸": u"鄲",
u"酂": u"酇",
u"酦": u"醱",
u"酱": u"醬",
u"酽": u"釅",
u"酾": u"釃",
u"酿": u"釀",
u"释": u"釋",
u"鉴": u"鑒",
u"銮": u"鑾",
u"錾": u"鏨",
u"𨱏": u"鎝",
u"钅": u"釒",
u"钆": u"釓",
u"钇": u"釔",
u"针": u"針",
u"钉": u"釘",
u"钊": u"釗",
u"钋": u"釙",
u"钌": u"釕",
u"钍": u"釷",
u"钎": u"釺",
u"钏": u"釧",
u"钐": u"釤",
u"钑": u"鈒",
u"钒": u"釩",
u"钓": u"釣",
u"钔": u"鍆",
u"钕": u"釹",
u"钖": u"鍚",
u"钗": u"釵",
u"钘": u"鈃",
u"钙": u"鈣",
u"钚": u"鈈",
u"钛": u"鈦",
u"钜": u"鉅",
u"钝": u"鈍",
u"钞": u"鈔",
u"钠": u"鈉",
u"钡": u"鋇",
u"钢": u"鋼",
u"钣": u"鈑",
u"钤": u"鈐",
u"钥": u"鑰",
u"钦": u"欽",
u"钧": u"鈞",
u"钨": u"鎢",
u"钪": u"鈧",
u"钫": u"鈁",
u"钬": u"鈥",
u"钭": u"鈄",
u"钮": u"鈕",
u"钯": u"鈀",
u"钰": u"鈺",
u"钱": u"錢",
u"钲": u"鉦",
u"钳": u"鉗",
u"钴": u"鈷",
u"钶": u"鈳",
u"钷": u"鉕",
u"钸": u"鈽",
u"钹": u"鈸",
u"钺": u"鉞",
u"钻": u"鑽",
u"钼": u"鉬",
u"钽": u"鉭",
u"钾": u"鉀",
u"钿": u"鈿",
u"铀": u"鈾",
u"铁": u"鐵",
u"铂": u"鉑",
u"铃": u"鈴",
u"铄": u"鑠",
u"铅": u"鉛",
u"铆": u"鉚",
u"铇": u"鉋",
u"铈": u"鈰",
u"铉": u"鉉",
u"铊": u"鉈",
u"铋": u"鉍",
u"铌": u"鈮",
u"铍": u"鈹",
u"铎": u"鐸",
u"铏": u"鉶",
u"铐": u"銬",
u"铑": u"銠",
u"铒": u"鉺",
u"铓": u"鋩",
u"铔": u"錏",
u"铕": u"銪",
u"铖": u"鋮",
u"铗": u"鋏",
u"铘": u"鋣",
u"铙": u"鐃",
u"铚": u"銍",
u"铛": u"鐺",
u"铜": u"銅",
u"铝": u"鋁",
u"铞": u"銱",
u"铟": u"銦",
u"铠": u"鎧",
u"铡": u"鍘",
u"铢": u"銖",
u"铣": u"銑",
u"铤": u"鋌",
u"铥": u"銩",
u"铦": u"銛",
u"铧": u"鏵",
u"铨": u"銓",
u"铩": u"鎩",
u"铪": u"鉿",
u"铫": u"銚",
u"铬": u"鉻",
u"铭": u"銘",
u"铮": u"錚",
u"铯": u"銫",
u"铰": u"鉸",
u"铱": u"銥",
u"铲": u"鏟",
u"铳": u"銃",
u"铴": u"鐋",
u"铵": u"銨",
u"银": u"銀",
u"铷": u"銣",
u"铸": u"鑄",
u"铹": u"鐒",
u"铺": u"鋪",
u"铻": u"鋙",
u"铼": u"錸",
u"铽": u"鋱",
u"链": u"鏈",
u"铿": u"鏗",
u"销": u"銷",
u"锁": u"鎖",
u"锂": u"鋰",
u"锃": u"鋥",
u"锄": u"鋤",
u"锅": u"鍋",
u"锆": u"鋯",
u"锇": u"鋨",
u"锉": u"銼",
u"锊": u"鋝",
u"锋": u"鋒",
u"锌": u"鋅",
u"锍": u"鋶",
u"锎": u"鐦",
u"锏": u"鐧",
u"锑": u"銻",
u"锒": u"鋃",
u"锓": u"鋟",
u"锔": u"鋦",
u"锕": u"錒",
u"锖": u"錆",
u"锗": u"鍺",
u"锘": u"鍩",
u"错": u"錯",
u"锚": u"錨",
u"锛": u"錛",
u"锜": u"錡",
u"锝": u"鍀",
u"锞": u"錁",
u"锟": u"錕",
u"锠": u"錩",
u"锡": u"錫",
u"锢": u"錮",
u"锣": u"鑼",
u"锥": u"錐",
u"锦": u"錦",
u"锧": u"鑕",
u"锩": u"錈",
u"锪": u"鍃",
u"锫": u"錇",
u"锬": u"錟",
u"锭": u"錠",
u"键": u"鍵",
u"锯": u"鋸",
u"锰": u"錳",
u"锱": u"錙",
u"锲": u"鍥",
u"锳": u"鍈",
u"锴": u"鍇",
u"锵": u"鏘",
u"锶": u"鍶",
u"锷": u"鍔",
u"锸": u"鍤",
u"锹": u"鍬",
u"锺": u"鍾",
u"锻": u"鍛",
u"锼": u"鎪",
u"锽": u"鍠",
u"锾": u"鍰",
u"锿": u"鎄",
u"镀": u"鍍",
u"镁": u"鎂",
u"镂": u"鏤",
u"镃": u"鎡",
u"镄": u"鐨",
u"镅": u"鎇",
u"镆": u"鏌",
u"镇": u"鎮",
u"镈": u"鎛",
u"镉": u"鎘",
u"镊": u"鑷",
u"镋": u"鎲",
u"镍": u"鎳",
u"镎": u"鎿",
u"镏": u"鎦",
u"镐": u"鎬",
u"镑": u"鎊",
u"镒": u"鎰",
u"镓": u"鎵",
u"镔": u"鑌",
u"镕": u"鎔",
u"镖": u"鏢",
u"镗": u"鏜",
u"镘": u"鏝",
u"镙": u"鏍",
u"镚": u"鏰",
u"镛": u"鏞",
u"镜": u"鏡",
u"镝": u"鏑",
u"镞": u"鏃",
u"镟": u"鏇",
u"镠": u"鏐",
u"镡": u"鐔",
u"镣": u"鐐",
u"镤": u"鏷",
u"镥": u"鑥",
u"镦": u"鐓",
u"镧": u"鑭",
u"镨": u"鐠",
u"镩": u"鑹",
u"镪": u"鏹",
u"镫": u"鐙",
u"镬": u"鑊",
u"镭": u"鐳",
u"镮": u"鐶",
u"镯": u"鐲",
u"镰": u"鐮",
u"镱": u"鐿",
u"镲": u"鑔",
u"镳": u"鑣",
u"镴": u"鑞",
u"镵": u"鑱",
u"镶": u"鑲",
u"长": u"長",
u"门": u"門",
u"闩": u"閂",
u"闪": u"閃",
u"闫": u"閆",
u"闬": u"閈",
u"闭": u"閉",
u"问": u"問",
u"闯": u"闖",
u"闰": u"閏",
u"闱": u"闈",
u"闲": u"閑",
u"闳": u"閎",
u"间": u"間",
u"闵": u"閔",
u"闶": u"閌",
u"闷": u"悶",
u"闸": u"閘",
u"闹": u"鬧",
u"闺": u"閨",
u"闻": u"聞",
u"闼": u"闥",
u"闽": u"閩",
u"闾": u"閭",
u"闿": u"闓",
u"阀": u"閥",
u"阁": u"閣",
u"阂": u"閡",
u"阃": u"閫",
u"阄": u"鬮",
u"阆": u"閬",
u"阇": u"闍",
u"阈": u"閾",
u"阉": u"閹",
u"阊": u"閶",
u"阋": u"鬩",
u"阌": u"閿",
u"阍": u"閽",
u"阎": u"閻",
u"阏": u"閼",
u"阐": u"闡",
u"阑": u"闌",
u"阒": u"闃",
u"阓": u"闠",
u"阔": u"闊",
u"阕": u"闋",
u"阖": u"闔",
u"阗": u"闐",
u"阘": u"闒",
u"阙": u"闕",
u"阚": u"闞",
u"阛": u"闤",
u"队": u"隊",
u"阳": u"陽",
u"阴": u"陰",
u"阵": u"陣",
u"阶": u"階",
u"际": u"際",
u"陆": u"陸",
u"陇": u"隴",
u"陈": u"陳",
u"陉": u"陘",
u"陕": u"陝",
u"陧": u"隉",
u"陨": u"隕",
u"险": u"險",
u"随": u"隨",
u"隐": u"隱",
u"隶": u"隸",
u"隽": u"雋",
u"难": u"難",
u"雏": u"雛",
u"雠": u"讎",
u"雳": u"靂",
u"雾": u"霧",
u"霁": u"霽",
u"霡": u"霢",
u"霭": u"靄",
u"靓": u"靚",
u"静": u"靜",
u"靥": u"靨",
u"䩄": u"靦",
u"鞑": u"韃",
u"鞒": u"鞽",
u"鞯": u"韉",
u"韦": u"韋",
u"韧": u"韌",
u"韨": u"韍",
u"韩": u"韓",
u"韪": u"韙",
u"韫": u"韞",
u"韬": u"韜",
u"韵": u"韻",
u"页": u"頁",
u"顶": u"頂",
u"顷": u"頃",
u"顸": u"頇",
u"项": u"項",
u"顺": u"順",
u"顼": u"頊",
u"顽": u"頑",
u"顾": u"顧",
u"顿": u"頓",
u"颀": u"頎",
u"颁": u"頒",
u"颂": u"頌",
u"颃": u"頏",
u"预": u"預",
u"颅": u"顱",
u"领": u"領",
u"颇": u"頗",
u"颈": u"頸",
u"颉": u"頡",
u"颊": u"頰",
u"颋": u"頲",
u"颌": u"頜",
u"颍": u"潁",
u"颎": u"熲",
u"颏": u"頦",
u"颐": u"頤",
u"频": u"頻",
u"颒": u"頮",
u"颔": u"頷",
u"颕": u"頴",
u"颖": u"穎",
u"颗": u"顆",
u"题": u"題",
u"颙": u"顒",
u"颚": u"顎",
u"颛": u"顓",
u"额": u"額",
u"颞": u"顳",
u"颟": u"顢",
u"颠": u"顛",
u"颡": u"顙",
u"颢": u"顥",
u"颤": u"顫",
u"颥": u"顬",
u"颦": u"顰",
u"颧": u"顴",
u"风": u"風",
u"飏": u"颺",
u"飐": u"颭",
u"飑": u"颮",
u"飒": u"颯",
u"飓": u"颶",
u"飔": u"颸",
u"飕": u"颼",
u"飖": u"颻",
u"飗": u"飀",
u"飘": u"飄",
u"飙": u"飆",
u"飚": u"飈",
u"飞": u"飛",
u"飨": u"饗",
u"餍": u"饜",
u"饣": u"飠",
u"饤": u"飣",
u"饦": u"飥",
u"饧": u"餳",
u"饨": u"飩",
u"饩": u"餼",
u"饪": u"飪",
u"饫": u"飫",
u"饬": u"飭",
u"饭": u"飯",
u"饮": u"飲",
u"饯": u"餞",
u"饰": u"飾",
u"饱": u"飽",
u"饲": u"飼",
u"饳": u"飿",
u"饴": u"飴",
u"饵": u"餌",
u"饶": u"饒",
u"饷": u"餉",
u"饸": u"餄",
u"饹": u"餎",
u"饺": u"餃",
u"饻": u"餏",
u"饼": u"餅",
u"饽": u"餑",
u"饾": u"餖",
u"饿": u"餓",
u"馀": u"餘",
u"馁": u"餒",
u"馂": u"餕",
u"馃": u"餜",
u"馄": u"餛",
u"馅": u"餡",
u"馆": u"館",
u"馇": u"餷",
u"馈": u"饋",
u"馉": u"餶",
u"馊": u"餿",
u"馋": u"饞",
u"馌": u"饁",
u"馍": u"饃",
u"馎": u"餺",
u"馏": u"餾",
u"馐": u"饈",
u"馑": u"饉",
u"馒": u"饅",
u"馓": u"饊",
u"馔": u"饌",
u"馕": u"饢",
u"䯄": u"騧",
u"马": u"馬",
u"驭": u"馭",
u"驮": u"馱",
u"驯": u"馴",
u"驰": u"馳",
u"驱": u"驅",
u"驲": u"馹",
u"驳": u"駁",
u"驴": u"驢",
u"驵": u"駔",
u"驶": u"駛",
u"驷": u"駟",
u"驸": u"駙",
u"驹": u"駒",
u"驺": u"騶",
u"驻": u"駐",
u"驼": u"駝",
u"驽": u"駑",
u"驾": u"駕",
u"驿": u"驛",
u"骀": u"駘",
u"骁": u"驍",
u"骃": u"駰",
u"骄": u"驕",
u"骅": u"驊",
u"骆": u"駱",
u"骇": u"駭",
u"骈": u"駢",
u"骉": u"驫",
u"骊": u"驪",
u"骋": u"騁",
u"验": u"驗",
u"骍": u"騂",
u"骎": u"駸",
u"骏": u"駿",
u"骐": u"騏",
u"骑": u"騎",
u"骒": u"騍",
u"骓": u"騅",
u"骔": u"騌",
u"骕": u"驌",
u"骖": u"驂",
u"骗": u"騙",
u"骘": u"騭",
u"骙": u"騤",
u"骚": u"騷",
u"骛": u"騖",
u"骜": u"驁",
u"骝": u"騮",
u"骞": u"騫",
u"骟": u"騸",
u"骠": u"驃",
u"骡": u"騾",
u"骢": u"驄",
u"骣": u"驏",
u"骤": u"驟",
u"骥": u"驥",
u"骦": u"驦",
u"骧": u"驤",
u"髅": u"髏",
u"髋": u"髖",
u"髌": u"髕",
u"鬓": u"鬢",
u"魇": u"魘",
u"魉": u"魎",
u"鱼": u"魚",
u"鱽": u"魛",
u"鱾": u"魢",
u"鱿": u"魷",
u"鲀": u"魨",
u"鲁": u"魯",
u"鲂": u"魴",
u"鲃": u"䰾",
u"鲄": u"魺",
u"鲅": u"鮁",
u"鲆": u"鮃",
u"鲈": u"鱸",
u"鲉": u"鮋",
u"鲊": u"鮓",
u"鲋": u"鮒",
u"鲌": u"鮊",
u"鲍": u"鮑",
u"鲎": u"鱟",
u"鲏": u"鮍",
u"鲐": u"鮐",
u"鲑": u"鮭",
u"鲒": u"鮚",
u"鲓": u"鮳",
u"鲔": u"鮪",
u"鲕": u"鮞",
u"鲖": u"鮦",
u"鲗": u"鰂",
u"鲘": u"鮜",
u"鲙": u"鱠",
u"鲚": u"鱭",
u"鲛": u"鮫",
u"鲜": u"鮮",
u"鲝": u"鮺",
u"鲟": u"鱘",
u"鲠": u"鯁",
u"鲡": u"鱺",
u"鲢": u"鰱",
u"鲣": u"鰹",
u"鲤": u"鯉",
u"鲥": u"鰣",
u"鲦": u"鰷",
u"鲧": u"鯀",
u"鲨": u"鯊",
u"鲩": u"鯇",
u"鲪": u"鮶",
u"鲫": u"鯽",
u"鲬": u"鯒",
u"鲭": u"鯖",
u"鲮": u"鯪",
u"鲯": u"鯕",
u"鲰": u"鯫",
u"鲱": u"鯡",
u"鲲": u"鯤",
u"鲳": u"鯧",
u"鲴": u"鯝",
u"鲵": u"鯢",
u"鲶": u"鯰",
u"鲷": u"鯛",
u"鲸": u"鯨",
u"鲹": u"鰺",
u"鲺": u"鯴",
u"鲻": u"鯔",
u"鲼": u"鱝",
u"鲽": u"鰈",
u"鲾": u"鰏",
u"鲿": u"鱨",
u"鳀": u"鯷",
u"鳁": u"鰮",
u"鳂": u"鰃",
u"鳃": u"鰓",
u"鳅": u"鰍",
u"鳆": u"鰒",
u"鳇": u"鰉",
u"鳈": u"鰁",
u"鳉": u"鱂",
u"鳊": u"鯿",
u"鳋": u"鰠",
u"鳌": u"鰲",
u"鳍": u"鰭",
u"鳎": u"鰨",
u"鳏": u"鰥",
u"鳐": u"鰩",
u"鳑": u"鰟",
u"鳒": u"鰜",
u"鳓": u"鰳",
u"鳔": u"鰾",
u"鳕": u"鱈",
u"鳖": u"鱉",
u"鳗": u"鰻",
u"鳘": u"鰵",
u"鳙": u"鱅",
u"鳚": u"䲁",
u"鳛": u"鰼",
u"鳜": u"鱖",
u"鳝": u"鱔",
u"鳞": u"鱗",
u"鳟": u"鱒",
u"鳠": u"鱯",
u"鳡": u"鱤",
u"鳢": u"鱧",
u"鳣": u"鱣",
u"䴓": u"鳾",
u"䴕": u"鴷",
u"䴔": u"鵁",
u"䴖": u"鶄",
u"䴗": u"鶪",
u"䴘": u"鷈",
u"䴙": u"鷿",
u"㶉": u"鸂",
u"鸟": u"鳥",
u"鸠": u"鳩",
u"鸢": u"鳶",
u"鸣": u"鳴",
u"鸤": u"鳲",
u"鸥": u"鷗",
u"鸦": u"鴉",
u"鸧": u"鶬",
u"鸨": u"鴇",
u"鸩": u"鴆",
u"鸪": u"鴣",
u"鸫": u"鶇",
u"鸬": u"鸕",
u"鸭": u"鴨",
u"鸮": u"鴞",
u"鸯": u"鴦",
u"鸰": u"鴒",
u"鸱": u"鴟",
u"鸲": u"鴝",
u"鸳": u"鴛",
u"鸴": u"鷽",
u"鸵": u"鴕",
u"鸶": u"鷥",
u"鸷": u"鷙",
u"鸸": u"鴯",
u"鸹": u"鴰",
u"鸺": u"鵂",
u"鸻": u"鴴",
u"鸼": u"鵃",
u"鸽": u"鴿",
u"鸾": u"鸞",
u"鸿": u"鴻",
u"鹀": u"鵐",
u"鹁": u"鵓",
u"鹂": u"鸝",
u"鹃": u"鵑",
u"鹄": u"鵠",
u"鹅": u"鵝",
u"鹆": u"鵒",
u"鹇": u"鷳",
u"鹈": u"鵜",
u"鹉": u"鵡",
u"鹊": u"鵲",
u"鹋": u"鶓",
u"鹌": u"鵪",
u"鹍": u"鵾",
u"鹎": u"鵯",
u"鹏": u"鵬",
u"鹐": u"鵮",
u"鹑": u"鶉",
u"鹒": u"鶊",
u"鹓": u"鵷",
u"鹔": u"鷫",
u"鹕": u"鶘",
u"鹖": u"鶡",
u"鹗": u"鶚",
u"鹘": u"鶻",
u"鹙": u"鶖",
u"鹛": u"鶥",
u"鹜": u"鶩",
u"鹝": u"鷊",
u"鹞": u"鷂",
u"鹟": u"鶲",
u"鹠": u"鶹",
u"鹡": u"鶺",
u"鹢": u"鷁",
u"鹣": u"鶼",
u"鹤": u"鶴",
u"鹥": u"鷖",
u"鹦": u"鸚",
u"鹧": u"鷓",
u"鹨": u"鷚",
u"鹩": u"鷯",
u"鹪": u"鷦",
u"鹫": u"鷲",
u"鹬": u"鷸",
u"鹭": u"鷺",
u"鹯": u"鸇",
u"鹰": u"鷹",
u"鹱": u"鸌",
u"鹲": u"鸏",
u"鹳": u"鸛",
u"鹴": u"鸘",
u"鹾": u"鹺",
u"麦": u"麥",
u"麸": u"麩",
u"黄": u"黃",
u"黉": u"黌",
u"黡": u"黶",
u"黩": u"黷",
u"黪": u"黲",
u"黾": u"黽",
u"鼋": u"黿",
u"鼍": u"鼉",
u"鼗": u"鞀",
u"鼹": u"鼴",
u"齐": u"齊",
u"齑": u"齏",
u"齿": u"齒",
u"龀": u"齔",
u"龁": u"齕",
u"龂": u"齗",
u"龃": u"齟",
u"龄": u"齡",
u"龅": u"齙",
u"龆": u"齠",
u"龇": u"齜",
u"龈": u"齦",
u"龉": u"齬",
u"龊": u"齪",
u"龋": u"齲",
u"龌": u"齷",
u"龙": u"龍",
u"龚": u"龔",
u"龛": u"龕",
u"龟": u"龜",
u"一伙": u"一伙",
u"一并": u"一併",
u"一准": u"一准",
u"一划": u"一划",
u"一地里": u"一地裡",
u"一干": u"一干",
u"一树百获": u"一樹百穫",
u"一台": u"一臺",
u"一冲": u"一衝",
u"一只": u"一隻",
u"一发千钧": u"一髮千鈞",
u"一出": u"一齣",
u"七只": u"七隻",
u"三元里": u"三元裡",
u"三国志": u"三國誌",
u"三复": u"三複",
u"三只": u"三隻",
u"上吊": u"上吊",
u"上台": u"上臺",
u"下不了台": u"下不了臺",
u"下台": u"下臺",
u"下面": u"下麵",
u"不准": u"不准",
u"不吊": u"不吊",
u"不知就里": u"不知就裡",
u"不知所云": u"不知所云",
u"不锈钢": u"不鏽鋼",
u"丑剧": u"丑劇",
u"丑旦": u"丑旦",
u"丑角": u"丑角",
u"并存着": u"並存著",
u"中岳": u"中嶽",
u"中台医专": u"中臺醫專",
u"丰南": u"丰南",
u"丰台": u"丰台",
u"丰姿": u"丰姿",
u"丰采": u"丰采",
u"丰韵": u"丰韻",
u"主干": u"主幹",
u"么么唱唱": u"么么唱唱",
u"么儿": u"么兒",
u"么喝": u"么喝",
u"么妹": u"么妹",
u"么弟": u"么弟",
u"么爷": u"么爺",
u"九世之雠": u"九世之讎",
u"九只": u"九隻",
u"干丝": u"乾絲",
u"干着急": u"乾著急",
u"乱发": u"亂髮",
u"云云": u"云云",
u"云尔": u"云爾",
u"五岳": u"五嶽",
u"五斗柜": u"五斗櫃",
u"五斗橱": u"五斗櫥",
u"五谷": u"五穀",
u"五行生克": u"五行生剋",
u"五只": u"五隻",
u"五出": u"五齣",
u"交卷": u"交卷",
u"人云亦云": u"人云亦云",
u"人物志": u"人物誌",
u"什锦面": u"什錦麵",
u"什么": u"什麼",
u"仆倒": u"仆倒",
u"介系词": u"介係詞",
u"介系词": u"介繫詞",
u"仿制": u"仿製",
u"伙伕": u"伙伕",
u"伙伴": u"伙伴",
u"伙同": u"伙同",
u"伙夫": u"伙夫",
u"伙房": u"伙房",
u"伙计": u"伙計",
u"伙食": u"伙食",
u"布下": u"佈下",
u"布告": u"佈告",
u"布哨": u"佈哨",
u"布局": u"佈局",
u"布岗": u"佈崗",
u"布施": u"佈施",
u"布景": u"佈景",
u"布满": u"佈滿",
u"布线": u"佈線",
u"布置": u"佈置",
u"布署": u"佈署",
u"布道": u"佈道",
u"布达": u"佈達",
u"布防": u"佈防",
u"布阵": u"佈陣",
u"布雷": u"佈雷",
u"体育锻鍊": u"体育鍛鍊",
u"何干": u"何干",
u"作准": u"作准",
u"佣人": u"佣人",
u"佣工": u"佣工",
u"佣金": u"佣金",
u"并入": u"併入",
u"并列": u"併列",
u"并到": u"併到",
u"并合": u"併合",
u"并吞": u"併吞",
u"并在": u"併在",
u"并成": u"併成",
u"并排": u"併排",
u"并拢": u"併攏",
u"并案": u"併案",
u"并为": u"併為",
u"并发": u"併發",
u"并科": u"併科",
u"并购": u"併購",
u"并进": u"併進",
u"来复": u"來複",
u"供制": u"供製",
u"依依不舍": u"依依不捨",
u"侵并": u"侵併",
u"便辟": u"便辟",
u"系数": u"係數",
u"系为": u"係為",
u"保险柜": u"保險柜",
u"信号台": u"信號臺",
u"修复": u"修複",
u"修胡刀": u"修鬍刀",
u"俯冲": u"俯衝",
u"个里": u"個裡",
u"借着": u"借著",
u"假发": u"假髮",
u"停制": u"停製",
u"偷鸡不着": u"偷雞不著",
u"家伙": u"傢伙",
u"家俱": u"傢俱",
u"家具": u"傢具",
u"传布": u"傳佈",
u"债台高筑": u"債臺高築",
u"傻里傻气": u"傻裡傻氣",
u"倾家荡产": u"傾家蕩產",
u"倾复": u"傾複",
u"倾复": u"傾覆",
u"僱佣": u"僱佣",
u"仪表": u"儀錶",
u"亿只": u"億隻",
u"尽尽": u"儘儘",
u"尽先": u"儘先",
u"尽其所有": u"儘其所有",
u"尽力": u"儘力",
u"尽快": u"儘快",
u"尽早": u"儘早",
u"尽是": u"儘是",
u"尽管": u"儘管",
u"尽速": u"儘速",
u"尽量": u"儘量",
u"允准": u"允准",
u"兄台": u"兄臺",
u"充饥": u"充饑",
u"光采": u"光采",
u"克里": u"克裡",
u"克复": u"克複",
u"入伙": u"入伙",
u"内制": u"內製",
u"两只": u"兩隻",
u"八字胡": u"八字鬍",
u"八只": u"八隻",
u"公布": u"公佈",
u"公干": u"公幹",
u"公斗": u"公斗",
u"公历": u"公曆",
u"六只": u"六隻",
u"六出": u"六齣",
u"兼并": u"兼併",
u"冤雠": u"冤讎",
u"准予": u"准予",
u"准假": u"准假",
u"准将": u"准將",
u"准考证": u"准考證",
u"准许": u"准許",
u"几几": u"几几",
u"几案": u"几案",
u"几丝": u"几絲",
u"凹洞里": u"凹洞裡",
u"出征": u"出征",
u"出锤": u"出鎚",
u"刀削面": u"刀削麵",
u"刁斗": u"刁斗",
u"分布": u"分佈",
u"切面": u"切麵",
u"刊布": u"刊佈",
u"划上": u"划上",
u"划下": u"划下",
u"划不来": u"划不來",
u"划了": u"划了",
u"划具": u"划具",
u"划出": u"划出",
u"划到": u"划到",
u"划动": u"划動",
u"划去": u"划去",
u"划子": u"划子",
u"划得来": u"划得來",
u"划拳": u"划拳",
u"划桨": u"划槳",
u"划水": u"划水",
u"划算": u"划算",
u"划船": u"划船",
u"划艇": u"划艇",
u"划着": u"划著",
u"划着走": u"划著走",
u"划行": u"划行",
u"划走": u"划走",
u"划起": u"划起",
u"划进": u"划進",
u"划过": u"划過",
u"初征": u"初征",
u"别致": u"別緻",
u"别着": u"別著",
u"别只": u"別隻",
u"利比里亚": u"利比裡亞",
u"刮着": u"刮著",
u"刮胡刀": u"刮鬍刀",
u"剃发": u"剃髮",
u"剃须": u"剃鬚",
u"削发": u"削髮",
u"克制": u"剋制",
u"克星": u"剋星",
u"克服": u"剋服",
u"克死": u"剋死",
u"克薄": u"剋薄",
u"前仆后继": u"前仆後繼",
u"前台": u"前臺",
u"前车之复": u"前車之覆",
u"刚才": u"剛纔",
u"剪发": u"剪髮",
u"割舍": u"割捨",
u"创制": u"創製",
u"加里宁": u"加裡寧",
u"动荡": u"動蕩",
u"劳力士表": u"勞力士錶",
u"包准": u"包准",
u"包谷": u"包穀",
u"北斗": u"北斗",
u"北回": u"北迴",
u"匡复": u"匡複",
u"匪干": u"匪幹",
u"十卷": u"十卷",
u"十台": u"十臺",
u"十只": u"十隻",
u"十出": u"十齣",
u"千丝万缕": u"千絲萬縷",
u"千回百折": u"千迴百折",
u"千回百转": u"千迴百轉",
u"千钧一发": u"千鈞一髮",
u"千只": u"千隻",
u"升斗小民": u"升斗小民",
u"半只": u"半隻",
u"南岳": u"南嶽",
u"南征": u"南征",
u"南台": u"南臺",
u"南回": u"南迴",
u"卡里": u"卡裡",
u"印制": u"印製",
u"卷入": u"卷入",
u"卷取": u"卷取",
u"卷土重来": u"卷土重來",
u"卷子": u"卷子",
u"卷宗": u"卷宗",
u"卷尺": u"卷尺",
u"卷层云": u"卷層雲",
u"卷帙": u"卷帙",
u"卷扬机": u"卷揚機",
u"卷曲": u"卷曲",
u"卷染": u"卷染",
u"卷烟": u"卷煙",
u"卷筒": u"卷筒",
u"卷纬": u"卷緯",
u"卷绕": u"卷繞",
u"卷装": u"卷裝",
u"卷轴": u"卷軸",
u"卷云": u"卷雲",
u"卷领": u"卷領",
u"卷发": u"卷髮",
u"卷须": u"卷鬚",
u"参与": u"參与",
u"参与者": u"參与者",
u"参合": u"參合",
u"参考价值": u"參考價值",
u"参与": u"參與",
u"参与人员": u"參與人員",
u"参与制": u"參與制",
u"参与感": u"參與感",
u"参与者": u"參與者",
u"参观团": u"參觀團",
u"参观团体": u"參觀團體",
u"参阅": u"參閱",
u"反冲": u"反衝",
u"反复": u"反複",
u"反复": u"反覆",
u"取舍": u"取捨",
u"口里": u"口裡",
u"只准": u"只准",
u"只冲": u"只衝",
u"叮当": u"叮噹",
u"可怜虫": u"可憐虫",
u"可紧可松": u"可緊可鬆",
u"台制": u"台製",
u"司令台": u"司令臺",
u"吃着不尽": u"吃著不盡",
u"吃里扒外": u"吃裡扒外",
u"吃里爬外": u"吃裡爬外",
u"各吊": u"各吊",
u"合伙": u"合伙",
u"合并": u"合併",
u"合着": u"合著",
u"合着者": u"合著者",
u"吊上": u"吊上",
u"吊下": u"吊下",
u"吊了": u"吊了",
u"吊个": u"吊個",
u"吊儿郎当": u"吊兒郎當",
u"吊到": u"吊到",
u"吊去": u"吊去",
u"吊取": u"吊取",
u"吊吊": u"吊吊",
u"吊嗓": u"吊嗓",
u"吊好": u"吊好",
u"吊子": u"吊子",
u"吊带": u"吊帶",
u"吊带裤": u"吊帶褲",
u"吊床": u"吊床",
u"吊得": u"吊得",
u"吊挂": u"吊掛",
u"吊挂着": u"吊掛著",
u"吊杆": u"吊杆",
u"吊架": u"吊架",
u"吊桶": u"吊桶",
u"吊杆": u"吊桿",
u"吊桥": u"吊橋",
u"吊死": u"吊死",
u"吊灯": u"吊燈",
u"吊环": u"吊環",
u"吊盘": u"吊盤",
u"吊索": u"吊索",
u"吊着": u"吊著",
u"吊装": u"吊裝",
u"吊裤": u"吊褲",
u"吊裤带": u"吊褲帶",
u"吊袜": u"吊襪",
u"吊走": u"吊走",
u"吊起": u"吊起",
u"吊车": u"吊車",
u"吊钩": u"吊鉤",
u"吊销": u"吊銷",
u"吊钟": u"吊鐘",
u"同伙": u"同伙",
u"名表": u"名錶",
u"后冠": u"后冠",
u"后土": u"后土",
u"后妃": u"后妃",
u"后座": u"后座",
u"后稷": u"后稷",
u"后羿": u"后羿",
u"后里": u"后里",
u"向着": u"向著",
u"吞并": u"吞併",
u"吹发": u"吹髮",
u"吕后": u"呂后",
u"獃里獃气": u"呆裡呆氣",
u"周而复始": u"周而複始",
u"呼吁": u"呼籲",
u"和面": u"和麵",
u"哪里": u"哪裡",
u"哭脏": u"哭髒",
u"问卷": u"問卷",
u"喝采": u"喝采",
u"单干": u"單干",
u"单只": u"單隻",
u"嘴里": u"嘴裡",
u"恶心": u"噁心",
u"当啷": u"噹啷",
u"当当": u"噹噹",
u"噜苏": u"嚕囌",
u"向导": u"嚮導",
u"向往": u"嚮往",
u"向应": u"嚮應",
u"向日": u"嚮日",
u"向迩": u"嚮邇",
u"严丝合缝": u"嚴絲合縫",
u"严复": u"嚴複",
u"四舍五入": u"四捨五入",
u"四只": u"四隻",
u"四出": u"四齣",
u"回丝": u"回絲",
u"回着": u"回著",
u"回荡": u"回蕩",
u"回复": u"回覆",
u"回采": u"回采",
u"圈子里": u"圈子裡",
u"圈里": u"圈裡",
u"国历": u"國曆",
u"国雠": u"國讎",
u"园里": u"園裡",
u"图里": u"圖裡",
u"土里": u"土裡",
u"土制": u"土製",
u"地志": u"地誌",
u"坍台": u"坍臺",
u"坑里": u"坑裡",
u"坦荡": u"坦蕩",
u"垂发": u"垂髮",
u"垮台": u"垮臺",
u"埋布": u"埋佈",
u"城里": u"城裡",
u"基干": u"基幹",
u"报复": u"報複",
u"塌台": u"塌臺",
u"塔台": u"塔臺",
u"涂着": u"塗著",
u"墓志": u"墓誌",
u"墨斗": u"墨斗",
u"墨索里尼": u"墨索裡尼",
u"垦复": u"墾複",
u"垄断价格": u"壟斷價格",
u"垄断资产": u"壟斷資產",
u"垄断集团": u"壟斷集團",
u"壶里": u"壺裡",
u"寿面": u"壽麵",
u"夏天里": u"夏天裡",
u"夏历": u"夏曆",
u"外制": u"外製",
u"多冲": u"多衝",
u"多采多姿": u"多采多姿",
u"多么": u"多麼",
u"夜光表": u"夜光錶",
u"夜里": u"夜裡",
u"梦里": u"夢裡",
u"大伙": u"大伙",
u"大卷": u"大卷",
u"大干": u"大干",
u"大干": u"大幹",
u"大锤": u"大鎚",
u"大只": u"大隻",
u"天后": u"天后",
u"天干": u"天干",
u"天文台": u"天文臺",
u"天翻地复": u"天翻地覆",
u"太后": u"太后",
u"奏折": u"奏摺",
u"女丑": u"女丑",
u"女佣": u"女佣",
u"好家夥": u"好傢夥",
u"好戏连台": u"好戲連臺",
u"如法泡制": u"如法泡製",
u"妆台": u"妝臺",
u"姜太公": u"姜太公",
u"姜子牙": u"姜子牙",
u"姜丝": u"姜絲",
u"字汇": u"字彙",
u"字里行间": u"字裡行間",
u"存折": u"存摺",
u"孟姜女": u"孟姜女",
u"宇宙志": u"宇宙誌",
u"定准": u"定准",
u"定制": u"定製",
u"宣布": u"宣佈",
u"宫里": u"宮裡",
u"家伙": u"家伙",
u"家里": u"家裡",
u"密布": u"密佈",
u"寇雠": u"寇讎",
u"实干": u"實幹",
u"写字台": u"寫字檯",
u"写字台": u"寫字臺",
u"宽松": u"寬鬆",
u"封面里": u"封面裡",
u"射干": u"射干",
u"对表": u"對錶",
u"小丑": u"小丑",
u"小伙": u"小伙",
u"小只": u"小隻",
u"少吊": u"少吊",
u"尺布斗粟": u"尺布斗粟",
u"尼克松": u"尼克鬆",
u"尼采": u"尼采",
u"尿斗": u"尿斗",
u"局里": u"局裡",
u"居里": u"居裡",
u"屋子里": u"屋子裡",
u"屋里": u"屋裡",
u"展布": u"展佈",
u"屡仆屡起": u"屢仆屢起",
u"屯里": u"屯裡",
u"山岳": u"山嶽",
u"山里": u"山裡",
u"峰回": u"峰迴",
u"巡回": u"巡迴",
u"巧干": u"巧幹",
u"巴尔干": u"巴爾幹",
u"巴里": u"巴裡",
u"巷里": u"巷裡",
u"市里": u"市裡",
u"布谷": u"布穀",
u"希腊": u"希腊",
u"帘子": u"帘子",
u"帘布": u"帘布",
u"席卷": u"席卷",
u"带团参加": u"帶團參加",
u"带发修行": u"帶髮修行",
u"干休": u"干休",
u"干系": u"干係",
u"干卿何事": u"干卿何事",
u"干将": u"干將",
u"干戈": u"干戈",
u"干挠": u"干撓",
u"干扰": u"干擾",
u"干支": u"干支",
u"干政": u"干政",
u"干时": u"干時",
u"干涉": u"干涉",
u"干犯": u"干犯",
u"干与": u"干與",
u"干着急": u"干著急",
u"干贝": u"干貝",
u"干预": u"干預",
u"平台": u"平臺",
u"年历": u"年曆",
u"年里": u"年裡",
u"干上": u"幹上",
u"干下去": u"幹下去",
u"干了": u"幹了",
u"干事": u"幹事",
u"干些": u"幹些",
u"干个": u"幹個",
u"干劲": u"幹勁",
u"干员": u"幹員",
u"干吗": u"幹嗎",
u"干嘛": u"幹嘛",
u"干坏事": u"幹壞事",
u"干完": u"幹完",
u"干得": u"幹得",
u"干性油": u"幹性油",
u"干才": u"幹才",
u"干掉": u"幹掉",
u"干校": u"幹校",
u"干活": u"幹活",
u"干流": u"幹流",
u"干球温度": u"幹球溫度",
u"干线": u"幹線",
u"干练": u"幹練",
u"干警": u"幹警",
u"干起来": u"幹起來",
u"干路": u"幹路",
u"干道": u"幹道",
u"干部": u"幹部",
u"干么": u"幹麼",
u"几丝": u"幾絲",
u"几只": u"幾隻",
u"几出": u"幾齣",
u"底里": u"底裡",
u"康采恩": u"康采恩",
u"庙里": u"廟裡",
u"建台": u"建臺",
u"弄脏": u"弄髒",
u"弔卷": u"弔卷",
u"弘历": u"弘曆",
u"别扭": u"彆扭",
u"别拗": u"彆拗",
u"别气": u"彆氣",
u"别脚": u"彆腳",
u"别着": u"彆著",
u"弹子台": u"彈子檯",
u"弹药": u"彈葯",
u"汇报": u"彙報",
u"汇整": u"彙整",
u"汇编": u"彙編",
u"汇总": u"彙總",
u"汇纂": u"彙纂",
u"汇辑": u"彙輯",
u"汇集": u"彙集",
u"形单影只": u"形單影隻",
u"影后": u"影后",
u"往里": u"往裡",
u"往复": u"往複",
u"征伐": u"征伐",
u"征兵": u"征兵",
u"征尘": u"征塵",
u"征夫": u"征夫",
u"征战": u"征戰",
u"征收": u"征收",
u"征服": u"征服",
u"征求": u"征求",
u"征发": u"征發",
u"征衣": u"征衣",
u"征讨": u"征討",
u"征途": u"征途",
u"后台": u"後臺",
u"从里到外": u"從裡到外",
u"从里向外": u"從裡向外",
u"复雠": u"復讎",
u"复辟": u"復辟",
u"德干高原": u"德干高原",
u"心愿": u"心愿",
u"心荡神驰": u"心蕩神馳",
u"心里": u"心裡",
u"忙里": u"忙裡",
u"快干": u"快幹",
u"快冲": u"快衝",
u"怎么": u"怎麼",
u"怎么着": u"怎麼著",
u"怒发冲冠": u"怒髮衝冠",
u"急冲而下": u"急衝而下",
u"怪里怪气": u"怪裡怪氣",
u"恩准": u"恩准",
u"情有所钟": u"情有所鍾",
u"意面": u"意麵",
u"慌里慌张": u"慌裡慌張",
u"慰借": u"慰藉",
u"忧郁": u"憂郁",
u"凭吊": u"憑吊",
u"凭借": u"憑藉",
u"凭借着": u"憑藉著",
u"蒙懂": u"懞懂",
u"怀里": u"懷裡",
u"怀表": u"懷錶",
u"悬吊": u"懸吊",
u"恋恋不舍": u"戀戀不捨",
u"戏台": u"戲臺",
u"戴表": u"戴錶",
u"戽斗": u"戽斗",
u"房里": u"房裡",
u"手不释卷": u"手不釋卷",
u"手卷": u"手卷",
u"手折": u"手摺",
u"手里": u"手裡",
u"手表": u"手錶",
u"手松": u"手鬆",
u"才干": u"才幹",
u"才高八斗": u"才高八斗",
u"打谷": u"打穀",
u"扞御": u"扞禦",
u"批准": u"批准",
u"批复": u"批複",
u"批复": u"批覆",
u"承制": u"承製",
u"抗御": u"抗禦",
u"折冲": u"折衝",
u"披复": u"披覆",
u"披发": u"披髮",
u"抱朴": u"抱朴",
u"抵御": u"抵禦",
u"拆伙": u"拆伙",
u"拆台": u"拆臺",
u"拈须": u"拈鬚",
u"拉纤": u"拉縴",
u"拉面": u"拉麵",
u"拖吊": u"拖吊",
u"拗别": u"拗彆",
u"拮据": u"拮据",
u"振荡": u"振蕩",
u"捍御": u"捍禦",
u"舍不得": u"捨不得",
u"舍出": u"捨出",
u"舍去": u"捨去",
u"舍命": u"捨命",
u"舍己从人": u"捨己從人",
u"舍己救人": u"捨己救人",
u"舍己为人": u"捨己為人",
u"舍己为公": u"捨己為公",
u"舍己为国": u"捨己為國",
u"舍得": u"捨得",
u"舍我其谁": u"捨我其誰",
u"舍本逐末": u"捨本逐末",
u"舍弃": u"捨棄",
u"舍死忘生": u"捨死忘生",
u"舍生": u"捨生",
u"舍短取长": u"捨短取長",
u"舍身": u"捨身",
u"舍车保帅": u"捨車保帥",
u"舍近求远": u"捨近求遠",
u"捲发": u"捲髮",
u"捵面": u"捵麵",
u"扫荡": u"掃蕩",
u"掌柜": u"掌柜",
u"排骨面": u"排骨麵",
u"挂帘": u"掛帘",
u"挂面": u"掛麵",
u"接着说": u"接著說",
u"提心吊胆": u"提心吊膽",
u"插图卷": u"插圖卷",
u"换吊": u"換吊",
u"换只": u"換隻",
u"换发": u"換髮",
u"摇荡": u"搖蕩",
u"搭伙": u"搭伙",
u"折合": u"摺合",
u"折奏": u"摺奏",
u"折子": u"摺子",
u"折尺": u"摺尺",
u"折扇": u"摺扇",
u"折梯": u"摺梯",
u"折椅": u"摺椅",
u"折叠": u"摺疊",
u"折痕": u"摺痕",
u"折篷": u"摺篷",
u"折纸": u"摺紙",
u"折裙": u"摺裙",
u"撒布": u"撒佈",
u"撚须": u"撚鬚",
u"撞球台": u"撞球檯",
u"擂台": u"擂臺",
u"担仔面": u"擔仔麵",
u"担担面": u"擔擔麵",
u"担着": u"擔著",
u"担负着": u"擔負著",
u"据云": u"據云",
u"擢发难数": u"擢髮難數",
u"摆布": u"擺佈",
u"摄制": u"攝製",
u"支干": u"支幹",
u"收获": u"收穫",
u"改制": u"改製",
u"攻克": u"攻剋",
u"放荡": u"放蕩",
u"放松": u"放鬆",
u"叙说着": u"敘說著",
u"散伙": u"散伙",
u"散布": u"散佈",
u"散荡": u"散蕩",
u"散发": u"散髮",
u"整只": u"整隻",
u"整出": u"整齣",
u"文采": u"文采",
u"斗六": u"斗六",
u"斗南": u"斗南",
u"斗大": u"斗大",
u"斗子": u"斗子",
u"斗室": u"斗室",
u"斗方": u"斗方",
u"斗栱": u"斗栱",
u"斗笠": u"斗笠",
u"斗箕": u"斗箕",
u"斗篷": u"斗篷",
u"斗胆": u"斗膽",
u"斗转参横": u"斗轉參橫",
u"斗量": u"斗量",
u"斗门": u"斗門",
u"料斗": u"料斗",
u"斯里兰卡": u"斯裡蘭卡",
u"新历": u"新曆",
u"断头台": u"斷頭臺",
u"方才": u"方纔",
u"施舍": u"施捨",
u"旋绕着": u"旋繞著",
u"旋回": u"旋迴",
u"族里": u"族裡",
u"日历": u"日曆",
u"日志": u"日誌",
u"日进斗金": u"日進斗金",
u"明了": u"明瞭",
u"明窗净几": u"明窗淨几",
u"明里": u"明裡",
u"星斗": u"星斗",
u"星历": u"星曆",
u"星移斗换": u"星移斗換",
u"星移斗转": u"星移斗轉",
u"星罗棋布": u"星羅棋佈",
u"星辰表": u"星辰錶",
u"春假里": u"春假裡",
u"春天里": u"春天裡",
u"晃荡": u"晃蕩",
u"景致": u"景緻",
u"暗地里": u"暗地裡",
u"暗沟里": u"暗溝裡",
u"暗里": u"暗裡",
u"历数": u"曆數",
u"历书": u"曆書",
u"历法": u"曆法",
u"书卷": u"書卷",
u"会干": u"會幹",
u"会里": u"會裡",
u"月历": u"月曆",
u"月台": u"月臺",
u"有只": u"有隻",
u"木制": u"木製",
u"本台": u"本臺",
u"朴子": u"朴子",
u"朴实": u"朴實",
u"朴硝": u"朴硝",
u"朴素": u"朴素",
u"朴资茅斯": u"朴資茅斯",
u"村里": u"村裡",
u"束发": u"束髮",
u"东岳": u"東嶽",
u"东征": u"東征",
u"松赞干布": u"松贊干布",
u"板着脸": u"板著臉",
u"板荡": u"板蕩",
u"枕借": u"枕藉",
u"林宏岳": u"林宏嶽",
u"枝干": u"枝幹",
u"枯干": u"枯幹",
u"某只": u"某隻",
u"染发": u"染髮",
u"柜上": u"柜上",
u"柜台": u"柜台",
u"柜子": u"柜子",
u"查卷": u"查卷",
u"查号台": u"查號臺",
u"校雠学": u"校讎學",
u"核准": u"核准",
u"核复": u"核覆",
u"格里": u"格裡",
u"案卷": u"案卷",
u"条干": u"條幹",
u"棉卷": u"棉卷",
u"棉制": u"棉製",
u"植发": u"植髮",
u"楼台": u"樓臺",
u"标志着": u"標志著",
u"标致": u"標緻",
u"标志": u"標誌",
u"模制": u"模製",
u"树干": u"樹幹",
u"横征暴敛": u"橫征暴斂",
u"横冲": u"橫衝",
u"档卷": u"檔卷",
u"检复": u"檢覆",
u"台子": u"檯子",
u"台布": u"檯布",
u"台灯": u"檯燈",
u"台球": u"檯球",
u"台面": u"檯面",
u"柜台": u"櫃檯",
u"柜台": u"櫃臺",
u"栏干": u"欄干",
u"欺蒙": u"欺矇",
u"歌后": u"歌后",
u"欧几里得": u"歐幾裡得",
u"正当着": u"正當著",
u"武后": u"武后",
u"武松": u"武鬆",
u"归并": u"歸併",
u"死里求生": u"死裡求生",
u"死里逃生": u"死裡逃生",
u"残卷": u"殘卷",
u"杀虫药": u"殺虫藥",
u"壳里": u"殼裡",
u"母后": u"母后",
u"每只": u"每隻",
u"比干": u"比干",
u"毛卷": u"毛卷",
u"毛发": u"毛髮",
u"毫发": u"毫髮",
u"气冲牛斗": u"氣沖牛斗",
u"气象台": u"氣象臺",
u"氯霉素": u"氯黴素",
u"水斗": u"水斗",
u"水里": u"水裡",
u"水表": u"水錶",
u"永历": u"永曆",
u"污蔑": u"汙衊",
u"池里": u"池裡",
u"污蔑": u"污衊",
u"沈着": u"沈著",
u"没事干": u"沒事幹",
u"没精打采": u"沒精打采",
u"冲着": u"沖著",
u"沙里淘金": u"沙裡淘金",
u"河里": u"河裡",
u"油面": u"油麵",
u"泡面": u"泡麵",
u"泰斗": u"泰斗",
u"洗手不干": u"洗手不幹",
u"洗发精": u"洗髮精",
u"派团参加": u"派團參加",
u"流荡": u"流蕩",
u"浩荡": u"浩蕩",
u"浪琴表": u"浪琴錶",
u"浪荡": u"浪蕩",
u"浮荡": u"浮蕩",
u"海里": u"海裡",
u"涂着": u"涂著",
u"液晶表": u"液晶錶",
u"凉面": u"涼麵",
u"淡朱": u"淡硃",
u"淫荡": u"淫蕩",
u"测验卷": u"測驗卷",
u"港制": u"港製",
u"游荡": u"游蕩",
u"凑合着": u"湊合著",
u"湖里": u"湖裡",
u"汤团": u"湯糰",
u"汤面": u"湯麵",
u"卤制": u"滷製",
u"卤面": u"滷麵",
u"满布": u"滿佈",
u"漂荡": u"漂蕩",
u"漏斗": u"漏斗",
u"演奏台": u"演奏臺",
u"潭里": u"潭裡",
u"激荡": u"激蕩",
u"浓郁": u"濃郁",
u"浓发": u"濃髮",
u"湿地松": u"濕地鬆",
u"蒙蒙": u"濛濛",
u"蒙雾": u"濛霧",
u"瀛台": u"瀛臺",
u"弥漫": u"瀰漫",
u"弥漫着": u"瀰漫著",
u"火并": u"火併",
u"灰蒙": u"灰濛",
u"炒面": u"炒麵",
u"炮制": u"炮製",
u"炸药": u"炸葯",
u"炸酱面": u"炸醬麵",
u"为着": u"為著",
u"乌干达": u"烏干達",
u"乌苏里江": u"烏蘇裡江",
u"乌发": u"烏髮",
u"乌龙面": u"烏龍麵",
u"烘制": u"烘製",
u"烽火台": u"烽火臺",
u"无干": u"無干",
u"无精打采": u"無精打采",
u"炼制": u"煉製",
u"烟卷儿": u"煙卷兒",
u"烟斗": u"煙斗",
u"烟斗丝": u"煙斗絲",
u"烟台": u"煙臺",
u"照准": u"照准",
u"熨斗": u"熨斗",
u"灯台": u"燈臺",
u"燎发": u"燎髮",
u"烫发": u"燙髮",
u"烫面": u"燙麵",
u"烛台": u"燭臺",
u"炉台": u"爐臺",
u"爽荡": u"爽蕩",
u"片言只语": u"片言隻語",
u"牛肉面": u"牛肉麵",
u"牛只": u"牛隻",
u"特准": u"特准",
u"特征": u"特征",
u"特里": u"特裡",
u"特制": u"特製",
u"牵系": u"牽繫",
u"狼借": u"狼藉",
u"猛冲": u"猛衝",
u"奖杯": u"獎盃",
u"获准": u"獲准",
u"率团参加": u"率團參加",
u"王侯后": u"王侯后",
u"王后": u"王后",
u"班里": u"班裡",
u"理发": u"理髮",
u"瑶台": u"瑤臺",
u"甚么": u"甚麼",
u"甜面酱": u"甜麵醬",
u"生力面": u"生力麵",
u"生锈": u"生鏽",
u"生发": u"生髮",
u"田里": u"田裡",
u"由馀": u"由余",
u"男佣": u"男佣",
u"男用表": u"男用錶",
u"留发": u"留髮",
u"畚斗": u"畚斗",
u"当着": u"當著",
u"疏松": u"疏鬆",
u"疲困": u"疲睏",
u"病症": u"病癥",
u"症候": u"癥候",
u"症状": u"癥狀",
u"症结": u"癥結",
u"登台": u"登臺",
u"发布": u"發佈",
u"发着": u"發著",
u"发面": u"發麵",
u"发霉": u"發黴",
u"白卷": u"白卷",
u"白干儿": u"白干兒",
u"白发": u"白髮",
u"白面": u"白麵",
u"百里": u"百裡",
u"百只": u"百隻",
u"皇后": u"皇后",
u"皇历": u"皇曆",
u"皓发": u"皓髮",
u"皮里阳秋": u"皮裏陽秋",
u"皮里春秋": u"皮裡春秋",
u"皮制": u"皮製",
u"皱折": u"皺摺",
u"盒里": u"盒裡",
u"监制": u"監製",
u"盘里": u"盤裡",
u"盘回": u"盤迴",
u"直接参与": u"直接參与",
u"直冲": u"直衝",
u"相克": u"相剋",
u"相干": u"相干",
u"相冲": u"相衝",
u"看台": u"看臺",
u"眼帘": u"眼帘",
u"眼眶里": u"眼眶裡",
u"眼里": u"眼裡",
u"困乏": u"睏乏",
u"睡着了": u"睡著了",
u"了如": u"瞭如",
u"了望": u"瞭望",
u"了然": u"瞭然",
u"了若指掌": u"瞭若指掌",
u"了解": u"瞭解",
u"蒙住": u"矇住",
u"蒙昧无知": u"矇昧無知",
u"蒙混": u"矇混",
u"蒙蒙": u"矇矇",
u"蒙眬": u"矇矓",
u"蒙蔽": u"矇蔽",
u"蒙骗": u"矇騙",
u"短发": u"短髮",
u"石英表": u"石英錶",
u"研制": u"研製",
u"砰当": u"砰噹",
u"砲台": u"砲臺",
u"朱唇皓齿": u"硃唇皓齒",
u"朱批": u"硃批",
u"朱砂": u"硃砂",
u"朱笔": u"硃筆",
u"朱红色": u"硃紅色",
u"朱色": u"硃色",
u"硬干": u"硬幹",
u"砚台": u"硯臺",
u"碑志": u"碑誌",
u"磁制": u"磁製",
u"磨制": u"磨製",
u"示复": u"示覆",
u"社里": u"社裡",
u"神采": u"神采",
u"御侮": u"禦侮",
u"御寇": u"禦寇",
u"御寒": u"禦寒",
u"御敌": u"禦敵",
u"秃发": u"禿髮",
u"秀发": u"秀髮",
u"私下里": u"私下裡",
u"秋天里": u"秋天裡",
u"秋裤": u"秋褲",
u"秒表": u"秒錶",
u"稀松": u"稀鬆",
u"禀复": u"稟覆",
u"稻谷": u"稻穀",
u"稽征": u"稽征",
u"谷仓": u"穀倉",
u"谷场": u"穀場",
u"谷子": u"穀子",
u"谷壳": u"穀殼",
u"谷物": u"穀物",
u"谷皮": u"穀皮",
u"谷神": u"穀神",
u"谷粒": u"穀粒",
u"谷舱": u"穀艙",
u"谷苗": u"穀苗",
u"谷草": u"穀草",
u"谷贱伤农": u"穀賤傷農",
u"谷道": u"穀道",
u"谷雨": u"穀雨",
u"谷类": u"穀類",
u"积极参与": u"積极參与",
u"积极参加": u"積极參加",
u"空荡": u"空蕩",
u"窗帘": u"窗帘",
u"窗明几净": u"窗明几淨",
u"窗台": u"窗檯",
u"窗台": u"窗臺",
u"窝里": u"窩裡",
u"窝阔台": u"窩闊臺",
u"穷追不舍": u"窮追不捨",
u"笆斗": u"笆斗",
u"笑里藏刀": u"笑裡藏刀",
u"第一卷": u"第一卷",
u"筋斗": u"筋斗",
u"答卷": u"答卷",
u"答复": u"答複",
u"答复": u"答覆",
u"筵几": u"筵几",
u"箕斗": u"箕斗",
u"签着": u"簽著",
u"吁求": u"籲求",
u"吁请": u"籲請",
u"粗制": u"粗製",
u"粗卤": u"粗鹵",
u"精干": u"精幹",
u"精明强干": u"精明強幹",
u"精致": u"精緻",
u"精制": u"精製",
u"精辟": u"精辟",
u"精采": u"精采",
u"糊里糊涂": u"糊裡糊塗",
u"团子": u"糰子",
u"系着": u"系著",
u"纪历": u"紀曆",
u"红发": u"紅髮",
u"红霉素": u"紅黴素",
u"纡回": u"紆迴",
u"纳采": u"納采",
u"素食面": u"素食麵",
u"素面": u"素麵",
u"紫微斗数": u"紫微斗數",
u"细致": u"細緻",
u"组里": u"組裡",
u"结发": u"結髮",
u"绝对参照": u"絕對參照",
u"丝来线去": u"絲來線去",
u"丝布": u"絲布",
u"丝板": u"絲板",
u"丝瓜布": u"絲瓜布",
u"丝绒布": u"絲絨布",
u"丝线": u"絲線",
u"丝织厂": u"絲織廠",
u"丝虫": u"絲蟲",
u"綑吊": u"綑吊",
u"经卷": u"經卷",
u"绿霉素": u"綠黴素",
u"维系": u"維繫",
u"绾发": u"綰髮",
u"网里": u"網裡",
u"紧绷": u"緊繃",
u"紧绷着": u"緊繃著",
u"紧追不舍": u"緊追不捨",
u"编制": u"編製",
u"编发": u"編髮",
u"缓冲": u"緩衝",
u"致密": u"緻密",
u"萦回": u"縈迴",
u"县里": u"縣裡",
u"县志": u"縣誌",
u"缝里": u"縫裡",
u"缝制": u"縫製",
u"纤夫": u"縴夫",
u"繁复": u"繁複",
u"绷住": u"繃住",
u"绷子": u"繃子",
u"绷带": u"繃帶",
u"绷紧": u"繃緊",
u"绷脸": u"繃臉",
u"绷着": u"繃著",
u"绷着脸": u"繃著臉",
u"绷着脸儿": u"繃著臉兒",
u"绷开": u"繃開",
u"绘制": u"繪製",
u"系上": u"繫上",
u"系到": u"繫到",
u"系囚": u"繫囚",
u"系心": u"繫心",
u"系念": u"繫念",
u"系怀": u"繫懷",
u"系数": u"繫數",
u"系于": u"繫於",
u"系系": u"繫系",
u"系紧": u"繫緊",
u"系绳": u"繫繩",
u"系着": u"繫著",
u"系辞": u"繫辭",
u"缴卷": u"繳卷",
u"累囚": u"纍囚",
u"累累": u"纍纍",
u"坛子": u"罈子",
u"坛坛罐罐": u"罈罈罐罐",
u"骂着": u"罵著",
u"美制": u"美製",
u"美发": u"美髮",
u"翻来复去": u"翻來覆去",
u"翻天复地": u"翻天覆地",
u"翻复": u"翻覆",
u"翻云复雨": u"翻雲覆雨",
u"老么": u"老么",
u"老板": u"老闆",
u"考卷": u"考卷",
u"耕获": u"耕穫",
u"聊斋志异": u"聊齋誌異",
u"联系": u"聯係",
u"联系": u"聯繫",
u"肉丝面": u"肉絲麵",
u"肉羹面": u"肉羹麵",
u"肉松": u"肉鬆",
u"肢体": u"肢体",
u"背向着": u"背向著",
u"背地里": u"背地裡",
u"胡里胡涂": u"胡裡胡塗",
u"能干": u"能幹",
u"脉冲": u"脈衝",
u"脱发": u"脫髮",
u"腊味": u"腊味",
u"腊笔": u"腊筆",
u"腊肉": u"腊肉",
u"脑子里": u"腦子裡",
u"腰里": u"腰裡",
u"胶卷": u"膠卷",
u"自制": u"自製",
u"自觉自愿": u"自覺自愿",
u"台上": u"臺上",
u"台下": u"臺下",
u"台中": u"臺中",
u"台北": u"臺北",
u"台南": u"臺南",
u"台地": u"臺地",
u"台塑": u"臺塑",
u"台大": u"臺大",
u"台币": u"臺幣",
u"台座": u"臺座",
u"台东": u"臺東",
u"台柱": u"臺柱",
u"台榭": u"臺榭",
u"台汽": u"臺汽",
u"台海": u"臺海",
u"台澎金马": u"臺澎金馬",
u"台湾": u"臺灣",
u"台灯": u"臺燈",
u"台球": u"臺球",
u"台省": u"臺省",
u"台端": u"臺端",
u"台糖": u"臺糖",
u"台肥": u"臺肥",
u"台航": u"臺航",
u"台视": u"臺視",
u"台词": u"臺詞",
u"台车": u"臺車",
u"台铁": u"臺鐵",
u"台阶": u"臺階",
u"台电": u"臺電",
u"台面": u"臺面",
u"舂谷": u"舂穀",
u"兴致": u"興緻",
u"兴高采烈": u"興高采烈",
u"旧历": u"舊曆",
u"舒卷": u"舒卷",
u"舞台": u"舞臺",
u"航海历": u"航海曆",
u"船只": u"船隻",
u"舰只": u"艦隻",
u"芬郁": u"芬郁",
u"花卷": u"花卷",
u"花盆里": u"花盆裡",
u"花采": u"花采",
u"苑里": u"苑裡",
u"若干": u"若干",
u"苦干": u"苦幹",
u"苦里": u"苦裏",
u"苦卤": u"苦鹵",
u"范仲淹": u"范仲淹",
u"范蠡": u"范蠡",
u"范阳": u"范陽",
u"茅台": u"茅臺",
u"茶几": u"茶几",
u"草丛里": u"草叢裡",
u"庄里": u"莊裡",
u"茎干": u"莖幹",
u"莽荡": u"莽蕩",
u"菌丝体": u"菌絲体",
u"菌丝体": u"菌絲體",
u"华里": u"華裡",
u"华发": u"華髮",
u"万卷": u"萬卷",
u"万历": u"萬曆",
u"万只": u"萬隻",
u"落发": u"落髮",
u"着儿": u"著兒",
u"着书立说": u"著書立說",
u"着色软体": u"著色軟體",
u"着重指出": u"著重指出",
u"着录": u"著錄",
u"着录规则": u"著錄規則",
u"蓄发": u"蓄髮",
u"蓄须": u"蓄鬚",
u"蓬发": u"蓬髮",
u"蓬松": u"蓬鬆",
u"莲台": u"蓮臺",
u"荡来荡去": u"蕩來蕩去",
u"荡女": u"蕩女",
u"荡妇": u"蕩婦",
u"荡寇": u"蕩寇",
u"荡平": u"蕩平",
u"荡涤": u"蕩滌",
u"荡漾": u"蕩漾",
u"荡然": u"蕩然",
u"荡舟": u"蕩舟",
u"荡船": u"蕩船",
u"荡荡": u"蕩蕩",
u"薑丝": u"薑絲",
u"薙发": u"薙髮",
u"借以": u"藉以",
u"借口": u"藉口",
u"借故": u"藉故",
u"借机": u"藉機",
u"借此": u"藉此",
u"借由": u"藉由",
u"借端": u"藉端",
u"借着": u"藉著",
u"借借": u"藉藉",
u"借词": u"藉詞",
u"借资": u"藉資",
u"借酒浇愁": u"藉酒澆愁",
u"藤制": u"藤製",
u"蕴含着": u"蘊含著",
u"蕴涵着": u"蘊涵著",
u"蕴借": u"蘊藉",
u"萝卜": u"蘿蔔",
u"虎须": u"虎鬚",
u"号志": u"號誌",
u"蜂后": u"蜂后",
u"蛮干": u"蠻幹",
u"行事历": u"行事曆",
u"胡同": u"衚衕",
u"冲上": u"衝上",
u"冲下": u"衝下",
u"冲来": u"衝來",
u"冲倒": u"衝倒",
u"冲出": u"衝出",
u"冲到": u"衝到",
u"冲刺": u"衝刺",
u"冲克": u"衝剋",
u"冲力": u"衝力",
u"冲劲": u"衝勁",
u"冲动": u"衝動",
u"冲去": u"衝去",
u"冲口": u"衝口",
u"冲垮": u"衝垮",
u"冲堂": u"衝堂",
u"冲压": u"衝壓",
u"冲天": u"衝天",
u"冲掉": u"衝掉",
u"冲撞": u"衝撞",
u"冲击": u"衝擊",
u"冲散": u"衝散",
u"冲决": u"衝決",
u"冲浪": u"衝浪",
u"冲激": u"衝激",
u"冲破": u"衝破",
u"冲程": u"衝程",
u"冲突": u"衝突",
u"冲线": u"衝線",
u"冲着": u"衝著",
u"冲冲": u"衝衝",
u"冲要": u"衝要",
u"冲起": u"衝起",
u"冲进": u"衝進",
u"冲过": u"衝過",
u"冲锋": u"衝鋒",
u"表里": u"表裡",
u"袖里": u"袖裡",
u"被里": u"被裡",
u"被复": u"被複",
u"被复": u"被覆",
u"被复着": u"被覆著",
u"被发": u"被髮",
u"裁并": u"裁併",
u"裁制": u"裁製",
u"里面": u"裏面",
u"里人": u"裡人",
u"里加": u"裡加",
u"里外": u"裡外",
u"里子": u"裡子",
u"里屋": u"裡屋",
u"里层": u"裡層",
u"里布": u"裡布",
u"里带": u"裡帶",
u"里弦": u"裡弦",
u"里应外合": u"裡應外合",
u"里拉": u"裡拉",
u"里斯": u"裡斯",
u"里海": u"裡海",
u"里脊": u"裡脊",
u"里衣": u"裡衣",
u"里里": u"裡裡",
u"里通外国": u"裡通外國",
u"里通外敌": u"裡通外敵",
u"里边": u"裡邊",
u"里间": u"裡間",
u"里面": u"裡面",
u"里头": u"裡頭",
u"制件": u"製件",
u"制作": u"製作",
u"制做": u"製做",
u"制备": u"製備",
u"制冰": u"製冰",
u"制冷": u"製冷",
u"制剂": u"製劑",
u"制品": u"製品",
u"制图": u"製圖",
u"制成": u"製成",
u"制法": u"製法",
u"制为": u"製為",
u"制片": u"製片",
u"制版": u"製版",
u"制程": u"製程",
u"制糖": u"製糖",
u"制纸": u"製紙",
u"制药": u"製藥",
u"制表": u"製表",
u"制裁": u"製裁",
u"制造": u"製造",
u"制革": u"製革",
u"制鞋": u"製鞋",
u"制盐": u"製鹽",
u"复仞年如": u"複仞年如",
u"复以百万": u"複以百萬",
u"复位": u"複位",
u"复信": u"複信",
u"复分数": u"複分數",
u"复列": u"複列",
u"复利": u"複利",
u"复印": u"複印",
u"复原": u"複原",
u"复句": u"複句",
u"复合": u"複合",
u"复名": u"複名",
u"复员": u"複員",
u"复壁": u"複壁",
u"复壮": u"複壯",
u"复姓": u"複姓",
u"复字键": u"複字鍵",
u"复审": u"複審",
u"复写": u"複寫",
u"复式": u"複式",
u"复复": u"複復",
u"复数": u"複數",
u"复本": u"複本",
u"复查": u"複查",
u"复核": u"複核",
u"复检": u"複檢",
u"复次": u"複次",
u"复比": u"複比",
u"复决": u"複決",
u"复活": u"複活",
u"复测": u"複測",
u"复亩珍": u"複畝珍",
u"复发": u"複發",
u"复目": u"複目",
u"复眼": u"複眼",
u"复种": u"複種",
u"复线": u"複線",
u"复习": u"複習",
u"复兴社": u"複興社",
u"复旧": u"複舊",
u"复色": u"複色",
u"复叶": u"複葉",
u"复盖": u"複蓋",
u"复苏": u"複蘇",
u"复制": u"複製",
u"复诊": u"複診",
u"复词": u"複詞",
u"复试": u"複試",
u"复课": u"複課",
u"复议": u"複議",
u"复变函数": u"複變函數",
u"复赛": u"複賽",
u"复述": u"複述",
u"复选": u"複選",
u"复钱": u"複錢",
u"复杂": u"複雜",
u"复电": u"複電",
u"复音": u"複音",
u"复韵": u"複韻",
u"衬里": u"襯裡",
u"西岳": u"西嶽",
u"西征": u"西征",
u"西历": u"西曆",
u"要冲": u"要衝",
u"要么": u"要麼",
u"复上": u"覆上",
u"复亡": u"覆亡",
u"复住": u"覆住",
u"复信": u"覆信",
u"复命": u"覆命",
u"复在": u"覆在",
u"复审": u"覆審",
u"复巢之下": u"覆巢之下",
u"复成": u"覆成",
u"复败": u"覆敗",
u"复文": u"覆文",
u"复校": u"覆校",
u"复核": u"覆核",
u"复水难收": u"覆水難收",
u"复没": u"覆沒",
u"复灭": u"覆滅",
u"复盆": u"覆盆",
u"复舟": u"覆舟",
u"复着": u"覆著",
u"复盖": u"覆蓋",
u"复盖着": u"覆蓋著",
u"复试": u"覆試",
u"复议": u"覆議",
u"复车": u"覆車",
u"复载": u"覆載",
u"复辙": u"覆轍",
u"复电": u"覆電",
u"见复": u"見覆",
u"亲征": u"親征",
u"观众台": u"觀眾臺",
u"观台": u"觀臺",
u"观象台": u"觀象臺",
u"角落里": u"角落裡",
u"觔斗": u"觔斗",
u"触须": u"觸鬚",
u"订制": u"訂製",
u"诉说着": u"訴說著",
u"词汇": u"詞彙",
u"试卷": u"試卷",
u"诗卷": u"詩卷",
u"话里有话": u"話裡有話",
u"志哀": u"誌哀",
u"志喜": u"誌喜",
u"志庆": u"誌慶",
u"语云": u"語云",
u"语汇": u"語彙",
u"诬蔑": u"誣衊",
u"诵经台": u"誦經臺",
u"说着": u"說著",
u"课征": u"課征",
u"调制": u"調製",
u"调频台": u"調頻臺",
u"请参阅": u"請參閱",
u"讲台": u"講臺",
u"谢绝参观": u"謝絕參觀",
u"护发": u"護髮",
u"雠隙": u"讎隙",
u"豆腐干": u"豆腐干",
u"竖着": u"豎著",
u"丰富多采": u"豐富多采",
u"丰滨": u"豐濱",
u"丰滨乡": u"豐濱鄉",
u"丰采": u"豐采",
u"象征着": u"象徵著",
u"贵干": u"貴幹",
u"贾后": u"賈后",
u"赈饥": u"賑饑",
u"贤后": u"賢后",
u"质朴": u"質朴",
u"赌台": u"賭檯",
u"购并": u"購併",
u"赤松": u"赤鬆",
u"起吊": u"起吊",
u"起复": u"起複",
u"赶制": u"趕製",
u"跌荡": u"跌蕩",
u"跟斗": u"跟斗",
u"跳荡": u"跳蕩",
u"跳表": u"跳錶",
u"踬仆": u"躓仆",
u"躯干": u"軀幹",
u"车库里": u"車庫裡",
u"车站里": u"車站裡",
u"车里": u"車裡",
u"轻松": u"輕鬆",
u"轮回": u"輪迴",
u"转台": u"轉檯",
u"辛丑": u"辛丑",
u"辟邪": u"辟邪",
u"办伙": u"辦伙",
u"办公台": u"辦公檯",
u"辞汇": u"辭彙",
u"农历": u"農曆",
u"迂回": u"迂迴",
u"近日里": u"近日裡",
u"迥然回异": u"迥然迴異",
u"回光返照": u"迴光返照",
u"回向": u"迴向",
u"回圈": u"迴圈",
u"回廊": u"迴廊",
u"回形夹": u"迴形夾",
u"回文": u"迴文",
u"回旋": u"迴旋",
u"回流": u"迴流",
u"回环": u"迴環",
u"回荡": u"迴盪",
u"回纹针": u"迴紋針",
u"回绕": u"迴繞",
u"回肠": u"迴腸",
u"回荡": u"迴蕩",
u"回诵": u"迴誦",
u"回路": u"迴路",
u"回转": u"迴轉",
u"回递性": u"迴遞性",
u"回避": u"迴避",
u"回响": u"迴響",
u"回风": u"迴風",
u"回首": u"迴首",
u"迷蒙": u"迷濛",
u"退伙": u"退伙",
u"这么着": u"這么著",
u"这里": u"這裏",
u"这里": u"這裡",
u"这只": u"這隻",
u"这么": u"這麼",
u"这么着": u"這麼著",
u"通心面": u"通心麵",
u"速食面": u"速食麵",
u"连系": u"連繫",
u"连台好戏": u"連臺好戲",
u"游荡": u"遊蕩",
u"遍布": u"遍佈",
u"递回": u"遞迴",
u"远征": u"遠征",
u"适才": u"適纔",
u"遮复": u"遮覆",
u"还冲": u"還衝",
u"邋里邋遢": u"邋裡邋遢",
u"那里": u"那裡",
u"那只": u"那隻",
u"那么": u"那麼",
u"那么着": u"那麼著",
u"邪辟": u"邪辟",
u"郁烈": u"郁烈",
u"郁穆": u"郁穆",
u"郁郁": u"郁郁",
u"郁闭": u"郁閉",
u"郁馥": u"郁馥",
u"乡愿": u"鄉愿",
u"乡里": u"鄉裡",
u"邻里": u"鄰裡",
u"配合着": u"配合著",
u"配制": u"配製",
u"酒杯": u"酒盃",
u"酒坛": u"酒罈",
u"酥松": u"酥鬆",
u"醋坛": u"醋罈",
u"酝借": u"醞藉",
u"酝酿着": u"醞釀著",
u"医药": u"醫葯",
u"醲郁": u"醲郁",
u"酿制": u"釀製",
u"采地": u"采地",
u"采女": u"采女",
u"采声": u"采聲",
u"采色": u"采色",
u"采邑": u"采邑",
u"里程表": u"里程錶",
u"重折": u"重摺",
u"重复": u"重複",
u"重复": u"重覆",
u"重锤": u"重鎚",
u"野台戏": u"野臺戲",
u"金斗": u"金斗",
u"金表": u"金錶",
u"金发": u"金髮",
u"金霉素": u"金黴素",
u"钉锤": u"釘鎚",
u"银朱": u"銀硃",
u"银发": u"銀髮",
u"铜制": u"銅製",
u"铝制": u"鋁製",
u"钢制": u"鋼製",
u"录着": u"錄著",
u"录制": u"錄製",
u"表带": u"錶帶",
u"表店": u"錶店",
u"表厂": u"錶廠",
u"表壳": u"錶殼",
u"表链": u"錶鏈",
u"表面": u"錶面",
u"锅台": u"鍋臺",
u"锻鍊出": u"鍛鍊出",
u"锻鍊身体": u"鍛鍊身体",
u"锲而不舍": u"鍥而不捨",
u"锤儿": u"鎚兒",
u"锤子": u"鎚子",
u"锤头": u"鎚頭",
u"链霉素": u"鏈黴素",
u"镜台": u"鏡臺",
u"锈病": u"鏽病",
u"锈菌": u"鏽菌",
u"锈蚀": u"鏽蝕",
u"钟表": u"鐘錶",
u"铁锤": u"鐵鎚",
u"铁锈": u"鐵鏽",
u"长征": u"長征",
u"长发": u"長髮",
u"长须鲸": u"長鬚鯨",
u"门帘": u"門帘",
u"门斗": u"門斗",
u"门里": u"門裡",
u"开伙": u"開伙",
u"开卷": u"開卷",
u"开诚布公": u"開誠佈公",
u"开采": u"開采",
u"閒情逸致": u"閒情逸緻",
u"閒荡": u"閒蕩",
u"间不容发": u"間不容髮",
u"闵采尔": u"閔采爾",
u"阅卷": u"閱卷",
u"阑干": u"闌干",
u"关系": u"關係",
u"关系着": u"關係著",
u"防御": u"防禦",
u"防锈": u"防鏽",
u"防台": u"防颱",
u"阿斗": u"阿斗",
u"阿里": u"阿裡",
u"除旧布新": u"除舊佈新",
u"阴干": u"陰干",
u"阴历": u"陰曆",
u"阴郁": u"陰郁",
u"陆征祥": u"陸征祥",
u"阳春面": u"陽春麵",
u"阳历": u"陽曆",
u"阳台": u"陽臺",
u"只字": u"隻字",
u"只影": u"隻影",
u"只手遮天": u"隻手遮天",
u"只眼": u"隻眼",
u"只言片语": u"隻言片語",
u"只身": u"隻身",
u"雅致": u"雅緻",
u"雇佣": u"雇佣",
u"双折": u"雙摺",
u"杂志": u"雜誌",
u"鸡丝": u"雞絲",
u"鸡丝面": u"雞絲麵",
u"鸡腿面": u"雞腿麵",
u"鸡只": u"雞隻",
u"难舍": u"難捨",
u"雪里": u"雪裡",
u"云须": u"雲鬚",
u"电子表": u"電子錶",
u"电台": u"電臺",
u"电冲": u"電衝",
u"电复": u"電覆",
u"电视台": u"電視臺",
u"电表": u"電錶",
u"震荡": u"震蕩",
u"雾里": u"霧裡",
u"露台": u"露臺",
u"灵台": u"靈臺",
u"青瓦台": u"青瓦臺",
u"青霉": u"青黴",
u"面朝着": u"面朝著",
u"面临着": u"面臨著",
u"鞋里": u"鞋裡",
u"鞣制": u"鞣製",
u"秋千": u"鞦韆",
u"鞭辟入里": u"鞭辟入裡",
u"韩国制": u"韓國製",
u"韩制": u"韓製",
u"预制": u"預製",
u"颁布": u"頒佈",
u"头里": u"頭裡",
u"头发": u"頭髮",
u"颊须": u"頰鬚",
u"颠仆": u"顛仆",
u"颠复": u"顛複",
u"颠复": u"顛覆",
u"显着标志": u"顯著標志",
u"风土志": u"風土誌",
u"风斗": u"風斗",
u"风物志": u"風物誌",
u"风里": u"風裡",
u"风采": u"風采",
u"台风": u"颱風",
u"刮了": u"颳了",
u"刮倒": u"颳倒",
u"刮去": u"颳去",
u"刮得": u"颳得",
u"刮着": u"颳著",
u"刮走": u"颳走",
u"刮起": u"颳起",
u"刮风": u"颳風",
u"飘荡": u"飄蕩",
u"饭团": u"飯糰",
u"饼干": u"餅干",
u"馄饨面": u"餛飩麵",
u"饥不择食": u"饑不擇食",
u"饥寒": u"饑寒",
u"饥民": u"饑民",
u"饥渴": u"饑渴",
u"饥溺": u"饑溺",
u"饥荒": u"饑荒",
u"饥饱": u"饑飽",
u"饥饿": u"饑餓",
u"饥馑": u"饑饉",
u"首当其冲": u"首當其衝",
u"香郁": u"香郁",
u"馥郁": u"馥郁",
u"马里": u"馬裡",
u"马表": u"馬錶",
u"骀荡": u"駘蕩",
u"腾冲": u"騰衝",
u"骨子里": u"骨子裡",
u"骨干": u"骨幹",
u"骨灰坛": u"骨灰罈",
u"肮脏": u"骯髒",
u"脏乱": u"髒亂",
u"脏兮兮": u"髒兮兮",
u"脏字": u"髒字",
u"脏得": u"髒得",
u"脏东西": u"髒東西",
u"脏水": u"髒水",
u"脏的": u"髒的",
u"脏话": u"髒話",
u"脏钱": u"髒錢",
u"高干": u"高幹",
u"高台": u"高臺",
u"髭须": u"髭鬚",
u"发型": u"髮型",
u"发夹": u"髮夾",
u"发妻": u"髮妻",
u"发姐": u"髮姐",
u"发带": u"髮帶",
u"发廊": u"髮廊",
u"发式": u"髮式",
u"发指": u"髮指",
u"发捲": u"髮捲",
u"发根": u"髮根",
u"发毛": u"髮毛",
u"发油": u"髮油",
u"发状": u"髮狀",
u"发短心长": u"髮短心長",
u"发端": u"髮端",
u"发结": u"髮結",
u"发丝": u"髮絲",
u"发网": u"髮網",
u"发肤": u"髮膚",
u"发胶": u"髮膠",
u"发菜": u"髮菜",
u"发蜡": u"髮蠟",
u"发辫": u"髮辮",
u"发针": u"髮針",
u"发长": u"髮長",
u"发际": u"髮際",
u"发霜": u"髮霜",
u"发髻": u"髮髻",
u"发鬓": u"髮鬢",
u"鬅松": u"鬅鬆",
u"松了": u"鬆了",
u"松些": u"鬆些",
u"松劲": u"鬆勁",
u"松动": u"鬆動",
u"松口": u"鬆口",
u"松土": u"鬆土",
u"松弛": u"鬆弛",
u"松快": u"鬆快",
u"松懈": u"鬆懈",
u"松手": u"鬆手",
u"松散": u"鬆散",
u"松林": u"鬆林",
u"松柔": u"鬆柔",
u"松毛虫": u"鬆毛蟲",
u"松浮": u"鬆浮",
u"松涛": u"鬆濤",
u"松科": u"鬆科",
u"松节油": u"鬆節油",
u"松绑": u"鬆綁",
u"松紧": u"鬆緊",
u"松缓": u"鬆緩",
u"松脆": u"鬆脆",
u"松脱": u"鬆脫",
u"松起": u"鬆起",
u"松软": u"鬆軟",
u"松通": u"鬆通",
u"松开": u"鬆開",
u"松饼": u"鬆餅",
u"松松": u"鬆鬆",
u"鬈发": u"鬈髮",
u"胡子": u"鬍子",
u"胡梢": u"鬍梢",
u"胡渣": u"鬍渣",
u"胡髭": u"鬍髭",
u"胡须": u"鬍鬚",
u"须根": u"鬚根",
u"须毛": u"鬚毛",
u"须生": u"鬚生",
u"须眉": u"鬚眉",
u"须发": u"鬚髮",
u"须须": u"鬚鬚",
u"鬓发": u"鬢髮",
u"斗着": u"鬥著",
u"闹着玩儿": u"鬧著玩儿",
u"闹着玩儿": u"鬧著玩兒",
u"郁郁": u"鬱郁",
u"鱼松": u"魚鬆",
u"鲸须": u"鯨鬚",
u"鲇鱼": u"鯰魚",
u"鹤发": u"鶴髮",
u"卤化": u"鹵化",
u"卤味": u"鹵味",
u"卤族": u"鹵族",
u"卤水": u"鹵水",
u"卤汁": u"鹵汁",
u"卤簿": u"鹵簿",
u"卤素": u"鹵素",
u"卤莽": u"鹵莽",
u"卤钝": u"鹵鈍",
u"咸味": u"鹹味",
u"咸土": u"鹹土",
u"咸度": u"鹹度",
u"咸得": u"鹹得",
u"咸水": u"鹹水",
u"咸海": u"鹹海",
u"咸淡": u"鹹淡",
u"咸湖": u"鹹湖",
u"咸汤": u"鹹湯",
u"咸的": u"鹹的",
u"咸肉": u"鹹肉",
u"咸菜": u"鹹菜",
u"咸蛋": u"鹹蛋",
u"咸猪肉": u"鹹豬肉",
u"咸类": u"鹹類",
u"咸鱼": u"鹹魚",
u"咸鸭蛋": u"鹹鴨蛋",
u"咸卤": u"鹹鹵",
u"咸咸": u"鹹鹹",
u"盐卤": u"鹽鹵",
u"面价": u"麵價",
u"面包": u"麵包",
u"面团": u"麵團",
u"面店": u"麵店",
u"面厂": u"麵廠",
u"面杖": u"麵杖",
u"面条": u"麵條",
u"面灰": u"麵灰",
u"面皮": u"麵皮",
u"面筋": u"麵筋",
u"面粉": u"麵粉",
u"面糊": u"麵糊",
u"面线": u"麵線",
u"面茶": u"麵茶",
u"面食": u"麵食",
u"面饺": u"麵餃",
u"面饼": u"麵餅",
u"麻酱面": u"麻醬麵",
u"黄历": u"黃曆",
u"黄发垂髫": u"黃髮垂髫",
u"黑发": u"黑髮",
u"黑松": u"黑鬆",
u"霉毒": u"黴毒",
u"霉菌": u"黴菌",
u"鼓里": u"鼓裡",
u"冬冬": u"鼕鼕",
u"龙卷": u"龍卷",
u"龙须": u"龍鬚",
}
zh2Hans = {
u'顯著': u'显著',
u'土著': u'土著',
u'印表機': u'打印机',
u'說明檔案': u'帮助文件',
u"瀋": u"沈",
u"畫": u"划",
u"鍾": u"钟",
u"靦": u"腼",
u"餘": u"余",
u"鯰": u"鲇",
u"鹼": u"碱",
u"㠏": u"㟆",
u"𡞵": u"㛟",
u"万": u"万",
u"与": u"与",
u"丑": u"丑",
u"丟": u"丢",
u"並": u"并",
u"丰": u"丰",
u"么": u"么",
u"乾": u"干",
u"乾坤": u"乾坤",
u"乾隆": u"乾隆",
u"亂": u"乱",
u"云": u"云",
u"亙": u"亘",
u"亞": u"亚",
u"仆": u"仆",
u"价": u"价",
u"伙": u"伙",
u"佇": u"伫",
u"佈": u"布",
u"体": u"体",
u"余": u"余",
u"余": u"馀",
u"佣": u"佣",
u"併": u"并",
u"來": u"来",
u"侖": u"仑",
u"侶": u"侣",
u"俁": u"俣",
u"係": u"系",
u"俔": u"伣",
u"俠": u"侠",
u"倀": u"伥",
u"倆": u"俩",
u"倈": u"俫",
u"倉": u"仓",
u"個": u"个",
u"們": u"们",
u"倫": u"伦",
u"偉": u"伟",
u"側": u"侧",
u"偵": u"侦",
u"偽": u"伪",
u"傑": u"杰",
u"傖": u"伧",
u"傘": u"伞",
u"備": u"备",
u"傢": u"家",
u"傭": u"佣",
u"傯": u"偬",
u"傳": u"传",
u"傴": u"伛",
u"債": u"债",
u"傷": u"伤",
u"傾": u"倾",
u"僂": u"偻",
u"僅": u"仅",
u"僉": u"佥",
u"僑": u"侨",
u"僕": u"仆",
u"僞": u"伪",
u"僥": u"侥",
u"僨": u"偾",
u"價": u"价",
u"儀": u"仪",
u"儂": u"侬",
u"億": u"亿",
u"儈": u"侩",
u"儉": u"俭",
u"儐": u"傧",
u"儔": u"俦",
u"儕": u"侪",
u"儘": u"尽",
u"償": u"偿",
u"優": u"优",
u"儲": u"储",
u"儷": u"俪",
u"儸": u"㑩",
u"儺": u"傩",
u"儻": u"傥",
u"儼": u"俨",
u"儿": u"儿",
u"兇": u"凶",
u"兌": u"兑",
u"兒": u"儿",
u"兗": u"兖",
u"党": u"党",
u"內": u"内",
u"兩": u"两",
u"冊": u"册",
u"冪": u"幂",
u"准": u"准",
u"凈": u"净",
u"凍": u"冻",
u"凜": u"凛",
u"几": u"几",
u"凱": u"凯",
u"划": u"划",
u"別": u"别",
u"刪": u"删",
u"剄": u"刭",
u"則": u"则",
u"剋": u"克",
u"剎": u"刹",
u"剗": u"刬",
u"剛": u"刚",
u"剝": u"剥",
u"剮": u"剐",
u"剴": u"剀",
u"創": u"创",
u"劃": u"划",
u"劇": u"剧",
u"劉": u"刘",
u"劊": u"刽",
u"劌": u"刿",
u"劍": u"剑",
u"劏": u"㓥",
u"劑": u"剂",
u"劚": u"㔉",
u"勁": u"劲",
u"動": u"动",
u"務": u"务",
u"勛": u"勋",
u"勝": u"胜",
u"勞": u"劳",
u"勢": u"势",
u"勩": u"勚",
u"勱": u"劢",
u"勵": u"励",
u"勸": u"劝",
u"勻": u"匀",
u"匭": u"匦",
u"匯": u"汇",
u"匱": u"匮",
u"區": u"区",
u"協": u"协",
u"卷": u"卷",
u"卻": u"却",
u"厂": u"厂",
u"厙": u"厍",
u"厠": u"厕",
u"厭": u"厌",
u"厲": u"厉",
u"厴": u"厣",
u"參": u"参",
u"叄": u"叁",
u"叢": u"丛",
u"台": u"台",
u"叶": u"叶",
u"吊": u"吊",
u"后": u"后",
u"吒": u"咤",
u"吳": u"吴",
u"吶": u"呐",
u"呂": u"吕",
u"獃": u"呆",
u"咼": u"呙",
u"員": u"员",
u"唄": u"呗",
u"唚": u"吣",
u"問": u"问",
u"啓": u"启",
u"啞": u"哑",
u"啟": u"启",
u"啢": u"唡",
u"喎": u"㖞",
u"喚": u"唤",
u"喪": u"丧",
u"喬": u"乔",
u"單": u"单",
u"喲": u"哟",
u"嗆": u"呛",
u"嗇": u"啬",
u"嗊": u"唝",
u"嗎": u"吗",
u"嗚": u"呜",
u"嗩": u"唢",
u"嗶": u"哔",
u"嘆": u"叹",
u"嘍": u"喽",
u"嘔": u"呕",
u"嘖": u"啧",
u"嘗": u"尝",
u"嘜": u"唛",
u"嘩": u"哗",
u"嘮": u"唠",
u"嘯": u"啸",
u"嘰": u"叽",
u"嘵": u"哓",
u"嘸": u"呒",
u"嘽": u"啴",
u"噁": u"恶",
u"噓": u"嘘",
u"噚": u"㖊",
u"噝": u"咝",
u"噠": u"哒",
u"噥": u"哝",
u"噦": u"哕",
u"噯": u"嗳",
u"噲": u"哙",
u"噴": u"喷",
u"噸": u"吨",
u"噹": u"当",
u"嚀": u"咛",
u"嚇": u"吓",
u"嚌": u"哜",
u"嚕": u"噜",
u"嚙": u"啮",
u"嚥": u"咽",
u"嚦": u"呖",
u"嚨": u"咙",
u"嚮": u"向",
u"嚲": u"亸",
u"嚳": u"喾",
u"嚴": u"严",
u"嚶": u"嘤",
u"囀": u"啭",
u"囁": u"嗫",
u"囂": u"嚣",
u"囅": u"冁",
u"囈": u"呓",
u"囌": u"苏",
u"囑": u"嘱",
u"囪": u"囱",
u"圇": u"囵",
u"國": u"国",
u"圍": u"围",
u"園": u"园",
u"圓": u"圆",
u"圖": u"图",
u"團": u"团",
u"坏": u"坏",
u"垵": u"埯",
u"埡": u"垭",
u"埰": u"采",
u"執": u"执",
u"堅": u"坚",
u"堊": u"垩",
u"堖": u"垴",
u"堝": u"埚",
u"堯": u"尧",
u"報": u"报",
u"場": u"场",
u"塊": u"块",
u"塋": u"茔",
u"塏": u"垲",
u"塒": u"埘",
u"塗": u"涂",
u"塚": u"冢",
u"塢": u"坞",
u"塤": u"埙",
u"塵": u"尘",
u"塹": u"堑",
u"墊": u"垫",
u"墜": u"坠",
u"墮": u"堕",
u"墳": u"坟",
u"墻": u"墙",
u"墾": u"垦",
u"壇": u"坛",
u"壈": u"𡒄",
u"壋": u"垱",
u"壓": u"压",
u"壘": u"垒",
u"壙": u"圹",
u"壚": u"垆",
u"壞": u"坏",
u"壟": u"垄",
u"壠": u"垅",
u"壢": u"坜",
u"壩": u"坝",
u"壯": u"壮",
u"壺": u"壶",
u"壼": u"壸",
u"壽": u"寿",
u"夠": u"够",
u"夢": u"梦",
u"夾": u"夹",
u"奐": u"奂",
u"奧": u"奥",
u"奩": u"奁",
u"奪": u"夺",
u"奬": u"奖",
u"奮": u"奋",
u"奼": u"姹",
u"妝": u"妆",
u"姍": u"姗",
u"姜": u"姜",
u"姦": u"奸",
u"娛": u"娱",
u"婁": u"娄",
u"婦": u"妇",
u"婭": u"娅",
u"媧": u"娲",
u"媯": u"妫",
u"媼": u"媪",
u"媽": u"妈",
u"嫗": u"妪",
u"嫵": u"妩",
u"嫻": u"娴",
u"嫿": u"婳",
u"嬀": u"妫",
u"嬈": u"娆",
u"嬋": u"婵",
u"嬌": u"娇",
u"嬙": u"嫱",
u"嬡": u"嫒",
u"嬤": u"嬷",
u"嬪": u"嫔",
u"嬰": u"婴",
u"嬸": u"婶",
u"孌": u"娈",
u"孫": u"孙",
u"學": u"学",
u"孿": u"孪",
u"宁": u"宁",
u"宮": u"宫",
u"寢": u"寝",
u"實": u"实",
u"寧": u"宁",
u"審": u"审",
u"寫": u"写",
u"寬": u"宽",
u"寵": u"宠",
u"寶": u"宝",
u"將": u"将",
u"專": u"专",
u"尋": u"寻",
u"對": u"对",
u"導": u"导",
u"尷": u"尴",
u"屆": u"届",
u"屍": u"尸",
u"屓": u"屃",
u"屜": u"屉",
u"屢": u"屡",
u"層": u"层",
u"屨": u"屦",
u"屬": u"属",
u"岡": u"冈",
u"峴": u"岘",
u"島": u"岛",
u"峽": u"峡",
u"崍": u"崃",
u"崗": u"岗",
u"崢": u"峥",
u"崬": u"岽",
u"嵐": u"岚",
u"嶁": u"嵝",
u"嶄": u"崭",
u"嶇": u"岖",
u"嶔": u"嵚",
u"嶗": u"崂",
u"嶠": u"峤",
u"嶢": u"峣",
u"嶧": u"峄",
u"嶮": u"崄",
u"嶴": u"岙",
u"嶸": u"嵘",
u"嶺": u"岭",
u"嶼": u"屿",
u"嶽": u"岳",
u"巋": u"岿",
u"巒": u"峦",
u"巔": u"巅",
u"巰": u"巯",
u"帘": u"帘",
u"帥": u"帅",
u"師": u"师",
u"帳": u"帐",
u"帶": u"带",
u"幀": u"帧",
u"幃": u"帏",
u"幗": u"帼",
u"幘": u"帻",
u"幟": u"帜",
u"幣": u"币",
u"幫": u"帮",
u"幬": u"帱",
u"幹": u"干",
u"幺": u"么",
u"幾": u"几",
u"广": u"广",
u"庫": u"库",
u"廁": u"厕",
u"廂": u"厢",
u"廄": u"厩",
u"廈": u"厦",
u"廚": u"厨",
u"廝": u"厮",
u"廟": u"庙",
u"廠": u"厂",
u"廡": u"庑",
u"廢": u"废",
u"廣": u"广",
u"廩": u"廪",
u"廬": u"庐",
u"廳": u"厅",
u"弒": u"弑",
u"弳": u"弪",
u"張": u"张",
u"強": u"强",
u"彆": u"别",
u"彈": u"弹",
u"彌": u"弥",
u"彎": u"弯",
u"彙": u"汇",
u"彞": u"彝",
u"彥": u"彦",
u"征": u"征",
u"後": u"后",
u"徑": u"径",
u"從": u"从",
u"徠": u"徕",
u"復": u"复",
u"徵": u"征",
u"徹": u"彻",
u"志": u"志",
u"恆": u"恒",
u"恥": u"耻",
u"悅": u"悦",
u"悞": u"悮",
u"悵": u"怅",
u"悶": u"闷",
u"惡": u"恶",
u"惱": u"恼",
u"惲": u"恽",
u"惻": u"恻",
u"愛": u"爱",
u"愜": u"惬",
u"愨": u"悫",
u"愴": u"怆",
u"愷": u"恺",
u"愾": u"忾",
u"愿": u"愿",
u"慄": u"栗",
u"態": u"态",
u"慍": u"愠",
u"慘": u"惨",
u"慚": u"惭",
u"慟": u"恸",
u"慣": u"惯",
u"慤": u"悫",
u"慪": u"怄",
u"慫": u"怂",
u"慮": u"虑",
u"慳": u"悭",
u"慶": u"庆",
u"憂": u"忧",
u"憊": u"惫",
u"憐": u"怜",
u"憑": u"凭",
u"憒": u"愦",
u"憚": u"惮",
u"憤": u"愤",
u"憫": u"悯",
u"憮": u"怃",
u"憲": u"宪",
u"憶": u"忆",
u"懇": u"恳",
u"應": u"应",
u"懌": u"怿",
u"懍": u"懔",
u"懞": u"蒙",
u"懟": u"怼",
u"懣": u"懑",
u"懨": u"恹",
u"懲": u"惩",
u"懶": u"懒",
u"懷": u"怀",
u"懸": u"悬",
u"懺": u"忏",
u"懼": u"惧",
u"懾": u"慑",
u"戀": u"恋",
u"戇": u"戆",
u"戔": u"戋",
u"戧": u"戗",
u"戩": u"戬",
u"戰": u"战",
u"戱": u"戯",
u"戲": u"戏",
u"戶": u"户",
u"担": u"担",
u"拋": u"抛",
u"挩": u"捝",
u"挾": u"挟",
u"捨": u"舍",
u"捫": u"扪",
u"据": u"据",
u"掃": u"扫",
u"掄": u"抡",
u"掗": u"挜",
u"掙": u"挣",
u"掛": u"挂",
u"採": u"采",
u"揀": u"拣",
u"揚": u"扬",
u"換": u"换",
u"揮": u"挥",
u"損": u"损",
u"搖": u"摇",
u"搗": u"捣",
u"搵": u"揾",
u"搶": u"抢",
u"摑": u"掴",
u"摜": u"掼",
u"摟": u"搂",
u"摯": u"挚",
u"摳": u"抠",
u"摶": u"抟",
u"摺": u"折",
u"摻": u"掺",
u"撈": u"捞",
u"撏": u"挦",
u"撐": u"撑",
u"撓": u"挠",
u"撝": u"㧑",
u"撟": u"挢",
u"撣": u"掸",
u"撥": u"拨",
u"撫": u"抚",
u"撲": u"扑",
u"撳": u"揿",
u"撻": u"挞",
u"撾": u"挝",
u"撿": u"捡",
u"擁": u"拥",
u"擄": u"掳",
u"擇": u"择",
u"擊": u"击",
u"擋": u"挡",
u"擓": u"㧟",
u"擔": u"担",
u"據": u"据",
u"擠": u"挤",
u"擬": u"拟",
u"擯": u"摈",
u"擰": u"拧",
u"擱": u"搁",
u"擲": u"掷",
u"擴": u"扩",
u"擷": u"撷",
u"擺": u"摆",
u"擻": u"擞",
u"擼": u"撸",
u"擾": u"扰",
u"攄": u"摅",
u"攆": u"撵",
u"攏": u"拢",
u"攔": u"拦",
u"攖": u"撄",
u"攙": u"搀",
u"攛": u"撺",
u"攜": u"携",
u"攝": u"摄",
u"攢": u"攒",
u"攣": u"挛",
u"攤": u"摊",
u"攪": u"搅",
u"攬": u"揽",
u"敗": u"败",
u"敘": u"叙",
u"敵": u"敌",
u"數": u"数",
u"斂": u"敛",
u"斃": u"毙",
u"斕": u"斓",
u"斗": u"斗",
u"斬": u"斩",
u"斷": u"断",
u"於": u"于",
u"時": u"时",
u"晉": u"晋",
u"晝": u"昼",
u"暈": u"晕",
u"暉": u"晖",
u"暘": u"旸",
u"暢": u"畅",
u"暫": u"暂",
u"曄": u"晔",
u"曆": u"历",
u"曇": u"昙",
u"曉": u"晓",
u"曏": u"向",
u"曖": u"暧",
u"曠": u"旷",
u"曨": u"昽",
u"曬": u"晒",
u"書": u"书",
u"會": u"会",
u"朧": u"胧",
u"朮": u"术",
u"术": u"术",
u"朴": u"朴",
u"東": u"东",
u"杴": u"锨",
u"极": u"极",
u"柜": u"柜",
u"柵": u"栅",
u"桿": u"杆",
u"梔": u"栀",
u"梘": u"枧",
u"條": u"条",
u"梟": u"枭",
u"梲": u"棁",
u"棄": u"弃",
u"棖": u"枨",
u"棗": u"枣",
u"棟": u"栋",
u"棧": u"栈",
u"棲": u"栖",
u"棶": u"梾",
u"椏": u"桠",
u"楊": u"杨",
u"楓": u"枫",
u"楨": u"桢",
u"業": u"业",
u"極": u"极",
u"榪": u"杩",
u"榮": u"荣",
u"榲": u"榅",
u"榿": u"桤",
u"構": u"构",
u"槍": u"枪",
u"槤": u"梿",
u"槧": u"椠",
u"槨": u"椁",
u"槳": u"桨",
u"樁": u"桩",
u"樂": u"乐",
u"樅": u"枞",
u"樓": u"楼",
u"標": u"标",
u"樞": u"枢",
u"樣": u"样",
u"樸": u"朴",
u"樹": u"树",
u"樺": u"桦",
u"橈": u"桡",
u"橋": u"桥",
u"機": u"机",
u"橢": u"椭",
u"橫": u"横",
u"檁": u"檩",
u"檉": u"柽",
u"檔": u"档",
u"檜": u"桧",
u"檟": u"槚",
u"檢": u"检",
u"檣": u"樯",
u"檮": u"梼",
u"檯": u"台",
u"檳": u"槟",
u"檸": u"柠",
u"檻": u"槛",
u"櫃": u"柜",
u"櫓": u"橹",
u"櫚": u"榈",
u"櫛": u"栉",
u"櫝": u"椟",
u"櫞": u"橼",
u"櫟": u"栎",
u"櫥": u"橱",
u"櫧": u"槠",
u"櫨": u"栌",
u"櫪": u"枥",
u"櫫": u"橥",
u"櫬": u"榇",
u"櫱": u"蘖",
u"櫳": u"栊",
u"櫸": u"榉",
u"櫻": u"樱",
u"欄": u"栏",
u"權": u"权",
u"欏": u"椤",
u"欒": u"栾",
u"欖": u"榄",
u"欞": u"棂",
u"欽": u"钦",
u"歐": u"欧",
u"歟": u"欤",
u"歡": u"欢",
u"歲": u"岁",
u"歷": u"历",
u"歸": u"归",
u"歿": u"殁",
u"殘": u"残",
u"殞": u"殒",
u"殤": u"殇",
u"殨": u"㱮",
u"殫": u"殚",
u"殮": u"殓",
u"殯": u"殡",
u"殰": u"㱩",
u"殲": u"歼",
u"殺": u"杀",
u"殻": u"壳",
u"殼": u"壳",
u"毀": u"毁",
u"毆": u"殴",
u"毿": u"毵",
u"氂": u"牦",
u"氈": u"毡",
u"氌": u"氇",
u"氣": u"气",
u"氫": u"氢",
u"氬": u"氩",
u"氳": u"氲",
u"汙": u"污",
u"決": u"决",
u"沒": u"没",
u"沖": u"冲",
u"況": u"况",
u"洶": u"汹",
u"浹": u"浃",
u"涂": u"涂",
u"涇": u"泾",
u"涼": u"凉",
u"淀": u"淀",
u"淒": u"凄",
u"淚": u"泪",
u"淥": u"渌",
u"淨": u"净",
u"淩": u"凌",
u"淪": u"沦",
u"淵": u"渊",
u"淶": u"涞",
u"淺": u"浅",
u"渙": u"涣",
u"減": u"减",
u"渦": u"涡",
u"測": u"测",
u"渾": u"浑",
u"湊": u"凑",
u"湞": u"浈",
u"湯": u"汤",
u"溈": u"沩",
u"準": u"准",
u"溝": u"沟",
u"溫": u"温",
u"滄": u"沧",
u"滅": u"灭",
u"滌": u"涤",
u"滎": u"荥",
u"滬": u"沪",
u"滯": u"滞",
u"滲": u"渗",
u"滷": u"卤",
u"滸": u"浒",
u"滻": u"浐",
u"滾": u"滚",
u"滿": u"满",
u"漁": u"渔",
u"漚": u"沤",
u"漢": u"汉",
u"漣": u"涟",
u"漬": u"渍",
u"漲": u"涨",
u"漵": u"溆",
u"漸": u"渐",
u"漿": u"浆",
u"潁": u"颍",
u"潑": u"泼",
u"潔": u"洁",
u"潙": u"沩",
u"潛": u"潜",
u"潤": u"润",
u"潯": u"浔",
u"潰": u"溃",
u"潷": u"滗",
u"潿": u"涠",
u"澀": u"涩",
u"澆": u"浇",
u"澇": u"涝",
u"澐": u"沄",
u"澗": u"涧",
u"澠": u"渑",
u"澤": u"泽",
u"澦": u"滪",
u"澩": u"泶",
u"澮": u"浍",
u"澱": u"淀",
u"濁": u"浊",
u"濃": u"浓",
u"濕": u"湿",
u"濘": u"泞",
u"濛": u"蒙",
u"濟": u"济",
u"濤": u"涛",
u"濫": u"滥",
u"濰": u"潍",
u"濱": u"滨",
u"濺": u"溅",
u"濼": u"泺",
u"濾": u"滤",
u"瀅": u"滢",
u"瀆": u"渎",
u"瀇": u"㲿",
u"瀉": u"泻",
u"瀋": u"沈",
u"瀏": u"浏",
u"瀕": u"濒",
u"瀘": u"泸",
u"瀝": u"沥",
u"瀟": u"潇",
u"瀠": u"潆",
u"瀦": u"潴",
u"瀧": u"泷",
u"瀨": u"濑",
u"瀰": u"弥",
u"瀲": u"潋",
u"瀾": u"澜",
u"灃": u"沣",
u"灄": u"滠",
u"灑": u"洒",
u"灕": u"漓",
u"灘": u"滩",
u"灝": u"灏",
u"灠": u"漤",
u"灣": u"湾",
u"灤": u"滦",
u"灧": u"滟",
u"災": u"灾",
u"為": u"为",
u"烏": u"乌",
u"烴": u"烃",
u"無": u"无",
u"煉": u"炼",
u"煒": u"炜",
u"煙": u"烟",
u"煢": u"茕",
u"煥": u"焕",
u"煩": u"烦",
u"煬": u"炀",
u"煱": u"㶽",
u"熅": u"煴",
u"熒": u"荧",
u"熗": u"炝",
u"熱": u"热",
u"熲": u"颎",
u"熾": u"炽",
u"燁": u"烨",
u"燈": u"灯",
u"燉": u"炖",
u"燒": u"烧",
u"燙": u"烫",
u"燜": u"焖",
u"營": u"营",
u"燦": u"灿",
u"燭": u"烛",
u"燴": u"烩",
u"燶": u"㶶",
u"燼": u"烬",
u"燾": u"焘",
u"爍": u"烁",
u"爐": u"炉",
u"爛": u"烂",
u"爭": u"争",
u"爲": u"为",
u"爺": u"爷",
u"爾": u"尔",
u"牆": u"墙",
u"牘": u"牍",
u"牽": u"牵",
u"犖": u"荦",
u"犢": u"犊",
u"犧": u"牺",
u"狀": u"状",
u"狹": u"狭",
u"狽": u"狈",
u"猙": u"狰",
u"猶": u"犹",
u"猻": u"狲",
u"獁": u"犸",
u"獄": u"狱",
u"獅": u"狮",
u"獎": u"奖",
u"獨": u"独",
u"獪": u"狯",
u"獫": u"猃",
u"獮": u"狝",
u"獰": u"狞",
u"獱": u"㺍",
u"獲": u"获",
u"獵": u"猎",
u"獷": u"犷",
u"獸": u"兽",
u"獺": u"獭",
u"獻": u"献",
u"獼": u"猕",
u"玀": u"猡",
u"現": u"现",
u"琺": u"珐",
u"琿": u"珲",
u"瑋": u"玮",
u"瑒": u"玚",
u"瑣": u"琐",
u"瑤": u"瑶",
u"瑩": u"莹",
u"瑪": u"玛",
u"瑲": u"玱",
u"璉": u"琏",
u"璣": u"玑",
u"璦": u"瑷",
u"璫": u"珰",
u"環": u"环",
u"璽": u"玺",
u"瓊": u"琼",
u"瓏": u"珑",
u"瓔": u"璎",
u"瓚": u"瓒",
u"甌": u"瓯",
u"產": u"产",
u"産": u"产",
u"畝": u"亩",
u"畢": u"毕",
u"異": u"异",
u"畵": u"画",
u"當": u"当",
u"疇": u"畴",
u"疊": u"叠",
u"痙": u"痉",
u"痾": u"疴",
u"瘂": u"痖",
u"瘋": u"疯",
u"瘍": u"疡",
u"瘓": u"痪",
u"瘞": u"瘗",
u"瘡": u"疮",
u"瘧": u"疟",
u"瘮": u"瘆",
u"瘲": u"疭",
u"瘺": u"瘘",
u"瘻": u"瘘",
u"療": u"疗",
u"癆": u"痨",
u"癇": u"痫",
u"癉": u"瘅",
u"癘": u"疠",
u"癟": u"瘪",
u"癢": u"痒",
u"癤": u"疖",
u"癥": u"症",
u"癧": u"疬",
u"癩": u"癞",
u"癬": u"癣",
u"癭": u"瘿",
u"癮": u"瘾",
u"癰": u"痈",
u"癱": u"瘫",
u"癲": u"癫",
u"發": u"发",
u"皚": u"皑",
u"皰": u"疱",
u"皸": u"皲",
u"皺": u"皱",
u"盃": u"杯",
u"盜": u"盗",
u"盞": u"盏",
u"盡": u"尽",
u"監": u"监",
u"盤": u"盘",
u"盧": u"卢",
u"盪": u"荡",
u"眥": u"眦",
u"眾": u"众",
u"睏": u"困",
u"睜": u"睁",
u"睞": u"睐",
u"瞘": u"眍",
u"瞜": u"䁖",
u"瞞": u"瞒",
u"瞭": u"了",
u"瞶": u"瞆",
u"瞼": u"睑",
u"矇": u"蒙",
u"矓": u"眬",
u"矚": u"瞩",
u"矯": u"矫",
u"硃": u"朱",
u"硜": u"硁",
u"硤": u"硖",
u"硨": u"砗",
u"确": u"确",
u"硯": u"砚",
u"碩": u"硕",
u"碭": u"砀",
u"碸": u"砜",
u"確": u"确",
u"碼": u"码",
u"磑": u"硙",
u"磚": u"砖",
u"磣": u"碜",
u"磧": u"碛",
u"磯": u"矶",
u"磽": u"硗",
u"礆": u"硷",
u"礎": u"础",
u"礙": u"碍",
u"礦": u"矿",
u"礪": u"砺",
u"礫": u"砾",
u"礬": u"矾",
u"礱": u"砻",
u"祿": u"禄",
u"禍": u"祸",
u"禎": u"祯",
u"禕": u"祎",
u"禡": u"祃",
u"禦": u"御",
u"禪": u"禅",
u"禮": u"礼",
u"禰": u"祢",
u"禱": u"祷",
u"禿": u"秃",
u"秈": u"籼",
u"种": u"种",
u"稅": u"税",
u"稈": u"秆",
u"稏": u"䅉",
u"稟": u"禀",
u"種": u"种",
u"稱": u"称",
u"穀": u"谷",
u"穌": u"稣",
u"積": u"积",
u"穎": u"颖",
u"穠": u"秾",
u"穡": u"穑",
u"穢": u"秽",
u"穩": u"稳",
u"穫": u"获",
u"穭": u"稆",
u"窩": u"窝",
u"窪": u"洼",
u"窮": u"穷",
u"窯": u"窑",
u"窵": u"窎",
u"窶": u"窭",
u"窺": u"窥",
u"竄": u"窜",
u"竅": u"窍",
u"竇": u"窦",
u"竈": u"灶",
u"竊": u"窃",
u"竪": u"竖",
u"競": u"竞",
u"筆": u"笔",
u"筍": u"笋",
u"筑": u"筑",
u"筧": u"笕",
u"筴": u"䇲",
u"箋": u"笺",
u"箏": u"筝",
u"節": u"节",
u"範": u"范",
u"築": u"筑",
u"篋": u"箧",
u"篔": u"筼",
u"篤": u"笃",
u"篩": u"筛",
u"篳": u"筚",
u"簀": u"箦",
u"簍": u"篓",
u"簞": u"箪",
u"簡": u"简",
u"簣": u"篑",
u"簫": u"箫",
u"簹": u"筜",
u"簽": u"签",
u"簾": u"帘",
u"籃": u"篮",
u"籌": u"筹",
u"籖": u"签",
u"籙": u"箓",
u"籜": u"箨",
u"籟": u"籁",
u"籠": u"笼",
u"籩": u"笾",
u"籪": u"簖",
u"籬": u"篱",
u"籮": u"箩",
u"籲": u"吁",
u"粵": u"粤",
u"糝": u"糁",
u"糞": u"粪",
u"糧": u"粮",
u"糰": u"团",
u"糲": u"粝",
u"糴": u"籴",
u"糶": u"粜",
u"糹": u"纟",
u"糾": u"纠",
u"紀": u"纪",
u"紂": u"纣",
u"約": u"约",
u"紅": u"红",
u"紆": u"纡",
u"紇": u"纥",
u"紈": u"纨",
u"紉": u"纫",
u"紋": u"纹",
u"納": u"纳",
u"紐": u"纽",
u"紓": u"纾",
u"純": u"纯",
u"紕": u"纰",
u"紖": u"纼",
u"紗": u"纱",
u"紘": u"纮",
u"紙": u"纸",
u"級": u"级",
u"紛": u"纷",
u"紜": u"纭",
u"紝": u"纴",
u"紡": u"纺",
u"紬": u"䌷",
u"細": u"细",
u"紱": u"绂",
u"紲": u"绁",
u"紳": u"绅",
u"紵": u"纻",
u"紹": u"绍",
u"紺": u"绀",
u"紼": u"绋",
u"紿": u"绐",
u"絀": u"绌",
u"終": u"终",
u"組": u"组",
u"絅": u"䌹",
u"絆": u"绊",
u"絎": u"绗",
u"結": u"结",
u"絕": u"绝",
u"絛": u"绦",
u"絝": u"绔",
u"絞": u"绞",
u"絡": u"络",
u"絢": u"绚",
u"給": u"给",
u"絨": u"绒",
u"絰": u"绖",
u"統": u"统",
u"絲": u"丝",
u"絳": u"绛",
u"絶": u"绝",
u"絹": u"绢",
u"綁": u"绑",
u"綃": u"绡",
u"綆": u"绠",
u"綈": u"绨",
u"綉": u"绣",
u"綌": u"绤",
u"綏": u"绥",
u"綐": u"䌼",
u"經": u"经",
u"綜": u"综",
u"綞": u"缍",
u"綠": u"绿",
u"綢": u"绸",
u"綣": u"绻",
u"綫": u"线",
u"綬": u"绶",
u"維": u"维",
u"綯": u"绹",
u"綰": u"绾",
u"綱": u"纲",
u"網": u"网",
u"綳": u"绷",
u"綴": u"缀",
u"綵": u"䌽",
u"綸": u"纶",
u"綹": u"绺",
u"綺": u"绮",
u"綻": u"绽",
u"綽": u"绰",
u"綾": u"绫",
u"綿": u"绵",
u"緄": u"绲",
u"緇": u"缁",
u"緊": u"紧",
u"緋": u"绯",
u"緑": u"绿",
u"緒": u"绪",
u"緓": u"绬",
u"緔": u"绱",
u"緗": u"缃",
u"緘": u"缄",
u"緙": u"缂",
u"線": u"线",
u"緝": u"缉",
u"緞": u"缎",
u"締": u"缔",
u"緡": u"缗",
u"緣": u"缘",
u"緦": u"缌",
u"編": u"编",
u"緩": u"缓",
u"緬": u"缅",
u"緯": u"纬",
u"緱": u"缑",
u"緲": u"缈",
u"練": u"练",
u"緶": u"缏",
u"緹": u"缇",
u"緻": u"致",
u"縈": u"萦",
u"縉": u"缙",
u"縊": u"缢",
u"縋": u"缒",
u"縐": u"绉",
u"縑": u"缣",
u"縕": u"缊",
u"縗": u"缞",
u"縛": u"缚",
u"縝": u"缜",
u"縞": u"缟",
u"縟": u"缛",
u"縣": u"县",
u"縧": u"绦",
u"縫": u"缝",
u"縭": u"缡",
u"縮": u"缩",
u"縱": u"纵",
u"縲": u"缧",
u"縳": u"䌸",
u"縴": u"纤",
u"縵": u"缦",
u"縶": u"絷",
u"縷": u"缕",
u"縹": u"缥",
u"總": u"总",
u"績": u"绩",
u"繃": u"绷",
u"繅": u"缫",
u"繆": u"缪",
u"繒": u"缯",
u"織": u"织",
u"繕": u"缮",
u"繚": u"缭",
u"繞": u"绕",
u"繡": u"绣",
u"繢": u"缋",
u"繩": u"绳",
u"繪": u"绘",
u"繫": u"系",
u"繭": u"茧",
u"繮": u"缰",
u"繯": u"缳",
u"繰": u"缲",
u"繳": u"缴",
u"繸": u"䍁",
u"繹": u"绎",
u"繼": u"继",
u"繽": u"缤",
u"繾": u"缱",
u"繿": u"䍀",
u"纈": u"缬",
u"纊": u"纩",
u"續": u"续",
u"纍": u"累",
u"纏": u"缠",
u"纓": u"缨",
u"纔": u"才",
u"纖": u"纤",
u"纘": u"缵",
u"纜": u"缆",
u"缽": u"钵",
u"罈": u"坛",
u"罌": u"罂",
u"罰": u"罚",
u"罵": u"骂",
u"罷": u"罢",
u"羅": u"罗",
u"羆": u"罴",
u"羈": u"羁",
u"羋": u"芈",
u"羥": u"羟",
u"義": u"义",
u"習": u"习",
u"翹": u"翘",
u"耬": u"耧",
u"耮": u"耢",
u"聖": u"圣",
u"聞": u"闻",
u"聯": u"联",
u"聰": u"聪",
u"聲": u"声",
u"聳": u"耸",
u"聵": u"聩",
u"聶": u"聂",
u"職": u"职",
u"聹": u"聍",
u"聽": u"听",
u"聾": u"聋",
u"肅": u"肃",
u"胜": u"胜",
u"脅": u"胁",
u"脈": u"脉",
u"脛": u"胫",
u"脫": u"脱",
u"脹": u"胀",
u"腊": u"腊",
u"腎": u"肾",
u"腖": u"胨",
u"腡": u"脶",
u"腦": u"脑",
u"腫": u"肿",
u"腳": u"脚",
u"腸": u"肠",
u"膃": u"腽",
u"膚": u"肤",
u"膠": u"胶",
u"膩": u"腻",
u"膽": u"胆",
u"膾": u"脍",
u"膿": u"脓",
u"臉": u"脸",
u"臍": u"脐",
u"臏": u"膑",
u"臘": u"腊",
u"臚": u"胪",
u"臟": u"脏",
u"臠": u"脔",
u"臢": u"臜",
u"臥": u"卧",
u"臨": u"临",
u"臺": u"台",
u"與": u"与",
u"興": u"兴",
u"舉": u"举",
u"舊": u"旧",
u"艙": u"舱",
u"艤": u"舣",
u"艦": u"舰",
u"艫": u"舻",
u"艱": u"艰",
u"艷": u"艳",
u"芻": u"刍",
u"苧": u"苎",
u"苹": u"苹",
u"范": u"范",
u"茲": u"兹",
u"荊": u"荆",
u"莊": u"庄",
u"莖": u"茎",
u"莢": u"荚",
u"莧": u"苋",
u"華": u"华",
u"萇": u"苌",
u"萊": u"莱",
u"萬": u"万",
u"萵": u"莴",
u"葉": u"叶",
u"葒": u"荭",
u"著": u"着",
u"著名": u"著名",
u"葤": u"荮",
u"葦": u"苇",
u"葯": u"药",
u"葷": u"荤",
u"蒓": u"莼",
u"蒔": u"莳",
u"蒞": u"莅",
u"蒼": u"苍",
u"蓀": u"荪",
u"蓋": u"盖",
u"蓮": u"莲",
u"蓯": u"苁",
u"蓴": u"莼",
u"蓽": u"荜",
u"蔔": u"卜",
u"蔞": u"蒌",
u"蔣": u"蒋",
u"蔥": u"葱",
u"蔦": u"茑",
u"蔭": u"荫",
u"蕁": u"荨",
u"蕆": u"蒇",
u"蕎": u"荞",
u"蕒": u"荬",
u"蕓": u"芸",
u"蕕": u"莸",
u"蕘": u"荛",
u"蕢": u"蒉",
u"蕩": u"荡",
u"蕪": u"芜",
u"蕭": u"萧",
u"蕷": u"蓣",
u"薀": u"蕰",
u"薈": u"荟",
u"薊": u"蓟",
u"薌": u"芗",
u"薔": u"蔷",
u"薘": u"荙",
u"薟": u"莶",
u"薦": u"荐",
u"薩": u"萨",
u"薳": u"䓕",
u"薴": u"苧",
u"薺": u"荠",
u"藉": u"借",
u"藍": u"蓝",
u"藎": u"荩",
u"藝": u"艺",
u"藥": u"药",
u"藪": u"薮",
u"藴": u"蕴",
u"藶": u"苈",
u"藹": u"蔼",
u"藺": u"蔺",
u"蘄": u"蕲",
u"蘆": u"芦",
u"蘇": u"苏",
u"蘊": u"蕴",
u"蘋": u"苹",
u"蘚": u"藓",
u"蘞": u"蔹",
u"蘢": u"茏",
u"蘭": u"兰",
u"蘺": u"蓠",
u"蘿": u"萝",
u"虆": u"蔂",
u"處": u"处",
u"虛": u"虚",
u"虜": u"虏",
u"號": u"号",
u"虧": u"亏",
u"虫": u"虫",
u"虯": u"虬",
u"蛺": u"蛱",
u"蛻": u"蜕",
u"蜆": u"蚬",
u"蜡": u"蜡",
u"蝕": u"蚀",
u"蝟": u"猬",
u"蝦": u"虾",
u"蝸": u"蜗",
u"螄": u"蛳",
u"螞": u"蚂",
u"螢": u"萤",
u"螮": u"䗖",
u"螻": u"蝼",
u"螿": u"螀",
u"蟄": u"蛰",
u"蟈": u"蝈",
u"蟎": u"螨",
u"蟣": u"虮",
u"蟬": u"蝉",
u"蟯": u"蛲",
u"蟲": u"虫",
u"蟶": u"蛏",
u"蟻": u"蚁",
u"蠅": u"蝇",
u"蠆": u"虿",
u"蠐": u"蛴",
u"蠑": u"蝾",
u"蠟": u"蜡",
u"蠣": u"蛎",
u"蠨": u"蟏",
u"蠱": u"蛊",
u"蠶": u"蚕",
u"蠻": u"蛮",
u"衆": u"众",
u"衊": u"蔑",
u"術": u"术",
u"衕": u"同",
u"衚": u"胡",
u"衛": u"卫",
u"衝": u"冲",
u"衹": u"只",
u"袞": u"衮",
u"裊": u"袅",
u"裏": u"里",
u"補": u"补",
u"裝": u"装",
u"裡": u"里",
u"製": u"制",
u"複": u"复",
u"褌": u"裈",
u"褘": u"袆",
u"褲": u"裤",
u"褳": u"裢",
u"褸": u"褛",
u"褻": u"亵",
u"襇": u"裥",
u"襏": u"袯",
u"襖": u"袄",
u"襝": u"裣",
u"襠": u"裆",
u"襤": u"褴",
u"襪": u"袜",
u"襬": u"䙓",
u"襯": u"衬",
u"襲": u"袭",
u"覆": u"复",
u"覆蓋": u"覆盖",
u"翻來覆去": u"翻来覆去",
u"見": u"见",
u"覎": u"觃",
u"規": u"规",
u"覓": u"觅",
u"視": u"视",
u"覘": u"觇",
u"覡": u"觋",
u"覥": u"觍",
u"覦": u"觎",
u"親": u"亲",
u"覬": u"觊",
u"覯": u"觏",
u"覲": u"觐",
u"覷": u"觑",
u"覺": u"觉",
u"覽": u"览",
u"覿": u"觌",
u"觀": u"观",
u"觴": u"觞",
u"觶": u"觯",
u"觸": u"触",
u"訁": u"讠",
u"訂": u"订",
u"訃": u"讣",
u"計": u"计",
u"訊": u"讯",
u"訌": u"讧",
u"討": u"讨",
u"訐": u"讦",
u"訒": u"讱",
u"訓": u"训",
u"訕": u"讪",
u"訖": u"讫",
u"託": u"讬",
u"記": u"记",
u"訛": u"讹",
u"訝": u"讶",
u"訟": u"讼",
u"訢": u"䜣",
u"訣": u"诀",
u"訥": u"讷",
u"訩": u"讻",
u"訪": u"访",
u"設": u"设",
u"許": u"许",
u"訴": u"诉",
u"訶": u"诃",
u"診": u"诊",
u"註": u"注",
u"詁": u"诂",
u"詆": u"诋",
u"詎": u"讵",
u"詐": u"诈",
u"詒": u"诒",
u"詔": u"诏",
u"評": u"评",
u"詖": u"诐",
u"詗": u"诇",
u"詘": u"诎",
u"詛": u"诅",
u"詞": u"词",
u"詠": u"咏",
u"詡": u"诩",
u"詢": u"询",
u"詣": u"诣",
u"試": u"试",
u"詩": u"诗",
u"詫": u"诧",
u"詬": u"诟",
u"詭": u"诡",
u"詮": u"诠",
u"詰": u"诘",
u"話": u"话",
u"該": u"该",
u"詳": u"详",
u"詵": u"诜",
u"詼": u"诙",
u"詿": u"诖",
u"誄": u"诔",
u"誅": u"诛",
u"誆": u"诓",
u"誇": u"夸",
u"誌": u"志",
u"認": u"认",
u"誑": u"诳",
u"誒": u"诶",
u"誕": u"诞",
u"誘": u"诱",
u"誚": u"诮",
u"語": u"语",
u"誠": u"诚",
u"誡": u"诫",
u"誣": u"诬",
u"誤": u"误",
u"誥": u"诰",
u"誦": u"诵",
u"誨": u"诲",
u"說": u"说",
u"説": u"说",
u"誰": u"谁",
u"課": u"课",
u"誶": u"谇",
u"誹": u"诽",
u"誼": u"谊",
u"誾": u"訚",
u"調": u"调",
u"諂": u"谄",
u"諄": u"谆",
u"談": u"谈",
u"諉": u"诿",
u"請": u"请",
u"諍": u"诤",
u"諏": u"诹",
u"諑": u"诼",
u"諒": u"谅",
u"論": u"论",
u"諗": u"谂",
u"諛": u"谀",
u"諜": u"谍",
u"諝": u"谞",
u"諞": u"谝",
u"諢": u"诨",
u"諤": u"谔",
u"諦": u"谛",
u"諧": u"谐",
u"諫": u"谏",
u"諭": u"谕",
u"諮": u"谘",
u"諱": u"讳",
u"諳": u"谙",
u"諶": u"谌",
u"諷": u"讽",
u"諸": u"诸",
u"諺": u"谚",
u"諼": u"谖",
u"諾": u"诺",
u"謀": u"谋",
u"謁": u"谒",
u"謂": u"谓",
u"謄": u"誊",
u"謅": u"诌",
u"謊": u"谎",
u"謎": u"谜",
u"謐": u"谧",
u"謔": u"谑",
u"謖": u"谡",
u"謗": u"谤",
u"謙": u"谦",
u"謚": u"谥",
u"講": u"讲",
u"謝": u"谢",
u"謠": u"谣",
u"謡": u"谣",
u"謨": u"谟",
u"謫": u"谪",
u"謬": u"谬",
u"謭": u"谫",
u"謳": u"讴",
u"謹": u"谨",
u"謾": u"谩",
u"譅": u"䜧",
u"證": u"证",
u"譎": u"谲",
u"譏": u"讥",
u"譖": u"谮",
u"識": u"识",
u"譙": u"谯",
u"譚": u"谭",
u"譜": u"谱",
u"譫": u"谵",
u"譯": u"译",
u"議": u"议",
u"譴": u"谴",
u"護": u"护",
u"譸": u"诪",
u"譽": u"誉",
u"譾": u"谫",
u"讀": u"读",
u"變": u"变",
u"讎": u"仇",
u"讎": u"雠",
u"讒": u"谗",
u"讓": u"让",
u"讕": u"谰",
u"讖": u"谶",
u"讜": u"谠",
u"讞": u"谳",
u"豈": u"岂",
u"豎": u"竖",
u"豐": u"丰",
u"豬": u"猪",
u"豶": u"豮",
u"貓": u"猫",
u"貙": u"䝙",
u"貝": u"贝",
u"貞": u"贞",
u"貟": u"贠",
u"負": u"负",
u"財": u"财",
u"貢": u"贡",
u"貧": u"贫",
u"貨": u"货",
u"販": u"贩",
u"貪": u"贪",
u"貫": u"贯",
u"責": u"责",
u"貯": u"贮",
u"貰": u"贳",
u"貲": u"赀",
u"貳": u"贰",
u"貴": u"贵",
u"貶": u"贬",
u"買": u"买",
u"貸": u"贷",
u"貺": u"贶",
u"費": u"费",
u"貼": u"贴",
u"貽": u"贻",
u"貿": u"贸",
u"賀": u"贺",
u"賁": u"贲",
u"賂": u"赂",
u"賃": u"赁",
u"賄": u"贿",
u"賅": u"赅",
u"資": u"资",
u"賈": u"贾",
u"賊": u"贼",
u"賑": u"赈",
u"賒": u"赊",
u"賓": u"宾",
u"賕": u"赇",
u"賙": u"赒",
u"賚": u"赉",
u"賜": u"赐",
u"賞": u"赏",
u"賠": u"赔",
u"賡": u"赓",
u"賢": u"贤",
u"賣": u"卖",
u"賤": u"贱",
u"賦": u"赋",
u"賧": u"赕",
u"質": u"质",
u"賫": u"赍",
u"賬": u"账",
u"賭": u"赌",
u"賰": u"䞐",
u"賴": u"赖",
u"賵": u"赗",
u"賺": u"赚",
u"賻": u"赙",
u"購": u"购",
u"賽": u"赛",
u"賾": u"赜",
u"贄": u"贽",
u"贅": u"赘",
u"贇": u"赟",
u"贈": u"赠",
u"贊": u"赞",
u"贋": u"赝",
u"贍": u"赡",
u"贏": u"赢",
u"贐": u"赆",
u"贓": u"赃",
u"贔": u"赑",
u"贖": u"赎",
u"贗": u"赝",
u"贛": u"赣",
u"贜": u"赃",
u"赬": u"赪",
u"趕": u"赶",
u"趙": u"赵",
u"趨": u"趋",
u"趲": u"趱",
u"跡": u"迹",
u"踐": u"践",
u"踴": u"踊",
u"蹌": u"跄",
u"蹕": u"跸",
u"蹣": u"蹒",
u"蹤": u"踪",
u"蹺": u"跷",
u"躂": u"跶",
u"躉": u"趸",
u"躊": u"踌",
u"躋": u"跻",
u"躍": u"跃",
u"躑": u"踯",
u"躒": u"跞",
u"躓": u"踬",
u"躕": u"蹰",
u"躚": u"跹",
u"躡": u"蹑",
u"躥": u"蹿",
u"躦": u"躜",
u"躪": u"躏",
u"軀": u"躯",
u"車": u"车",
u"軋": u"轧",
u"軌": u"轨",
u"軍": u"军",
u"軑": u"轪",
u"軒": u"轩",
u"軔": u"轫",
u"軛": u"轭",
u"軟": u"软",
u"軤": u"轷",
u"軫": u"轸",
u"軲": u"轱",
u"軸": u"轴",
u"軹": u"轵",
u"軺": u"轺",
u"軻": u"轲",
u"軼": u"轶",
u"軾": u"轼",
u"較": u"较",
u"輅": u"辂",
u"輇": u"辁",
u"輈": u"辀",
u"載": u"载",
u"輊": u"轾",
u"輒": u"辄",
u"輓": u"挽",
u"輔": u"辅",
u"輕": u"轻",
u"輛": u"辆",
u"輜": u"辎",
u"輝": u"辉",
u"輞": u"辋",
u"輟": u"辍",
u"輥": u"辊",
u"輦": u"辇",
u"輩": u"辈",
u"輪": u"轮",
u"輬": u"辌",
u"輯": u"辑",
u"輳": u"辏",
u"輸": u"输",
u"輻": u"辐",
u"輾": u"辗",
u"輿": u"舆",
u"轀": u"辒",
u"轂": u"毂",
u"轄": u"辖",
u"轅": u"辕",
u"轆": u"辘",
u"轉": u"转",
u"轍": u"辙",
u"轎": u"轿",
u"轔": u"辚",
u"轟": u"轰",
u"轡": u"辔",
u"轢": u"轹",
u"轤": u"轳",
u"辟": u"辟",
u"辦": u"办",
u"辭": u"辞",
u"辮": u"辫",
u"辯": u"辩",
u"農": u"农",
u"迴": u"回",
u"适": u"适",
u"逕": u"迳",
u"這": u"这",
u"連": u"连",
u"週": u"周",
u"進": u"进",
u"遊": u"游",
u"運": u"运",
u"過": u"过",
u"達": u"达",
u"違": u"违",
u"遙": u"遥",
u"遜": u"逊",
u"遞": u"递",
u"遠": u"远",
u"適": u"适",
u"遲": u"迟",
u"遷": u"迁",
u"選": u"选",
u"遺": u"遗",
u"遼": u"辽",
u"邁": u"迈",
u"還": u"还",
u"邇": u"迩",
u"邊": u"边",
u"邏": u"逻",
u"邐": u"逦",
u"郁": u"郁",
u"郟": u"郏",
u"郵": u"邮",
u"鄆": u"郓",
u"鄉": u"乡",
u"鄒": u"邹",
u"鄔": u"邬",
u"鄖": u"郧",
u"鄧": u"邓",
u"鄭": u"郑",
u"鄰": u"邻",
u"鄲": u"郸",
u"鄴": u"邺",
u"鄶": u"郐",
u"鄺": u"邝",
u"酇": u"酂",
u"酈": u"郦",
u"醖": u"酝",
u"醜": u"丑",
u"醞": u"酝",
u"醫": u"医",
u"醬": u"酱",
u"醱": u"酦",
u"釀": u"酿",
u"釁": u"衅",
u"釃": u"酾",
u"釅": u"酽",
u"采": u"采",
u"釋": u"释",
u"釐": u"厘",
u"釒": u"钅",
u"釓": u"钆",
u"釔": u"钇",
u"釕": u"钌",
u"釗": u"钊",
u"釘": u"钉",
u"釙": u"钋",
u"針": u"针",
u"釣": u"钓",
u"釤": u"钐",
u"釧": u"钏",
u"釩": u"钒",
u"釵": u"钗",
u"釷": u"钍",
u"釹": u"钕",
u"釺": u"钎",
u"鈀": u"钯",
u"鈁": u"钫",
u"鈃": u"钘",
u"鈄": u"钭",
u"鈈": u"钚",
u"鈉": u"钠",
u"鈍": u"钝",
u"鈎": u"钩",
u"鈐": u"钤",
u"鈑": u"钣",
u"鈒": u"钑",
u"鈔": u"钞",
u"鈕": u"钮",
u"鈞": u"钧",
u"鈣": u"钙",
u"鈥": u"钬",
u"鈦": u"钛",
u"鈧": u"钪",
u"鈮": u"铌",
u"鈰": u"铈",
u"鈳": u"钶",
u"鈴": u"铃",
u"鈷": u"钴",
u"鈸": u"钹",
u"鈹": u"铍",
u"鈺": u"钰",
u"鈽": u"钸",
u"鈾": u"铀",
u"鈿": u"钿",
u"鉀": u"钾",
u"鉅": u"钜",
u"鉈": u"铊",
u"鉉": u"铉",
u"鉋": u"铇",
u"鉍": u"铋",
u"鉑": u"铂",
u"鉕": u"钷",
u"鉗": u"钳",
u"鉚": u"铆",
u"鉛": u"铅",
u"鉞": u"钺",
u"鉢": u"钵",
u"鉤": u"钩",
u"鉦": u"钲",
u"鉬": u"钼",
u"鉭": u"钽",
u"鉶": u"铏",
u"鉸": u"铰",
u"鉺": u"铒",
u"鉻": u"铬",
u"鉿": u"铪",
u"銀": u"银",
u"銃": u"铳",
u"銅": u"铜",
u"銍": u"铚",
u"銑": u"铣",
u"銓": u"铨",
u"銖": u"铢",
u"銘": u"铭",
u"銚": u"铫",
u"銛": u"铦",
u"銜": u"衔",
u"銠": u"铑",
u"銣": u"铷",
u"銥": u"铱",
u"銦": u"铟",
u"銨": u"铵",
u"銩": u"铥",
u"銪": u"铕",
u"銫": u"铯",
u"銬": u"铐",
u"銱": u"铞",
u"銳": u"锐",
u"銷": u"销",
u"銹": u"锈",
u"銻": u"锑",
u"銼": u"锉",
u"鋁": u"铝",
u"鋃": u"锒",
u"鋅": u"锌",
u"鋇": u"钡",
u"鋌": u"铤",
u"鋏": u"铗",
u"鋒": u"锋",
u"鋙": u"铻",
u"鋝": u"锊",
u"鋟": u"锓",
u"鋣": u"铘",
u"鋤": u"锄",
u"鋥": u"锃",
u"鋦": u"锔",
u"鋨": u"锇",
u"鋩": u"铓",
u"鋪": u"铺",
u"鋭": u"锐",
u"鋮": u"铖",
u"鋯": u"锆",
u"鋰": u"锂",
u"鋱": u"铽",
u"鋶": u"锍",
u"鋸": u"锯",
u"鋼": u"钢",
u"錁": u"锞",
u"錄": u"录",
u"錆": u"锖",
u"錇": u"锫",
u"錈": u"锩",
u"錏": u"铔",
u"錐": u"锥",
u"錒": u"锕",
u"錕": u"锟",
u"錘": u"锤",
u"錙": u"锱",
u"錚": u"铮",
u"錛": u"锛",
u"錟": u"锬",
u"錠": u"锭",
u"錡": u"锜",
u"錢": u"钱",
u"錦": u"锦",
u"錨": u"锚",
u"錩": u"锠",
u"錫": u"锡",
u"錮": u"锢",
u"錯": u"错",
u"録": u"录",
u"錳": u"锰",
u"錶": u"表",
u"錸": u"铼",
u"鍀": u"锝",
u"鍁": u"锨",
u"鍃": u"锪",
u"鍆": u"钔",
u"鍇": u"锴",
u"鍈": u"锳",
u"鍋": u"锅",
u"鍍": u"镀",
u"鍔": u"锷",
u"鍘": u"铡",
u"鍚": u"钖",
u"鍛": u"锻",
u"鍠": u"锽",
u"鍤": u"锸",
u"鍥": u"锲",
u"鍩": u"锘",
u"鍬": u"锹",
u"鍰": u"锾",
u"鍵": u"键",
u"鍶": u"锶",
u"鍺": u"锗",
u"鍾": u"钟",
u"鎂": u"镁",
u"鎄": u"锿",
u"鎇": u"镅",
u"鎊": u"镑",
u"鎔": u"镕",
u"鎖": u"锁",
u"鎘": u"镉",
u"鎚": u"锤",
u"鎛": u"镈",
u"鎝": u"𨱏",
u"鎡": u"镃",
u"鎢": u"钨",
u"鎣": u"蓥",
u"鎦": u"镏",
u"鎧": u"铠",
u"鎩": u"铩",
u"鎪": u"锼",
u"鎬": u"镐",
u"鎮": u"镇",
u"鎰": u"镒",
u"鎲": u"镋",
u"鎳": u"镍",
u"鎵": u"镓",
u"鎸": u"镌",
u"鎿": u"镎",
u"鏃": u"镞",
u"鏇": u"镟",
u"鏈": u"链",
u"鏌": u"镆",
u"鏍": u"镙",
u"鏐": u"镠",
u"鏑": u"镝",
u"鏗": u"铿",
u"鏘": u"锵",
u"鏜": u"镗",
u"鏝": u"镘",
u"鏞": u"镛",
u"鏟": u"铲",
u"鏡": u"镜",
u"鏢": u"镖",
u"鏤": u"镂",
u"鏨": u"錾",
u"鏰": u"镚",
u"鏵": u"铧",
u"鏷": u"镤",
u"鏹": u"镪",
u"鏽": u"锈",
u"鐃": u"铙",
u"鐋": u"铴",
u"鐐": u"镣",
u"鐒": u"铹",
u"鐓": u"镦",
u"鐔": u"镡",
u"鐘": u"钟",
u"鐙": u"镫",
u"鐝": u"镢",
u"鐠": u"镨",
u"鐦": u"锎",
u"鐧": u"锏",
u"鐨": u"镄",
u"鐫": u"镌",
u"鐮": u"镰",
u"鐲": u"镯",
u"鐳": u"镭",
u"鐵": u"铁",
u"鐶": u"镮",
u"鐸": u"铎",
u"鐺": u"铛",
u"鐿": u"镱",
u"鑄": u"铸",
u"鑊": u"镬",
u"鑌": u"镔",
u"鑒": u"鉴",
u"鑔": u"镲",
u"鑕": u"锧",
u"鑞": u"镴",
u"鑠": u"铄",
u"鑣": u"镳",
u"鑥": u"镥",
u"鑭": u"镧",
u"鑰": u"钥",
u"鑱": u"镵",
u"鑲": u"镶",
u"鑷": u"镊",
u"鑹": u"镩",
u"鑼": u"锣",
u"鑽": u"钻",
u"鑾": u"銮",
u"鑿": u"凿",
u"钁": u"镢",
u"镟": u"旋",
u"長": u"长",
u"門": u"门",
u"閂": u"闩",
u"閃": u"闪",
u"閆": u"闫",
u"閈": u"闬",
u"閉": u"闭",
u"開": u"开",
u"閌": u"闶",
u"閎": u"闳",
u"閏": u"闰",
u"閑": u"闲",
u"間": u"间",
u"閔": u"闵",
u"閘": u"闸",
u"閡": u"阂",
u"閣": u"阁",
u"閤": u"合",
u"閥": u"阀",
u"閨": u"闺",
u"閩": u"闽",
u"閫": u"阃",
u"閬": u"阆",
u"閭": u"闾",
u"閱": u"阅",
u"閲": u"阅",
u"閶": u"阊",
u"閹": u"阉",
u"閻": u"阎",
u"閼": u"阏",
u"閽": u"阍",
u"閾": u"阈",
u"閿": u"阌",
u"闃": u"阒",
u"闆": u"板",
u"闈": u"闱",
u"闊": u"阔",
u"闋": u"阕",
u"闌": u"阑",
u"闍": u"阇",
u"闐": u"阗",
u"闒": u"阘",
u"闓": u"闿",
u"闔": u"阖",
u"闕": u"阙",
u"闖": u"闯",
u"關": u"关",
u"闞": u"阚",
u"闠": u"阓",
u"闡": u"阐",
u"闤": u"阛",
u"闥": u"闼",
u"阪": u"坂",
u"陘": u"陉",
u"陝": u"陕",
u"陣": u"阵",
u"陰": u"阴",
u"陳": u"陈",
u"陸": u"陆",
u"陽": u"阳",
u"隉": u"陧",
u"隊": u"队",
u"階": u"阶",
u"隕": u"陨",
u"際": u"际",
u"隨": u"随",
u"險": u"险",
u"隱": u"隐",
u"隴": u"陇",
u"隸": u"隶",
u"隻": u"只",
u"雋": u"隽",
u"雖": u"虽",
u"雙": u"双",
u"雛": u"雏",
u"雜": u"杂",
u"雞": u"鸡",
u"離": u"离",
u"難": u"难",
u"雲": u"云",
u"電": u"电",
u"霢": u"霡",
u"霧": u"雾",
u"霽": u"霁",
u"靂": u"雳",
u"靄": u"霭",
u"靈": u"灵",
u"靚": u"靓",
u"靜": u"静",
u"靨": u"靥",
u"鞀": u"鼗",
u"鞏": u"巩",
u"鞝": u"绱",
u"鞦": u"秋",
u"鞽": u"鞒",
u"韁": u"缰",
u"韃": u"鞑",
u"韆": u"千",
u"韉": u"鞯",
u"韋": u"韦",
u"韌": u"韧",
u"韍": u"韨",
u"韓": u"韩",
u"韙": u"韪",
u"韜": u"韬",
u"韞": u"韫",
u"韻": u"韵",
u"響": u"响",
u"頁": u"页",
u"頂": u"顶",
u"頃": u"顷",
u"項": u"项",
u"順": u"顺",
u"頇": u"顸",
u"須": u"须",
u"頊": u"顼",
u"頌": u"颂",
u"頎": u"颀",
u"頏": u"颃",
u"預": u"预",
u"頑": u"顽",
u"頒": u"颁",
u"頓": u"顿",
u"頗": u"颇",
u"領": u"领",
u"頜": u"颌",
u"頡": u"颉",
u"頤": u"颐",
u"頦": u"颏",
u"頭": u"头",
u"頮": u"颒",
u"頰": u"颊",
u"頲": u"颋",
u"頴": u"颕",
u"頷": u"颔",
u"頸": u"颈",
u"頹": u"颓",
u"頻": u"频",
u"頽": u"颓",
u"顆": u"颗",
u"題": u"题",
u"額": u"额",
u"顎": u"颚",
u"顏": u"颜",
u"顒": u"颙",
u"顓": u"颛",
u"顔": u"颜",
u"願": u"愿",
u"顙": u"颡",
u"顛": u"颠",
u"類": u"类",
u"顢": u"颟",
u"顥": u"颢",
u"顧": u"顾",
u"顫": u"颤",
u"顬": u"颥",
u"顯": u"显",
u"顰": u"颦",
u"顱": u"颅",
u"顳": u"颞",
u"顴": u"颧",
u"風": u"风",
u"颭": u"飐",
u"颮": u"飑",
u"颯": u"飒",
u"颱": u"台",
u"颳": u"刮",
u"颶": u"飓",
u"颸": u"飔",
u"颺": u"飏",
u"颻": u"飖",
u"颼": u"飕",
u"飀": u"飗",
u"飄": u"飘",
u"飆": u"飙",
u"飈": u"飚",
u"飛": u"飞",
u"飠": u"饣",
u"飢": u"饥",
u"飣": u"饤",
u"飥": u"饦",
u"飩": u"饨",
u"飪": u"饪",
u"飫": u"饫",
u"飭": u"饬",
u"飯": u"饭",
u"飲": u"饮",
u"飴": u"饴",
u"飼": u"饲",
u"飽": u"饱",
u"飾": u"饰",
u"飿": u"饳",
u"餃": u"饺",
u"餄": u"饸",
u"餅": u"饼",
u"餉": u"饷",
u"養": u"养",
u"餌": u"饵",
u"餎": u"饹",
u"餏": u"饻",
u"餑": u"饽",
u"餒": u"馁",
u"餓": u"饿",
u"餕": u"馂",
u"餖": u"饾",
u"餚": u"肴",
u"餛": u"馄",
u"餜": u"馃",
u"餞": u"饯",
u"餡": u"馅",
u"館": u"馆",
u"餱": u"糇",
u"餳": u"饧",
u"餶": u"馉",
u"餷": u"馇",
u"餺": u"馎",
u"餼": u"饩",
u"餾": u"馏",
u"餿": u"馊",
u"饁": u"馌",
u"饃": u"馍",
u"饅": u"馒",
u"饈": u"馐",
u"饉": u"馑",
u"饊": u"馓",
u"饋": u"馈",
u"饌": u"馔",
u"饑": u"饥",
u"饒": u"饶",
u"饗": u"飨",
u"饜": u"餍",
u"饞": u"馋",
u"饢": u"馕",
u"馬": u"马",
u"馭": u"驭",
u"馮": u"冯",
u"馱": u"驮",
u"馳": u"驰",
u"馴": u"驯",
u"馹": u"驲",
u"駁": u"驳",
u"駐": u"驻",
u"駑": u"驽",
u"駒": u"驹",
u"駔": u"驵",
u"駕": u"驾",
u"駘": u"骀",
u"駙": u"驸",
u"駛": u"驶",
u"駝": u"驼",
u"駟": u"驷",
u"駡": u"骂",
u"駢": u"骈",
u"駭": u"骇",
u"駰": u"骃",
u"駱": u"骆",
u"駸": u"骎",
u"駿": u"骏",
u"騁": u"骋",
u"騂": u"骍",
u"騅": u"骓",
u"騌": u"骔",
u"騍": u"骒",
u"騎": u"骑",
u"騏": u"骐",
u"騖": u"骛",
u"騙": u"骗",
u"騤": u"骙",
u"騧": u"䯄",
u"騫": u"骞",
u"騭": u"骘",
u"騮": u"骝",
u"騰": u"腾",
u"騶": u"驺",
u"騷": u"骚",
u"騸": u"骟",
u"騾": u"骡",
u"驀": u"蓦",
u"驁": u"骜",
u"驂": u"骖",
u"驃": u"骠",
u"驄": u"骢",
u"驅": u"驱",
u"驊": u"骅",
u"驌": u"骕",
u"驍": u"骁",
u"驏": u"骣",
u"驕": u"骄",
u"驗": u"验",
u"驚": u"惊",
u"驛": u"驿",
u"驟": u"骤",
u"驢": u"驴",
u"驤": u"骧",
u"驥": u"骥",
u"驦": u"骦",
u"驪": u"骊",
u"驫": u"骉",
u"骯": u"肮",
u"髏": u"髅",
u"髒": u"脏",
u"體": u"体",
u"髕": u"髌",
u"髖": u"髋",
u"髮": u"发",
u"鬆": u"松",
u"鬍": u"胡",
u"鬚": u"须",
u"鬢": u"鬓",
u"鬥": u"斗",
u"鬧": u"闹",
u"鬩": u"阋",
u"鬮": u"阄",
u"鬱": u"郁",
u"魎": u"魉",
u"魘": u"魇",
u"魚": u"鱼",
u"魛": u"鱽",
u"魢": u"鱾",
u"魨": u"鲀",
u"魯": u"鲁",
u"魴": u"鲂",
u"魷": u"鱿",
u"魺": u"鲄",
u"鮁": u"鲅",
u"鮃": u"鲆",
u"鮊": u"鲌",
u"鮋": u"鲉",
u"鮍": u"鲏",
u"鮎": u"鲇",
u"鮐": u"鲐",
u"鮑": u"鲍",
u"鮒": u"鲋",
u"鮓": u"鲊",
u"鮚": u"鲒",
u"鮜": u"鲘",
u"鮝": u"鲞",
u"鮞": u"鲕",
u"鮦": u"鲖",
u"鮪": u"鲔",
u"鮫": u"鲛",
u"鮭": u"鲑",
u"鮮": u"鲜",
u"鮳": u"鲓",
u"鮶": u"鲪",
u"鮺": u"鲝",
u"鯀": u"鲧",
u"鯁": u"鲠",
u"鯇": u"鲩",
u"鯉": u"鲤",
u"鯊": u"鲨",
u"鯒": u"鲬",
u"鯔": u"鲻",
u"鯕": u"鲯",
u"鯖": u"鲭",
u"鯗": u"鲞",
u"鯛": u"鲷",
u"鯝": u"鲴",
u"鯡": u"鲱",
u"鯢": u"鲵",
u"鯤": u"鲲",
u"鯧": u"鲳",
u"鯨": u"鲸",
u"鯪": u"鲮",
u"鯫": u"鲰",
u"鯴": u"鲺",
u"鯷": u"鳀",
u"鯽": u"鲫",
u"鯿": u"鳊",
u"鰁": u"鳈",
u"鰂": u"鲗",
u"鰃": u"鳂",
u"鰈": u"鲽",
u"鰉": u"鳇",
u"鰍": u"鳅",
u"鰏": u"鲾",
u"鰐": u"鳄",
u"鰒": u"鳆",
u"鰓": u"鳃",
u"鰜": u"鳒",
u"鰟": u"鳑",
u"鰠": u"鳋",
u"鰣": u"鲥",
u"鰥": u"鳏",
u"鰨": u"鳎",
u"鰩": u"鳐",
u"鰭": u"鳍",
u"鰮": u"鳁",
u"鰱": u"鲢",
u"鰲": u"鳌",
u"鰳": u"鳓",
u"鰵": u"鳘",
u"鰷": u"鲦",
u"鰹": u"鲣",
u"鰺": u"鲹",
u"鰻": u"鳗",
u"鰼": u"鳛",
u"鰾": u"鳔",
u"鱂": u"鳉",
u"鱅": u"鳙",
u"鱈": u"鳕",
u"鱉": u"鳖",
u"鱒": u"鳟",
u"鱔": u"鳝",
u"鱖": u"鳜",
u"鱗": u"鳞",
u"鱘": u"鲟",
u"鱝": u"鲼",
u"鱟": u"鲎",
u"鱠": u"鲙",
u"鱣": u"鳣",
u"鱤": u"鳡",
u"鱧": u"鳢",
u"鱨": u"鲿",
u"鱭": u"鲚",
u"鱯": u"鳠",
u"鱷": u"鳄",
u"鱸": u"鲈",
u"鱺": u"鲡",
u"䰾": u"鲃",
u"䲁": u"鳚",
u"鳥": u"鸟",
u"鳧": u"凫",
u"鳩": u"鸠",
u"鳬": u"凫",
u"鳲": u"鸤",
u"鳳": u"凤",
u"鳴": u"鸣",
u"鳶": u"鸢",
u"鳾": u"䴓",
u"鴆": u"鸩",
u"鴇": u"鸨",
u"鴉": u"鸦",
u"鴒": u"鸰",
u"鴕": u"鸵",
u"鴛": u"鸳",
u"鴝": u"鸲",
u"鴞": u"鸮",
u"鴟": u"鸱",
u"鴣": u"鸪",
u"鴦": u"鸯",
u"鴨": u"鸭",
u"鴯": u"鸸",
u"鴰": u"鸹",
u"鴴": u"鸻",
u"鴷": u"䴕",
u"鴻": u"鸿",
u"鴿": u"鸽",
u"鵁": u"䴔",
u"鵂": u"鸺",
u"鵃": u"鸼",
u"鵐": u"鹀",
u"鵑": u"鹃",
u"鵒": u"鹆",
u"鵓": u"鹁",
u"鵜": u"鹈",
u"鵝": u"鹅",
u"鵠": u"鹄",
u"鵡": u"鹉",
u"鵪": u"鹌",
u"鵬": u"鹏",
u"鵮": u"鹐",
u"鵯": u"鹎",
u"鵲": u"鹊",
u"鵷": u"鹓",
u"鵾": u"鹍",
u"鶄": u"䴖",
u"鶇": u"鸫",
u"鶉": u"鹑",
u"鶊": u"鹒",
u"鶓": u"鹋",
u"鶖": u"鹙",
u"鶘": u"鹕",
u"鶚": u"鹗",
u"鶡": u"鹖",
u"鶥": u"鹛",
u"鶩": u"鹜",
u"鶪": u"䴗",
u"鶬": u"鸧",
u"鶯": u"莺",
u"鶲": u"鹟",
u"鶴": u"鹤",
u"鶹": u"鹠",
u"鶺": u"鹡",
u"鶻": u"鹘",
u"鶼": u"鹣",
u"鶿": u"鹚",
u"鷀": u"鹚",
u"鷁": u"鹢",
u"鷂": u"鹞",
u"鷄": u"鸡",
u"鷈": u"䴘",
u"鷊": u"鹝",
u"鷓": u"鹧",
u"鷖": u"鹥",
u"鷗": u"鸥",
u"鷙": u"鸷",
u"鷚": u"鹨",
u"鷥": u"鸶",
u"鷦": u"鹪",
u"鷫": u"鹔",
u"鷯": u"鹩",
u"鷲": u"鹫",
u"鷳": u"鹇",
u"鷸": u"鹬",
u"鷹": u"鹰",
u"鷺": u"鹭",
u"鷽": u"鸴",
u"鷿": u"䴙",
u"鸂": u"㶉",
u"鸇": u"鹯",
u"鸌": u"鹱",
u"鸏": u"鹲",
u"鸕": u"鸬",
u"鸘": u"鹴",
u"鸚": u"鹦",
u"鸛": u"鹳",
u"鸝": u"鹂",
u"鸞": u"鸾",
u"鹵": u"卤",
u"鹹": u"咸",
u"鹺": u"鹾",
u"鹽": u"盐",
u"麗": u"丽",
u"麥": u"麦",
u"麩": u"麸",
u"麯": u"曲",
u"麵": u"面",
u"麼": u"么",
u"麽": u"么",
u"黃": u"黄",
u"黌": u"黉",
u"點": u"点",
u"黨": u"党",
u"黲": u"黪",
u"黴": u"霉",
u"黶": u"黡",
u"黷": u"黩",
u"黽": u"黾",
u"黿": u"鼋",
u"鼉": u"鼍",
u"鼕": u"冬",
u"鼴": u"鼹",
u"齊": u"齐",
u"齋": u"斋",
u"齎": u"赍",
u"齏": u"齑",
u"齒": u"齿",
u"齔": u"龀",
u"齕": u"龁",
u"齗": u"龂",
u"齙": u"龅",
u"齜": u"龇",
u"齟": u"龃",
u"齠": u"龆",
u"齡": u"龄",
u"齣": u"出",
u"齦": u"龈",
u"齪": u"龊",
u"齬": u"龉",
u"齲": u"龋",
u"齶": u"腭",
u"齷": u"龌",
u"龍": u"龙",
u"龎": u"厐",
u"龐": u"庞",
u"龔": u"龚",
u"龕": u"龛",
u"龜": u"龟",
u"幾畫": u"几画",
u"賣畫": u"卖画",
u"滷鹼": u"卤碱",
u"原畫": u"原画",
u"口鹼": u"口碱",
u"古畫": u"古画",
u"名畫": u"名画",
u"奇畫": u"奇画",
u"如畫": u"如画",
u"弱鹼": u"弱碱",
u"彩畫": u"彩画",
u"所畫": u"所画",
u"扉畫": u"扉画",
u"教畫": u"教画",
u"水鹼": u"水碱",
u"洋鹼": u"洋碱",
u"炭畫": u"炭画",
u"畫一": u"画一",
u"畫上": u"画上",
u"畫下": u"画下",
u"畫中": u"画中",
u"畫供": u"画供",
u"畫兒": u"画儿",
u"畫具": u"画具",
u"畫出": u"画出",
u"畫史": u"画史",
u"畫品": u"画品",
u"畫商": u"画商",
u"畫圈": u"画圈",
u"畫境": u"画境",
u"畫工": u"画工",
u"畫帖": u"画帖",
u"畫幅": u"画幅",
u"畫意": u"画意",
u"畫成": u"画成",
u"畫景": u"画景",
u"畫本": u"画本",
u"畫架": u"画架",
u"畫框": u"画框",
u"畫法": u"画法",
u"畫王": u"画王",
u"畫界": u"画界",
u"畫符": u"画符",
u"畫紙": u"画纸",
u"畫線": u"画线",
u"畫航": u"画航",
u"畫舫": u"画舫",
u"畫虎": u"画虎",
u"畫論": u"画论",
u"畫譜": u"画谱",
u"畫象": u"画象",
u"畫質": u"画质",
u"畫貼": u"画贴",
u"畫軸": u"画轴",
u"畫頁": u"画页",
u"鹽鹼": u"盐碱",
u"鹼": u"碱",
u"鹼基": u"碱基",
u"鹼度": u"碱度",
u"鹼水": u"碱水",
u"鹼熔": u"碱熔",
u"磁畫": u"磁画",
u"策畫": u"策画",
u"組畫": u"组画",
u"絹畫": u"绢画",
u"耐鹼": u"耐碱",
u"肉鹼": u"肉碱",
u"膠畫": u"胶画",
u"茶鹼": u"茶碱",
u"西畫": u"西画",
u"貼畫": u"贴画",
u"返鹼": u"返碱",
u"鍾鍛": u"锺锻",
u"鍛鍾": u"锻锺",
u"雕畫": u"雕画",
u"鯰": u"鲶",
u"三聯畫": u"三联画",
u"中國畫": u"中国画",
u"書畫": u"书画",
u"書畫社": u"书画社",
u"五筆畫": u"五笔画",
u"作畫": u"作画",
u"入畫": u"入画",
u"寫生畫": u"写生画",
u"刻畫": u"刻画",
u"動畫": u"动画",
u"勾畫": u"勾画",
u"單色畫": u"单色画",
u"卡通畫": u"卡通画",
u"國畫": u"国画",
u"圖畫": u"图画",
u"壁畫": u"壁画",
u"字畫": u"字画",
u"宣傳畫": u"宣传画",
u"工筆畫": u"工笔画",
u"年畫": u"年画",
u"幽默畫": u"幽默画",
u"指畫": u"指画",
u"描畫": u"描画",
u"插畫": u"插画",
u"擘畫": u"擘画",
u"春畫": u"春画",
u"木刻畫": u"木刻画",
u"機械畫": u"机械画",
u"比畫": u"比画",
u"毛筆畫": u"毛笔画",
u"水粉畫": u"水粉画",
u"油畫": u"油画",
u"海景畫": u"海景画",
u"漫畫": u"漫画",
u"點畫": u"点画",
u"版畫": u"版画",
u"畫": u"画",
u"畫像": u"画像",
u"畫冊": u"画册",
u"畫刊": u"画刊",
u"畫匠": u"画匠",
u"畫捲": u"画卷",
u"畫圖": u"画图",
u"畫壇": u"画坛",
u"畫室": u"画室",
u"畫家": u"画家",
u"畫屏": u"画屏",
u"畫展": u"画展",
u"畫布": u"画布",
u"畫師": u"画师",
u"畫廊": u"画廊",
u"畫報": u"画报",
u"畫押": u"画押",
u"畫板": u"画板",
u"畫片": u"画片",
u"畫畫": u"画画",
u"畫皮": u"画皮",
u"畫眉鳥": u"画眉鸟",
u"畫稿": u"画稿",
u"畫筆": u"画笔",
u"畫院": u"画院",
u"畫集": u"画集",
u"畫面": u"画面",
u"筆畫": u"笔画",
u"細密畫": u"细密画",
u"繪畫": u"绘画",
u"自畫像": u"自画像",
u"蠟筆畫": u"蜡笔画",
u"裸體畫": u"裸体画",
u"西洋畫": u"西洋画",
u"透視畫": u"透视画",
u"銅版畫": u"铜版画",
u"鍾": u"锺",
u"靜物畫": u"静物画",
u"餘": u"馀",
}
zh2TW = {
u"缺省": u"預設",
u"串行": u"串列",
u"以太网": u"乙太網",
u"位图": u"點陣圖",
u"例程": u"常式",
u"信道": u"通道",
u"光标": u"游標",
u"光盘": u"光碟",
u"光驱": u"光碟機",
u"全角": u"全形",
u"加载": u"載入",
u"半角": u"半形",
u"变量": u"變數",
u"噪声": u"雜訊",
u"脱机": u"離線",
u"声卡": u"音效卡",
u"老字号": u"老字號",
u"字号": u"字型大小",
u"字库": u"字型檔",
u"字段": u"欄位",
u"字符": u"字元",
u"存盘": u"存檔",
u"寻址": u"定址",
u"尾注": u"章節附註",
u"异步": u"非同步",
u"总线": u"匯流排",
u"括号": u"括弧",
u"接口": u"介面",
u"控件": u"控制項",
u"权限": u"許可權",
u"盘片": u"碟片",
u"硅片": u"矽片",
u"硅谷": u"矽谷",
u"硬盘": u"硬碟",
u"磁盘": u"磁碟",
u"磁道": u"磁軌",
u"程控": u"程式控制",
u"端口": u"埠",
u"算子": u"運算元",
u"算法": u"演算法",
u"芯片": u"晶片",
u"芯片": u"晶元",
u"词组": u"片語",
u"译码": u"解碼",
u"软驱": u"軟碟機",
u"快闪存储器": u"快閃記憶體",
u"闪存": u"快閃記憶體",
u"鼠标": u"滑鼠",
u"进制": u"進位",
u"交互式": u"互動式",
u"仿真": u"模擬",
u"优先级": u"優先順序",
u"传感": u"感測",
u"便携式": u"攜帶型",
u"信息论": u"資訊理論",
u"写保护": u"防寫",
u"分布式": u"分散式",
u"分辨率": u"解析度",
u"服务器": u"伺服器",
u"等于": u"等於",
u"局域网": u"區域網",
u"计算机": u"電腦",
u"扫瞄仪": u"掃瞄器",
u"宽带": u"寬頻",
u"数据库": u"資料庫",
u"奶酪": u"乳酪",
u"巨商": u"鉅賈",
u"手电": u"手電筒",
u"万历": u"萬曆",
u"永历": u"永曆",
u"词汇": u"辭彙",
u"习用": u"慣用",
u"元音": u"母音",
u"任意球": u"自由球",
u"头球": u"頭槌",
u"入球": u"進球",
u"粒入球": u"顆進球",
u"打门": u"射門",
u"火锅盖帽": u"蓋火鍋",
u"打印机": u"印表機",
u"打印機": u"印表機",
u"字节": u"位元組",
u"字節": u"位元組",
u"打印": u"列印",
u"打印": u"列印",
u"硬件": u"硬體",
u"硬件": u"硬體",
u"二极管": u"二極體",
u"二極管": u"二極體",
u"三极管": u"三極體",
u"三極管": u"三極體",
u"软件": u"軟體",
u"軟件": u"軟體",
u"网络": u"網路",
u"網絡": u"網路",
u"人工智能": u"人工智慧",
u"航天飞机": u"太空梭",
u"穿梭機": u"太空梭",
u"因特网": u"網際網路",
u"互聯網": u"網際網路",
u"机器人": u"機器人",
u"機械人": u"機器人",
u"移动电话": u"行動電話",
u"流動電話": u"行動電話",
u"调制解调器": u"數據機",
u"調制解調器": u"數據機",
u"短信": u"簡訊",
u"短訊": u"簡訊",
u"乌兹别克斯坦": u"烏茲別克",
u"乍得": u"查德",
u"乍得": u"查德",
u"也门": u"葉門",
u"也門": u"葉門",
u"伯利兹": u"貝里斯",
u"伯利茲": u"貝里斯",
u"佛得角": u"維德角",
u"佛得角": u"維德角",
u"克罗地亚": u"克羅埃西亞",
u"克羅地亞": u"克羅埃西亞",
u"冈比亚": u"甘比亞",
u"岡比亞": u"甘比亞",
u"几内亚比绍": u"幾內亞比索",
u"幾內亞比紹": u"幾內亞比索",
u"列支敦士登": u"列支敦斯登",
u"列支敦士登": u"列支敦斯登",
u"利比里亚": u"賴比瑞亞",
u"利比里亞": u"賴比瑞亞",
u"加纳": u"迦納",
u"加納": u"迦納",
u"加蓬": u"加彭",
u"加蓬": u"加彭",
u"博茨瓦纳": u"波札那",
u"博茨瓦納": u"波札那",
u"卡塔尔": u"卡達",
u"卡塔爾": u"卡達",
u"卢旺达": u"盧安達",
u"盧旺達": u"盧安達",
u"危地马拉": u"瓜地馬拉",
u"危地馬拉": u"瓜地馬拉",
u"厄瓜多尔": u"厄瓜多",
u"厄瓜多爾": u"厄瓜多",
u"厄立特里亚": u"厄利垂亞",
u"厄立特里亞": u"厄利垂亞",
u"吉布提": u"吉布地",
u"吉布堤": u"吉布地",
u"哈萨克斯坦": u"哈薩克",
u"哥斯达黎加": u"哥斯大黎加",
u"哥斯達黎加": u"哥斯大黎加",
u"图瓦卢": u"吐瓦魯",
u"圖瓦盧": u"吐瓦魯",
u"土库曼斯坦": u"土庫曼",
u"圣卢西亚": u"聖露西亞",
u"聖盧西亞": u"聖露西亞",
u"圣基茨和尼维斯": u"聖克里斯多福及尼維斯",
u"聖吉斯納域斯": u"聖克里斯多福及尼維斯",
u"圣文森特和格林纳丁斯": u"聖文森及格瑞那丁",
u"聖文森特和格林納丁斯": u"聖文森及格瑞那丁",
u"圣马力诺": u"聖馬利諾",
u"聖馬力諾": u"聖馬利諾",
u"圭亚那": u"蓋亞那",
u"圭亞那": u"蓋亞那",
u"坦桑尼亚": u"坦尚尼亞",
u"坦桑尼亞": u"坦尚尼亞",
u"埃塞俄比亚": u"衣索比亞",
u"埃塞俄比亞": u"衣索比亞",
u"基里巴斯": u"吉里巴斯",
u"基里巴斯": u"吉里巴斯",
u"塔吉克斯坦": u"塔吉克",
u"塞拉利昂": u"獅子山",
u"塞拉利昂": u"獅子山",
u"塞浦路斯": u"塞普勒斯",
u"塞浦路斯": u"塞普勒斯",
u"塞舌尔": u"塞席爾",
u"塞舌爾": u"塞席爾",
u"多米尼加": u"多明尼加",
u"多明尼加共和國": u"多明尼加",
u"多米尼加联邦": u"多米尼克",
u"多明尼加聯邦": u"多米尼克",
u"安提瓜和巴布达": u"安地卡及巴布達",
u"安提瓜和巴布達": u"安地卡及巴布達",
u"尼日利亚": u"奈及利亞",
u"尼日利亞": u"奈及利亞",
u"尼日尔": u"尼日",
u"尼日爾": u"尼日",
u"巴巴多斯": u"巴貝多",
u"巴巴多斯": u"巴貝多",
u"巴布亚新几内亚": u"巴布亞紐幾內亞",
u"巴布亞新畿內亞": u"巴布亞紐幾內亞",
u"布基纳法索": u"布吉納法索",
u"布基納法索": u"布吉納法索",
u"布隆迪": u"蒲隆地",
u"布隆迪": u"蒲隆地",
u"希腊": u"希臘",
u"帕劳": u"帛琉",
u"意大利": u"義大利",
u"意大利": u"義大利",
u"所罗门群岛": u"索羅門群島",
u"所羅門群島": u"索羅門群島",
u"文莱": u"汶萊",
u"斯威士兰": u"史瓦濟蘭",
u"斯威士蘭": u"史瓦濟蘭",
u"斯洛文尼亚": u"斯洛維尼亞",
u"斯洛文尼亞": u"斯洛維尼亞",
u"新西兰": u"紐西蘭",
u"新西蘭": u"紐西蘭",
u"格林纳达": u"格瑞那達",
u"格林納達": u"格瑞那達",
u"格鲁吉亚": u"喬治亞",
u"格魯吉亞": u"喬治亞",
u"佐治亚": u"喬治亞",
u"佐治亞": u"喬治亞",
u"毛里塔尼亚": u"茅利塔尼亞",
u"毛里塔尼亞": u"茅利塔尼亞",
u"毛里求斯": u"模里西斯",
u"毛里裘斯": u"模里西斯",
u"沙特阿拉伯": u"沙烏地阿拉伯",
u"沙地阿拉伯": u"沙烏地阿拉伯",
u"波斯尼亚和黑塞哥维那": u"波士尼亞赫塞哥維納",
u"波斯尼亞黑塞哥維那": u"波士尼亞赫塞哥維納",
u"津巴布韦": u"辛巴威",
u"津巴布韋": u"辛巴威",
u"洪都拉斯": u"宏都拉斯",
u"洪都拉斯": u"宏都拉斯",
u"特立尼达和托巴哥": u"千里達托貝哥",
u"特立尼達和多巴哥": u"千里達托貝哥",
u"瑙鲁": u"諾魯",
u"瑙魯": u"諾魯",
u"瓦努阿图": u"萬那杜",
u"瓦努阿圖": u"萬那杜",
u"溫納圖萬": u"那杜",
u"科摩罗": u"葛摩",
u"科摩羅": u"葛摩",
u"科特迪瓦": u"象牙海岸",
u"突尼斯": u"突尼西亞",
u"索马里": u"索馬利亞",
u"索馬里": u"索馬利亞",
u"老挝": u"寮國",
u"老撾": u"寮國",
u"肯尼亚": u"肯亞",
u"肯雅": u"肯亞",
u"苏里南": u"蘇利南",
u"莫桑比克": u"莫三比克",
u"莱索托": u"賴索托",
u"萊索托": u"賴索托",
u"贝宁": u"貝南",
u"貝寧": u"貝南",
u"赞比亚": u"尚比亞",
u"贊比亞": u"尚比亞",
u"阿塞拜疆": u"亞塞拜然",
u"阿塞拜疆": u"亞塞拜然",
u"阿拉伯联合酋长国": u"阿拉伯聯合大公國",
u"阿拉伯聯合酋長國": u"阿拉伯聯合大公國",
u"马尔代夫": u"馬爾地夫",
u"馬爾代夫": u"馬爾地夫",
u"马耳他": u"馬爾他",
u"马里共和国": u"馬利共和國",
u"馬里共和國": u"馬利共和國",
u"方便面": u"速食麵",
u"快速面": u"速食麵",
u"即食麵": u"速食麵",
u"薯仔": u"土豆",
u"蹦极跳": u"笨豬跳",
u"绑紧跳": u"笨豬跳",
u"冷菜": u"冷盤",
u"凉菜": u"冷盤",
u"出租车": u"計程車",
u"台球": u"撞球",
u"桌球": u"撞球",
u"雪糕": u"冰淇淋",
u"卫生": u"衛生",
u"衞生": u"衛生",
u"平治": u"賓士",
u"奔驰": u"賓士",
u"積架": u"捷豹",
u"福士": u"福斯",
u"雪铁龙": u"雪鐵龍",
u"马自达": u"馬自達",
u"萬事得": u"馬自達",
u"拿破仑": u"拿破崙",
u"拿破侖": u"拿破崙",
u"布什": u"布希",
u"布殊": u"布希",
u"克林顿": u"柯林頓",
u"克林頓": u"柯林頓",
u"侯赛因": u"海珊",
u"侯賽因": u"海珊",
u"凡高": u"梵谷",
u"狄安娜": u"黛安娜",
u"戴安娜": u"黛安娜",
u"赫拉": u"希拉",
}
zh2HK = {
u"打印机": u"打印機",
u"印表機": u"打印機",
u"字节": u"位元組",
u"字節": u"位元組",
u"打印": u"打印",
u"列印": u"打印",
u"硬件": u"硬件",
u"硬體": u"硬件",
u"二极管": u"二極管",
u"二極體": u"二極管",
u"三极管": u"三極管",
u"三極體": u"三極管",
u"数码": u"數碼",
u"數位": u"數碼",
u"软件": u"軟件",
u"軟體": u"軟件",
u"网络": u"網絡",
u"網路": u"網絡",
u"人工智能": u"人工智能",
u"人工智慧": u"人工智能",
u"航天飞机": u"穿梭機",
u"太空梭": u"穿梭機",
u"因特网": u"互聯網",
u"網際網路": u"互聯網",
u"机器人": u"機械人",
u"機器人": u"機械人",
u"移动电话": u"流動電話",
u"行動電話": u"流動電話",
u"调制解调器": u"調制解調器",
u"數據機": u"調制解調器",
u"短信": u"短訊",
u"簡訊": u"短訊",
u"乍得": u"乍得",
u"查德": u"乍得",
u"也门": u"也門",
u"葉門": u"也門",
u"伯利兹": u"伯利茲",
u"貝里斯": u"伯利茲",
u"佛得角": u"佛得角",
u"維德角": u"佛得角",
u"克罗地亚": u"克羅地亞",
u"克羅埃西亞": u"克羅地亞",
u"冈比亚": u"岡比亞",
u"甘比亞": u"岡比亞",
u"几内亚比绍": u"幾內亞比紹",
u"幾內亞比索": u"幾內亞比紹",
u"列支敦士登": u"列支敦士登",
u"列支敦斯登": u"列支敦士登",
u"利比里亚": u"利比里亞",
u"賴比瑞亞": u"利比里亞",
u"加纳": u"加納",
u"迦納": u"加納",
u"加蓬": u"加蓬",
u"加彭": u"加蓬",
u"博茨瓦纳": u"博茨瓦納",
u"波札那": u"博茨瓦納",
u"卡塔尔": u"卡塔爾",
u"卡達": u"卡塔爾",
u"卢旺达": u"盧旺達",
u"盧安達": u"盧旺達",
u"危地马拉": u"危地馬拉",
u"瓜地馬拉": u"危地馬拉",
u"厄瓜多尔": u"厄瓜多爾",
u"厄瓜多": u"厄瓜多爾",
u"厄立特里亚": u"厄立特里亞",
u"厄利垂亞": u"厄立特里亞",
u"吉布提": u"吉布堤",
u"吉布地": u"吉布堤",
u"哥斯达黎加": u"哥斯達黎加",
u"哥斯大黎加": u"哥斯達黎加",
u"图瓦卢": u"圖瓦盧",
u"吐瓦魯": u"圖瓦盧",
u"圣卢西亚": u"聖盧西亞",
u"聖露西亞": u"聖盧西亞",
u"圣基茨和尼维斯": u"聖吉斯納域斯",
u"聖克里斯多福及尼維斯": u"聖吉斯納域斯",
u"圣文森特和格林纳丁斯": u"聖文森特和格林納丁斯",
u"聖文森及格瑞那丁": u"聖文森特和格林納丁斯",
u"圣马力诺": u"聖馬力諾",
u"聖馬利諾": u"聖馬力諾",
u"圭亚那": u"圭亞那",
u"蓋亞那": u"圭亞那",
u"坦桑尼亚": u"坦桑尼亞",
u"坦尚尼亞": u"坦桑尼亞",
u"埃塞俄比亚": u"埃塞俄比亞",
u"衣索匹亞": u"埃塞俄比亞",
u"衣索比亞": u"埃塞俄比亞",
u"基里巴斯": u"基里巴斯",
u"吉里巴斯": u"基里巴斯",
u"狮子山": u"獅子山",
u"塞普勒斯": u"塞浦路斯",
u"塞舌尔": u"塞舌爾",
u"塞席爾": u"塞舌爾",
u"多米尼加": u"多明尼加共和國",
u"多明尼加": u"多明尼加共和國",
u"多米尼加联邦": u"多明尼加聯邦",
u"多米尼克": u"多明尼加聯邦",
u"安提瓜和巴布达": u"安提瓜和巴布達",
u"安地卡及巴布達": u"安提瓜和巴布達",
u"尼日利亚": u"尼日利亞",
u"奈及利亞": u"尼日利亞",
u"尼日尔": u"尼日爾",
u"尼日": u"尼日爾",
u"巴巴多斯": u"巴巴多斯",
u"巴貝多": u"巴巴多斯",
u"巴布亚新几内亚": u"巴布亞新畿內亞",
u"巴布亞紐幾內亞": u"巴布亞新畿內亞",
u"布基纳法索": u"布基納法索",
u"布吉納法索": u"布基納法索",
u"布隆迪": u"布隆迪",
u"蒲隆地": u"布隆迪",
u"義大利": u"意大利",
u"所罗门群岛": u"所羅門群島",
u"索羅門群島": u"所羅門群島",
u"斯威士兰": u"斯威士蘭",
u"史瓦濟蘭": u"斯威士蘭",
u"斯洛文尼亚": u"斯洛文尼亞",
u"斯洛維尼亞": u"斯洛文尼亞",
u"新西兰": u"新西蘭",
u"紐西蘭": u"新西蘭",
u"格林纳达": u"格林納達",
u"格瑞那達": u"格林納達",
u"格鲁吉亚": u"喬治亞",
u"格魯吉亞": u"喬治亞",
u"梵蒂冈": u"梵蒂岡",
u"毛里塔尼亚": u"毛里塔尼亞",
u"茅利塔尼亞": u"毛里塔尼亞",
u"毛里求斯": u"毛里裘斯",
u"模里西斯": u"毛里裘斯",
u"沙烏地阿拉伯": u"沙特阿拉伯",
u"波斯尼亚和黑塞哥维那": u"波斯尼亞黑塞哥維那",
u"波士尼亞赫塞哥維納": u"波斯尼亞黑塞哥維那",
u"津巴布韦": u"津巴布韋",
u"辛巴威": u"津巴布韋",
u"洪都拉斯": u"洪都拉斯",
u"宏都拉斯": u"洪都拉斯",
u"特立尼达和托巴哥": u"特立尼達和多巴哥",
u"千里達托貝哥": u"特立尼達和多巴哥",
u"瑙鲁": u"瑙魯",
u"諾魯": u"瑙魯",
u"瓦努阿图": u"瓦努阿圖",
u"萬那杜": u"瓦努阿圖",
u"科摩罗": u"科摩羅",
u"葛摩": u"科摩羅",
u"索马里": u"索馬里",
u"索馬利亞": u"索馬里",
u"老挝": u"老撾",
u"寮國": u"老撾",
u"肯尼亚": u"肯雅",
u"肯亞": u"肯雅",
u"莫桑比克": u"莫桑比克",
u"莫三比克": u"莫桑比克",
u"莱索托": u"萊索托",
u"賴索托": u"萊索托",
u"贝宁": u"貝寧",
u"貝南": u"貝寧",
u"赞比亚": u"贊比亞",
u"尚比亞": u"贊比亞",
u"阿塞拜疆": u"阿塞拜疆",
u"亞塞拜然": u"阿塞拜疆",
u"阿拉伯联合酋长国": u"阿拉伯聯合酋長國",
u"阿拉伯聯合大公國": u"阿拉伯聯合酋長國",
u"马尔代夫": u"馬爾代夫",
u"馬爾地夫": u"馬爾代夫",
u"馬利共和國": u"馬里共和國",
u"方便面": u"即食麵",
u"快速面": u"即食麵",
u"速食麵": u"即食麵",
u"泡麵": u"即食麵",
u"土豆": u"馬鈴薯",
u"华乐": u"中樂",
u"民乐": u"中樂",
u"計程車": u"的士",
u"出租车": u"的士",
u"公車": u"巴士",
u"自行车": u"單車",
u"犬只": u"狗隻",
u"台球": u"桌球",
u"撞球": u"桌球",
u"冰淇淋": u"雪糕",
u"賓士": u"平治",
u"捷豹": u"積架",
u"福斯": u"福士",
u"雪铁龙": u"先進",
u"雪鐵龍": u"先進",
u"沃尓沃": u"富豪",
u"马自达": u"萬事得",
u"馬自達": u"萬事得",
u"寶獅": u"標致",
u"拿破崙": u"拿破侖",
u"布什": u"布殊",
u"布希": u"布殊",
u"克林顿": u"克林頓",
u"柯林頓": u"克林頓",
u"萨达姆": u"薩達姆",
u"海珊": u"侯賽因",
u"侯赛因": u"侯賽因",
u"大卫·贝克汉姆": u"大衛碧咸",
u"迈克尔·欧文": u"米高奧雲",
u"珍妮弗·卡普里亚蒂": u"卡佩雅蒂",
u"马拉特·萨芬": u"沙芬",
u"迈克尔·舒马赫": u"舒麥加",
u"希特勒": u"希特拉",
u"狄安娜": u"戴安娜",
u"黛安娜": u"戴安娜",
}
zh2CN = {
u"記憶體": u"内存",
u"預設": u"默认",
u"串列": u"串行",
u"乙太網": u"以太网",
u"點陣圖": u"位图",
u"常式": u"例程",
u"游標": u"光标",
u"光碟": u"光盘",
u"光碟機": u"光驱",
u"全形": u"全角",
u"共用": u"共享",
u"載入": u"加载",
u"半形": u"半角",
u"變數": u"变量",
u"雜訊": u"噪声",
u"因數": u"因子",
u"功能變數名稱": u"域名",
u"音效卡": u"声卡",
u"字型大小": u"字号",
u"字型檔": u"字库",
u"欄位": u"字段",
u"字元": u"字符",
u"存檔": u"存盘",
u"定址": u"寻址",
u"章節附註": u"尾注",
u"非同步": u"异步",
u"匯流排": u"总线",
u"括弧": u"括号",
u"介面": u"接口",
u"控制項": u"控件",
u"許可權": u"权限",
u"碟片": u"盘片",
u"矽片": u"硅片",
u"矽谷": u"硅谷",
u"硬碟": u"硬盘",
u"磁碟": u"磁盘",
u"磁軌": u"磁道",
u"程式控制": u"程控",
u"運算元": u"算子",
u"演算法": u"算法",
u"晶片": u"芯片",
u"晶元": u"芯片",
u"片語": u"词组",
u"軟碟機": u"软驱",
u"快閃記憶體": u"快闪存储器",
u"滑鼠": u"鼠标",
u"進位": u"进制",
u"互動式": u"交互式",
u"優先順序": u"优先级",
u"感測": u"传感",
u"攜帶型": u"便携式",
u"資訊理論": u"信息论",
u"迴圈": u"循环",
u"防寫": u"写保护",
u"分散式": u"分布式",
u"解析度": u"分辨率",
u"伺服器": u"服务器",
u"等於": u"等于",
u"區域網": u"局域网",
u"巨集": u"宏",
u"掃瞄器": u"扫瞄仪",
u"寬頻": u"宽带",
u"資料庫": u"数据库",
u"乳酪": u"奶酪",
u"鉅賈": u"巨商",
u"手電筒": u"手电",
u"萬曆": u"万历",
u"永曆": u"永历",
u"辭彙": u"词汇",
u"母音": u"元音",
u"自由球": u"任意球",
u"頭槌": u"头球",
u"進球": u"入球",
u"顆進球": u"粒入球",
u"射門": u"打门",
u"蓋火鍋": u"火锅盖帽",
u"印表機": u"打印机",
u"打印機": u"打印机",
u"位元組": u"字节",
u"字節": u"字节",
u"列印": u"打印",
u"打印": u"打印",
u"硬體": u"硬件",
u"二極體": u"二极管",
u"二極管": u"二极管",
u"三極體": u"三极管",
u"三極管": u"三极管",
u"數位": u"数码",
u"數碼": u"数码",
u"軟體": u"软件",
u"軟件": u"软件",
u"網路": u"网络",
u"網絡": u"网络",
u"人工智慧": u"人工智能",
u"太空梭": u"航天飞机",
u"穿梭機": u"航天飞机",
u"網際網路": u"因特网",
u"互聯網": u"因特网",
u"機械人": u"机器人",
u"機器人": u"机器人",
u"行動電話": u"移动电话",
u"流動電話": u"移动电话",
u"調制解調器": u"调制解调器",
u"數據機": u"调制解调器",
u"短訊": u"短信",
u"簡訊": u"短信",
u"烏茲別克": u"乌兹别克斯坦",
u"查德": u"乍得",
u"乍得": u"乍得",
u"也門": u"",
u"葉門": u"也门",
u"伯利茲": u"伯利兹",
u"貝里斯": u"伯利兹",
u"維德角": u"佛得角",
u"佛得角": u"佛得角",
u"克羅地亞": u"克罗地亚",
u"克羅埃西亞": u"克罗地亚",
u"岡比亞": u"冈比亚",
u"甘比亞": u"冈比亚",
u"幾內亞比紹": u"几内亚比绍",
u"幾內亞比索": u"几内亚比绍",
u"列支敦斯登": u"列支敦士登",
u"列支敦士登": u"列支敦士登",
u"利比里亞": u"利比里亚",
u"賴比瑞亞": u"利比里亚",
u"加納": u"加纳",
u"迦納": u"加纳",
u"加彭": u"加蓬",
u"加蓬": u"加蓬",
u"博茨瓦納": u"博茨瓦纳",
u"波札那": u"博茨瓦纳",
u"卡塔爾": u"卡塔尔",
u"卡達": u"卡塔尔",
u"盧旺達": u"卢旺达",
u"盧安達": u"卢旺达",
u"危地馬拉": u"危地马拉",
u"瓜地馬拉": u"危地马拉",
u"厄瓜多爾": u"厄瓜多尔",
u"厄瓜多": u"厄瓜多尔",
u"厄立特里亞": u"厄立特里亚",
u"厄利垂亞": u"厄立特里亚",
u"吉布堤": u"吉布提",
u"吉布地": u"吉布提",
u"哈薩克": u"哈萨克斯坦",
u"哥斯達黎加": u"哥斯达黎加",
u"哥斯大黎加": u"哥斯达黎加",
u"圖瓦盧": u"图瓦卢",
u"吐瓦魯": u"图瓦卢",
u"土庫曼": u"土库曼斯坦",
u"聖盧西亞": u"圣卢西亚",
u"聖露西亞": u"圣卢西亚",
u"聖吉斯納域斯": u"圣基茨和尼维斯",
u"聖克里斯多福及尼維斯": u"圣基茨和尼维斯",
u"聖文森特和格林納丁斯": u"圣文森特和格林纳丁斯",
u"聖文森及格瑞那丁": u"圣文森特和格林纳丁斯",
u"聖馬力諾": u"圣马力诺",
u"聖馬利諾": u"圣马力诺",
u"圭亞那": u"圭亚那",
u"蓋亞那": u"圭亚那",
u"坦桑尼亞": u"坦桑尼亚",
u"坦尚尼亞": u"坦桑尼亚",
u"埃塞俄比亞": u"埃塞俄比亚",
u"衣索匹亞": u"埃塞俄比亚",
u"衣索比亞": u"埃塞俄比亚",
u"吉里巴斯": u"基里巴斯",
u"基里巴斯": u"基里巴斯",
u"塔吉克": u"塔吉克斯坦",
u"塞拉利昂": u"塞拉利昂",
u"塞普勒斯": u"塞浦路斯",
u"塞浦路斯": u"塞浦路斯",
u"塞舌爾": u"塞舌尔",
u"塞席爾": u"塞舌尔",
u"多明尼加共和國": u"多米尼加",
u"多明尼加": u"多米尼加",
u"多明尼加聯邦": u"多米尼加联邦",
u"多米尼克": u"多米尼加联邦",
u"安提瓜和巴布達": u"安提瓜和巴布达",
u"安地卡及巴布達": u"安提瓜和巴布达",
u"尼日利亞": u"尼日利亚",
u"奈及利亞": u"尼日利亚",
u"尼日爾": u"尼日尔",
u"尼日": u"尼日尔",
u"巴貝多": u"巴巴多斯",
u"巴巴多斯": u"巴巴多斯",
u"巴布亞新畿內亞": u"巴布亚新几内亚",
u"巴布亞紐幾內亞": u"巴布亚新几内亚",
u"布基納法索": u"布基纳法索",
u"布吉納法索": u"布基纳法索",
u"蒲隆地": u"布隆迪",
u"布隆迪": u"布隆迪",
u"希臘": u"希腊",
u"帛琉": u"帕劳",
u"義大利": u"意大利",
u"意大利": u"意大利",
u"所羅門群島": u"所罗门群岛",
u"索羅門群島": u"所罗门群岛",
u"汶萊": u"文莱",
u"斯威士蘭": u"斯威士兰",
u"史瓦濟蘭": u"斯威士兰",
u"斯洛文尼亞": u"斯洛文尼亚",
u"斯洛維尼亞": u"斯洛文尼亚",
u"新西蘭": u"新西兰",
u"紐西蘭": u"新西兰",
u"格林納達": u"格林纳达",
u"格瑞那達": u"格林纳达",
u"格魯吉亞": u"乔治亚",
u"喬治亞": u"乔治亚",
u"梵蒂岡": u"梵蒂冈",
u"毛里塔尼亞": u"毛里塔尼亚",
u"茅利塔尼亞": u"毛里塔尼亚",
u"毛里裘斯": u"毛里求斯",
u"模里西斯": u"毛里求斯",
u"沙地阿拉伯": u"沙特阿拉伯",
u"沙烏地阿拉伯": u"沙特阿拉伯",
u"波斯尼亞黑塞哥維那": u"波斯尼亚和黑塞哥维那",
u"波士尼亞赫塞哥維納": u"波斯尼亚和黑塞哥维那",
u"津巴布韋": u"津巴布韦",
u"辛巴威": u"津巴布韦",
u"宏都拉斯": u"洪都拉斯",
u"洪都拉斯": u"洪都拉斯",
u"特立尼達和多巴哥": u"特立尼达和托巴哥",
u"千里達托貝哥": u"特立尼达和托巴哥",
u"瑙魯": u"瑙鲁",
u"諾魯": u"瑙鲁",
u"瓦努阿圖": u"瓦努阿图",
u"萬那杜": u"瓦努阿图",
u"溫納圖": u"瓦努阿图",
u"科摩羅": u"科摩罗",
u"葛摩": u"科摩罗",
u"象牙海岸": u"科特迪瓦",
u"突尼西亞": u"突尼斯",
u"索馬里": u"索马里",
u"索馬利亞": u"索马里",
u"老撾": u"老挝",
u"寮國": u"老挝",
u"肯雅": u"肯尼亚",
u"肯亞": u"肯尼亚",
u"蘇利南": u"苏里南",
u"莫三比克": u"莫桑比克",
u"莫桑比克": u"莫桑比克",
u"萊索托": u"莱索托",
u"賴索托": u"莱索托",
u"貝寧": u"贝宁",
u"貝南": u"贝宁",
u"贊比亞": u"赞比亚",
u"尚比亞": u"赞比亚",
u"亞塞拜然": u"阿塞拜疆",
u"阿塞拜疆": u"阿塞拜疆",
u"阿拉伯聯合酋長國": u"阿拉伯联合酋长国",
u"阿拉伯聯合大公國": u"阿拉伯联合酋长国",
u"南韓": u"韩国",
u"馬爾代夫": u"马尔代夫",
u"馬爾地夫": u"马尔代夫",
u"馬爾他": u"马耳他",
u"馬利共和國": u"马里共和国",
u"即食麵": u"方便面",
u"快速面": u"方便面",
u"速食麵": u"方便面",
u"泡麵": u"方便面",
u"笨豬跳": u"蹦极跳",
u"绑紧跳": u"蹦极跳",
u"冷盤": u"凉菜",
u"冷菜": u"凉菜",
u"散钱": u"零钱",
u"谐星": u"笑星",
u"夜学": u"夜校",
u"华乐": u"民乐",
u"中樂": u"民乐",
u"屋价": u"房价",
u"的士": u"出租车",
u"計程車": u"出租车",
u"公車": u"公共汽车",
u"單車": u"自行车",
u"節慶": u"节日",
u"芝士": u"乾酪",
u"狗隻": u"犬只",
u"士多啤梨": u"草莓",
u"忌廉": u"奶油",
u"桌球": u"台球",
u"撞球": u"台球",
u"雪糕": u"冰淇淋",
u"衞生": u"卫生",
u"衛生": u"卫生",
u"賓士": u"奔驰",
u"平治": u"奔驰",
u"積架": u"捷豹",
u"福斯": u"大众",
u"福士": u"大众",
u"雪鐵龍": u"雪铁龙",
u"萬事得": u"马自达",
u"馬自達": u"马自达",
u"寶獅": u"标志",
u"拿破崙": u"拿破仑",
u"布殊": u"布什",
u"布希": u"布什",
u"柯林頓": u"克林顿",
u"克林頓": u"克林顿",
u"薩達姆": u"萨达姆",
u"海珊": u"萨达姆",
u"梵谷": u"凡高",
u"大衛碧咸": u"大卫·贝克汉姆",
u"米高奧雲": u"迈克尔·欧文",
u"卡佩雅蒂": u"珍妮弗·卡普里亚蒂",
u"沙芬": u"马拉特·萨芬",
u"舒麥加": u"迈克尔·舒马赫",
u"希特拉": u"希特勒",
u"黛安娜": u"戴安娜",
u"希拉": u"赫拉",
}
# Vocabulary replacements specific to Singapore usage (zh-sg), applied on
# top of the simplified-Chinese base conversion (food, colloquial terms).
zh2SG = {
    u"方便面": u"快速面",
    u"速食麵": u"快速面",
    u"即食麵": u"快速面",
    u"蹦极跳": u"绑紧跳",
    u"笨豬跳": u"绑紧跳",
    u"凉菜": u"冷菜",
    u"冷盤": u"冷菜",
    u"零钱": u"散钱",
    u"散紙": u"散钱",
    u"笑星": u"谐星",
    u"夜校": u"夜学",
    u"民乐": u"华乐",
    u"住房": u"住屋",
    u"房价": u"屋价",
    u"泡麵": u"快速面",
}
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from unittest import TestCase
from langconv import *
class ConvertMapTest(TestCase):
    """Unit tests for ConvertMap's prefix-trie construction."""
    def test_map(self):
        mapping = {'a': 'b', 'b': 'a', 'abc': 'cba', 'cb': 'bb'}
        cm = ConvertMap('test', mapping)
        # 4 mapped keys plus the switch (prefix) nodes 'ab' and 'c'.
        self.assertEqual(len(cm), 6)
        # failUnless/failIf are deprecated unittest aliases; use the
        # modern assertTrue/assertFalse names.
        self.assertTrue('a' in cm)
        self.assertTrue('c' in cm)
        self.assertFalse('bc' in cm)
        # Node.data is (is_tail, have_child, to_word).
        self.assertEqual(cm['a'].data, (True, True, 'b'))
        self.assertEqual(cm['b'].data, (True, False, 'a'))
        self.assertEqual(cm['c'].data, (False, True, ''))
        self.assertEqual(cm['ab'].data, (False, True, ''))
        self.assertEqual(cm['abc'].data, (True, False, 'cba'))
        self.assertEqual(cm['cb'].data, (True, False, 'bb'))
class ConverterModelTest(TestCase):
    """Exercises Converter.feed()/get_result() character by character,
    covering single-char maps, overlapping keys, longest-match forking
    and flushing pending state via end()."""
    def test_1(self):
        # Simple one-to-one swap table.
        registery('rev', {u'a': u'c', u'c': u'a'})
        c = Converter('rev')
        c.feed(u'a')
        self.assertEqual(c.get_result(), u'c')
        c.feed(u'b')
        self.assertEqual(c.get_result(), u'cb')
        c.feed(u'c')
        self.assertEqual(c.get_result(), u'cba')
    def test_2(self):
        # 'a' alone is only a prefix; nothing is emitted until 'b' arrives.
        registery('2', {u'b': u'a', u'ab': u'ab'})
        c = Converter('2')
        c.feed(u'a')
        self.assertEqual(c.get_result(), u'')
        c.feed(u'b')
        self.assertEqual(c.get_result(), u'ab')
    def test_3(self):
        # Longest match 'ab' wins over the shorter match 'a'.
        registery('3', {u'a': u'b', u'ab': u'ba'})
        c = Converter('3')
        c.feed(u'a')
        self.assertEqual(c.get_result(), u'')
        c.feed(u'b')
        self.assertEqual(c.get_result(), u'ba')
        c.feed(u'a')
        self.assertEqual(c.get_result(), u'ba')
        c.feed(u'c')
        self.assertEqual(c.get_result(), u'babc')
    def test_4(self):
        # Unfinished prefix falls through unchanged when the match breaks.
        registery('4', {u'ab': u'ba'})
        c = Converter('4')
        c.feed(u'a')
        self.assertEqual(c.get_result(), u'')
        c.feed(u'b')
        self.assertEqual(c.get_result(), u'ba')
        c.feed(u'a')
        self.assertEqual(c.get_result(), u'ba')
        c.feed(u'c')
        self.assertEqual(c.get_result(), u'baac')
    def test_5(self):
        # Repeated prefix character: only the final 'ab' converts.
        registery('5', {u'ab': u'ba'})
        c = Converter('5')
        c.feed(u'a')
        self.assertEqual(c.get_result(), u'')
        c.feed(u'a')
        self.assertEqual(c.get_result(), u'')
        c.feed(u'b')
        self.assertEqual(c.get_result(), u'aba')
    def test_6(self):
        # Three-character key; broken second attempt is passed through.
        registery('6', {u'abc': u'cba'})
        c = Converter('6')
        c.feed(u'a')
        c.feed(u'b')
        self.assertEqual(c.get_result(), u'')
        c.feed(u'c')
        self.assertEqual(c.get_result(), u'cba')
        c.feed(u'a')
        c.feed(u'b')
        self.assertEqual(c.get_result(), u'cba')
        c.feed(u'b')
        self.assertEqual(c.get_result(), u'cbaabb')
    def test_7(self):
        # Overlapping keys 'abc' and 'bc'.
        registery('7', {u'abc': u'cba', u'bc': 'cb'})
        c = Converter('7')
        c.feed(u'a')
        c.feed(u'b')
        self.assertEqual(c.get_result(), u'')
        c.feed(u'c')
        self.assertEqual(c.get_result(), u'cba')
        c.feed(u'a')
        self.assertEqual(c.get_result(), u'cba')
        c.feed(u'')
        self.assertEqual(c.get_result(), u'cbaa')
    def test_8(self):
        # 'abc' preferred over its own prefix key 'ab'.
        registery('8', {u'abc': u'cba', u'ab': 'ba'})
        c = Converter('8')
        c.feed(u'a')
        c.feed(u'b')
        self.assertEqual(c.get_result(), u'')
        c.feed(u'c')
        self.assertEqual(c.get_result(), u'cba')
        c.feed(u'a')
        self.assertEqual(c.get_result(), u'cba')
        c.feed(u'b')
        self.assertEqual(c.get_result(), u'cba')
        c.feed(u'b')
        self.assertEqual(c.get_result(), u'cbabab')
    def test_9(self):
        # end() flushes the pending 'b' (dead prefix) and converts 'c'.
        registery('9', {u'bx': u'dx', u'c': u'e', u'cy': u'cy'})
        c = Converter('9')
        c.feed(u'a')
        self.assertEqual(c.get_result(), u'a')
        c.feed(u'b')
        self.assertEqual(c.get_result(), u'a')
        c.feed(u'c')
        self.assertEqual(c.get_result(), u'a')
        c.end()
        self.assertEqual(c.get_result(), u'abe')
    def test_10(self):
        # Two-character match 'ab' beats the pair of single-char matches.
        registery('10', {u'a': u'd', u'b': u'e', u'ab': u'cd', u'by': u'yy'})
        c = Converter('10')
        c.feed(u'a')
        self.assertEqual(c.get_result(), u'')
        c.feed(u'b')
        self.assertEqual(c.get_result(), u'')
        c.feed(u'c')
        c.end()
        self.assertEqual(c.get_result(), u'cdc')
class ConverterTest(TestCase):
    """End-to-end conversions against the real zh-hans/zh-hant tables."""
    def assertConvert(self, name, string, converted):
        # Convert *string* with table *name* and expect *converted*.
        c = Converter(name)
        new = c.convert(string)
        assert new == converted, (
                "convert(%s, '%s') should return '%s' but '%s'" % (
                repr(name), string, converted, new)).encode('utf8')
    def assertST(self, trad, simp):
        # Round trip: traditional -> simplified and simplified -> traditional.
        self.assertConvert('zh-hans', trad, simp)
        self.assertConvert('zh-hant', simp, trad)
    def test_zh1(self):
        # One simplified char maps to several traditional chars depending
        # on context (乾/幹/干).
        self.assertST(u'乾燥', u'干燥')
        self.assertST(u'乾坤', u'乾坤')
        self.assertST(u'乾隆', u'乾隆')
        self.assertST(u'幹事', u'干事')
        self.assertST(u'牛肉乾', u'牛肉干')
        self.assertST(u'相干', u'相干')
    def test_zh2(self):
        # Regional vocabulary, not just character-level mapping.
        self.assertST(u'印表機', u'打印机')
        self.assertST(u'說明檔案', u'帮助文件')
    def test_zh3(self):
        # 髮 and 發 both simplify to 发; conversion back is contextual.
        self.assertST(u'頭髮', u'头发')
        self.assertST(u'頭髮和', u'头发和')
        self.assertST(u'發生', u'发生')
        self.assertST(u'頭髮和發生', u'头发和发生')
    def test_zh4(self):
        # Words that must survive round-tripping unchanged or contextually.
        self.assertST(u'著名', u'著名')
        self.assertST(u'覆蓋', u'覆盖')
        self.assertST(u'翻來覆去', u'翻来覆去')
        self.assertST(u'獃獃', u'呆呆')
        self.assertST(u'獃住', u'呆住')
        self.assertST(u'壁畫', u'壁画')
        self.assertST(u'畫面', u'画面')
        self.assertST(u'顯著', u'显著')
        self.assertST(u'土著人', u'土著人')
        self.assertST(u'長春鹼', u'长春碱')
        self.assertST(u'嘌呤鹼', u'嘌呤碱')
        self.assertST(u'嘧啶鹼', u'嘧啶碱')
# Allow running this test module directly with `python <file>`.
if '__main__' == __name__:
    import unittest
    unittest.main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from copy import deepcopy
import re
try:
import psyco
psyco.full()
except:
pass
from zh_wiki import zh2Hant, zh2Hans
# States of a StatesMachine candidate parse.
(START, END, FAIL, WAIT_TAIL) = range(4)
# Input classifications computed in StatesMachine.feed().
(TAIL, ERROR, MATCHED_SWITCH, UNMATCHED_SWITCH, CONNECTOR) = range(5)
# Registry of named ConvertMap instances, populated by registery().
MAPS = {}
class Node:
    """One entry of a ConvertMap prefix map.

    Attributes:
        from_word:   key looked up in the map.
        to_word:     replacement text (equals from_word for passthrough).
        is_tail:     True when from_word is itself a complete mapped key.
        have_child:  True when from_word is a proper prefix of longer keys.
        is_original: True when the node is a passthrough (key not mapped).
        data:        (is_tail, have_child, to_word) tuple, used by tests.
    """
    def __init__(self, from_word, to_word=None, is_tail=True,
            have_child=False):
        self.from_word = from_word
        if to_word is None:
            # Passthrough node: unknown key maps to itself.
            self.to_word = from_word
            self.data = (is_tail, have_child, from_word)
            self.is_original = True
        else:
            # An empty to_word falls back to the key itself.
            self.to_word = to_word or from_word
            self.data = (is_tail, have_child, to_word)
            self.is_original = False
        self.is_tail = is_tail
        self.have_child = have_child
    def is_original_long_word(self):
        # Multi-character passthrough, i.e. a failed longer-match attempt.
        return self.is_original and len(self.from_word)>1
    def is_follow(self, chars):
        # NOTE(review): despite the name this returns True when *chars*
        # does NOT match the prefix of from_word (callers treat True as
        # "cannot continue") -- confirm before renaming.
        return chars != self.from_word[:-1]
    def __str__(self):
        return '<Node, %s, %s, %s, %s>' % (repr(self.from_word),
                repr(self.to_word), self.is_tail, self.have_child)
    __repr__ = __str__
class ConvertMap:
    """Named conversion table flattened into a prefix map.

    Every key of the source mapping is stored together with all of its
    proper prefixes so that StatesMachine can walk a word one character
    at a time and know whether a longer match may still follow.
    """
    def __init__(self, name, mapping=None):
        self.name = name
        self._map = {}
        # Always defined, even before set_convert_map() is called
        # (previously the attribute was missing when mapping was None).
        self.max_key_length = 0
        if mapping:
            self.set_convert_map(mapping)
    def set_convert_map(self, mapping):
        """Rebuild the internal prefix map from *mapping*."""
        convert_map = {}
        have_child = {}
        max_key_length = 0
        for key in sorted(mapping.keys()):
            if len(key)>1:
                # Mark every proper prefix as having children.
                for i in range(1, len(key)):
                    parent_key = key[:i]
                    have_child[parent_key] = True
            # sorted() guarantees a key is visited before any longer key
            # it prefixes, so this False can still be flipped to True.
            have_child[key] = False
            max_key_length = max(max_key_length, len(key))
        for key in sorted(have_child.keys()):
            # Stored value: (is_tail, has_children, replacement).
            convert_map[key] = (key in mapping, have_child[key],
                    mapping.get(key, ''))
        self._map = convert_map
        self.max_key_length = max_key_length
    def __getitem__(self, k):
        try:
            is_tail, have_child, to_word = self._map[k]
            return Node(k, to_word, is_tail, have_child)
        except KeyError:
            # Narrowed from a bare except: only a missing key means
            # "unknown word"; anything else should propagate.
            return Node(k)
    def __contains__(self, k):
        return k in self._map
    def __len__(self):
        return len(self._map)
class StatesMachineException(Exception): pass
class StatesMachine:
    """One candidate parse of the input stream.

    `pool` holds characters consumed but not yet emitted, `final` the
    converted output so far, `len` the number of emitted words, and
    `state` one of START/END/FAIL/WAIT_TAIL.  When a short match could
    be extended to a longer one, feed() forks a clone that keeps waiting
    for the longer match; Converter later picks the best survivor.
    """
    def __init__(self):
        self.state = START
        self.final = u''
        self.len = 0
        self.pool = u''
    def clone(self, pool):
        # Fork an alternative parse that keeps reading a longer match.
        new = deepcopy(self)
        new.state = WAIT_TAIL
        new.pool = pool
        return new
    def feed(self, char, map):
        node = map[self.pool+char]
        # Classify the looked-up node into one of the five conditions.
        if node.have_child:
            if node.is_tail:
                if node.is_original:
                    cond = UNMATCHED_SWITCH
                else:
                    cond = MATCHED_SWITCH
            else:
                cond = CONNECTOR
        else:
            if node.is_tail:
                cond = TAIL
            else:
                cond = ERROR
        new = None
        if cond == ERROR:
            # Dead prefix: this candidate cannot continue.
            self.state = FAIL
        elif cond == TAIL:
            if self.state == WAIT_TAIL and node.is_original_long_word():
                # The awaited longer match never materialised.
                self.state = FAIL
            else:
                # Complete match: emit the replacement and reset the pool.
                self.final += node.to_word
                self.len += 1
                self.pool = ''
                self.state = END
        elif self.state == START or self.state == WAIT_TAIL:
            if cond == MATCHED_SWITCH:
                # Emit the short match now, but fork a clone that waits
                # for a possibly longer match.
                new = self.clone(node.from_word)
                self.final += node.to_word
                self.len += 1
                self.state = END
                self.pool = ''
            elif cond == UNMATCHED_SWITCH or cond == CONNECTOR:
                if self.state == START:
                    new = self.clone(node.from_word)
                    self.final += node.to_word
                    self.len += 1
                    self.state = END
                else:
                    if node.is_follow(self.pool):
                        # Pool no longer matches the key prefix.
                        self.state = FAIL
                    else:
                        self.pool = node.from_word
        elif self.state == END:
            # END is a new START
            self.state = START
            new = self.feed(char, map)
        elif self.state == FAIL:
            raise StatesMachineException('Translate States Machine '
                    'have error with input data %s' % node)
        return new
    def __len__(self):
        # Number of emitted words + 1; used to rank competing candidates.
        return self.len + 1
    def __str__(self):
        return '<StatesMachine %s, pool: "%s", state: %s, final: %s>' % (
                id(self), self.pool, self.state, self.final)
    __repr__ = __str__
class Converter:
    """Drives a set of StatesMachine candidates over an input string and
    keeps the best (longest-match) conversion."""
    def __init__(self, to_encoding):
        # *to_encoding* is a table name previously passed to registery().
        self.to_encoding = to_encoding
        self.map = MAPS[to_encoding]
        self.start()
    def feed(self, char):
        """Feed one character; returns the text converted so far."""
        branches = []
        for fsm in self.machines:
            new = fsm.feed(char, self.map)
            if new:
                branches.append(new)
        if branches:
            self.machines.extend(branches)
        # Drop candidates that can no longer match.
        self.machines = [fsm for fsm in self.machines if fsm.state != FAIL]
        all_ok = True
        for fsm in self.machines:
            if fsm.state != END:
                all_ok = False
        if all_ok:
            # Every survivor finished a word: commit the best one.
            self._clean()
        return self.get_result()
    def _clean(self):
        if len(self.machines):
            # Fewest emitted words == longest matches wins.
            # NOTE: sort(cmp=...) is Python 2 only.
            self.machines.sort(cmp=lambda x,y: cmp(len(x), len(y)))
            self.final += self.machines[0].final
        self.machines = [StatesMachine()]
    def start(self):
        # Reset to a single fresh candidate and empty output.
        self.machines = [StatesMachine()]
        self.final = u''
    def end(self):
        # Flush at end of input.  NOTE(review): FAIL machines are kept
        # here alongside END ones -- looks intentional (their pooled text
        # was already emitted) but confirm before changing.
        self.machines = [fsm for fsm in self.machines
                if fsm.state == FAIL or fsm.state == END]
        self._clean()
    def convert(self, string):
        """Convert *string* in one shot and return the result."""
        self.start()
        for char in string:
            self.feed(char)
        self.end()
        return self.get_result()
    def get_result(self):
        return self.final
def registery(name, mapping):
    """Build a ConvertMap from *mapping* and register it under *name*."""
    # Mutating the module-level dict needs no `global` declaration.
    MAPS[name] = ConvertMap(name, mapping)
# Register the two built-in tables, then drop the raw dicts to free memory.
registery('zh-hant', zh2Hant)
registery('zh-hans', zh2Hans)
del zh2Hant, zh2Hans
def run():
    """Command-line entry point: convert input line by line (Python 2).

    -e names the registered conversion table, -f the input file ('-' or
    omitted means stdin), -t the output file (default stdout).
    """
    import sys
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-e', type='string', dest='encoding',
            help='encoding')
    parser.add_option('-f', type='string', dest='file_in',
            help='input file (- for stdin)')
    parser.add_option('-t', type='string', dest='file_out',
            help='output file')
    (options, args) = parser.parse_args()
    if not options.encoding:
        parser.error('encoding must be set')
    # Resolve the input stream.
    if options.file_in:
        if options.file_in == '-':
            file_in = sys.stdin
        else:
            file_in = open(options.file_in)
    else:
        file_in = sys.stdin
    # Resolve the output stream.
    if options.file_out:
        if options.file_out == '-':
            file_out = sys.stdout
        else:
            file_out = open(options.file_out, 'w')
    else:
        file_out = sys.stdout
    c = Converter(options.encoding)
    for line in file_in:
        # Python 2 byte strings: decode, convert, re-encode as UTF-8.
        print >> file_out, c.convert(line.rstrip('\n').decode(
                'utf8')).encode('utf8')
# Command-line entry point.
if __name__ == '__main__':
    run()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from copy import deepcopy
import re
try:
import psyco
psyco.full()
except:
pass
from zh_wiki import zh2Hant, zh2Hans
# States of a StatesMachine candidate parse.
(START, END, FAIL, WAIT_TAIL) = range(4)
# Input classifications computed in StatesMachine.feed().
(TAIL, ERROR, MATCHED_SWITCH, UNMATCHED_SWITCH, CONNECTOR) = range(5)
# Registry of named ConvertMap instances, populated by registery().
MAPS = {}
class Node:
    """One entry of a ConvertMap prefix map.

    Attributes:
        from_word:   key looked up in the map.
        to_word:     replacement text (equals from_word for passthrough).
        is_tail:     True when from_word is itself a complete mapped key.
        have_child:  True when from_word is a proper prefix of longer keys.
        is_original: True when the node is a passthrough (key not mapped).
        data:        (is_tail, have_child, to_word) tuple, used by tests.
    """
    def __init__(self, from_word, to_word=None, is_tail=True,
            have_child=False):
        self.from_word = from_word
        if to_word is None:
            # Passthrough node: unknown key maps to itself.
            self.to_word = from_word
            self.data = (is_tail, have_child, from_word)
            self.is_original = True
        else:
            # An empty to_word falls back to the key itself.
            self.to_word = to_word or from_word
            self.data = (is_tail, have_child, to_word)
            self.is_original = False
        self.is_tail = is_tail
        self.have_child = have_child
    def is_original_long_word(self):
        # Multi-character passthrough, i.e. a failed longer-match attempt.
        return self.is_original and len(self.from_word)>1
    def is_follow(self, chars):
        # NOTE(review): despite the name this returns True when *chars*
        # does NOT match the prefix of from_word (callers treat True as
        # "cannot continue") -- confirm before renaming.
        return chars != self.from_word[:-1]
    def __str__(self):
        return '<Node, %s, %s, %s, %s>' % (repr(self.from_word),
                repr(self.to_word), self.is_tail, self.have_child)
    __repr__ = __str__
class ConvertMap:
    """Named conversion table flattened into a prefix map.

    Every key of the source mapping is stored together with all of its
    proper prefixes so that StatesMachine can walk a word one character
    at a time and know whether a longer match may still follow.
    """
    def __init__(self, name, mapping=None):
        self.name = name
        self._map = {}
        # Always defined, even before set_convert_map() is called
        # (previously the attribute was missing when mapping was None).
        self.max_key_length = 0
        if mapping:
            self.set_convert_map(mapping)
    def set_convert_map(self, mapping):
        """Rebuild the internal prefix map from *mapping*."""
        convert_map = {}
        have_child = {}
        max_key_length = 0
        for key in sorted(mapping.keys()):
            if len(key)>1:
                # Mark every proper prefix as having children.
                for i in range(1, len(key)):
                    parent_key = key[:i]
                    have_child[parent_key] = True
            # sorted() guarantees a key is visited before any longer key
            # it prefixes, so this False can still be flipped to True.
            have_child[key] = False
            max_key_length = max(max_key_length, len(key))
        for key in sorted(have_child.keys()):
            # Stored value: (is_tail, has_children, replacement).
            convert_map[key] = (key in mapping, have_child[key],
                    mapping.get(key, ''))
        self._map = convert_map
        self.max_key_length = max_key_length
    def __getitem__(self, k):
        try:
            is_tail, have_child, to_word = self._map[k]
            return Node(k, to_word, is_tail, have_child)
        except KeyError:
            # Narrowed from a bare except: only a missing key means
            # "unknown word"; anything else should propagate.
            return Node(k)
    def __contains__(self, k):
        return k in self._map
    def __len__(self):
        return len(self._map)
class StatesMachineException(Exception): pass
class StatesMachine:
    """One candidate parse of the input stream.

    `pool` holds characters consumed but not yet emitted, `final` the
    converted output so far, `len` the number of emitted words, and
    `state` one of START/END/FAIL/WAIT_TAIL.  When a short match could
    be extended to a longer one, feed() forks a clone that keeps waiting
    for the longer match; Converter later picks the best survivor.
    """
    def __init__(self):
        self.state = START
        self.final = u''
        self.len = 0
        self.pool = u''
    def clone(self, pool):
        # Fork an alternative parse that keeps reading a longer match.
        new = deepcopy(self)
        new.state = WAIT_TAIL
        new.pool = pool
        return new
    def feed(self, char, map):
        node = map[self.pool+char]
        # Classify the looked-up node into one of the five conditions.
        if node.have_child:
            if node.is_tail:
                if node.is_original:
                    cond = UNMATCHED_SWITCH
                else:
                    cond = MATCHED_SWITCH
            else:
                cond = CONNECTOR
        else:
            if node.is_tail:
                cond = TAIL
            else:
                cond = ERROR
        new = None
        if cond == ERROR:
            # Dead prefix: this candidate cannot continue.
            self.state = FAIL
        elif cond == TAIL:
            if self.state == WAIT_TAIL and node.is_original_long_word():
                # The awaited longer match never materialised.
                self.state = FAIL
            else:
                # Complete match: emit the replacement and reset the pool.
                self.final += node.to_word
                self.len += 1
                self.pool = ''
                self.state = END
        elif self.state == START or self.state == WAIT_TAIL:
            if cond == MATCHED_SWITCH:
                # Emit the short match now, but fork a clone that waits
                # for a possibly longer match.
                new = self.clone(node.from_word)
                self.final += node.to_word
                self.len += 1
                self.state = END
                self.pool = ''
            elif cond == UNMATCHED_SWITCH or cond == CONNECTOR:
                if self.state == START:
                    new = self.clone(node.from_word)
                    self.final += node.to_word
                    self.len += 1
                    self.state = END
                else:
                    if node.is_follow(self.pool):
                        # Pool no longer matches the key prefix.
                        self.state = FAIL
                    else:
                        self.pool = node.from_word
        elif self.state == END:
            # END is a new START
            self.state = START
            new = self.feed(char, map)
        elif self.state == FAIL:
            raise StatesMachineException('Translate States Machine '
                    'have error with input data %s' % node)
        return new
    def __len__(self):
        # Number of emitted words + 1; used to rank competing candidates.
        return self.len + 1
    def __str__(self):
        return '<StatesMachine %s, pool: "%s", state: %s, final: %s>' % (
                id(self), self.pool, self.state, self.final)
    __repr__ = __str__
class Converter:
    """Drives a set of StatesMachine candidates over an input string and
    keeps the best (longest-match) conversion."""
    def __init__(self, to_encoding):
        # *to_encoding* is a table name previously passed to registery().
        self.to_encoding = to_encoding
        self.map = MAPS[to_encoding]
        self.start()
    def feed(self, char):
        """Feed one character; returns the text converted so far."""
        branches = []
        for fsm in self.machines:
            new = fsm.feed(char, self.map)
            if new:
                branches.append(new)
        if branches:
            self.machines.extend(branches)
        # Drop candidates that can no longer match.
        self.machines = [fsm for fsm in self.machines if fsm.state != FAIL]
        all_ok = True
        for fsm in self.machines:
            if fsm.state != END:
                all_ok = False
        if all_ok:
            # Every survivor finished a word: commit the best one.
            self._clean()
        return self.get_result()
    def _clean(self):
        if len(self.machines):
            # Fewest emitted words == longest matches wins.
            # NOTE: sort(cmp=...) is Python 2 only.
            self.machines.sort(cmp=lambda x,y: cmp(len(x), len(y)))
            self.final += self.machines[0].final
        self.machines = [StatesMachine()]
    def start(self):
        # Reset to a single fresh candidate and empty output.
        self.machines = [StatesMachine()]
        self.final = u''
    def end(self):
        # Flush at end of input.  NOTE(review): FAIL machines are kept
        # here alongside END ones -- looks intentional (their pooled text
        # was already emitted) but confirm before changing.
        self.machines = [fsm for fsm in self.machines
                if fsm.state == FAIL or fsm.state == END]
        self._clean()
    def convert(self, string):
        """Convert *string* in one shot and return the result."""
        self.start()
        for char in string:
            self.feed(char)
        self.end()
        return self.get_result()
    def get_result(self):
        return self.final
def registery(name, mapping):
    """Build a ConvertMap from *mapping* and register it under *name*."""
    # Mutating the module-level dict needs no `global` declaration.
    MAPS[name] = ConvertMap(name, mapping)
# Register the two built-in tables, then drop the raw dicts to free memory.
registery('zh-hant', zh2Hant)
registery('zh-hans', zh2Hans)
del zh2Hant, zh2Hans
def run():
    """Command-line entry point: convert input line by line (Python 2).

    -e names the registered conversion table, -f the input file ('-' or
    omitted means stdin), -t the output file (default stdout).
    """
    import sys
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-e', type='string', dest='encoding',
            help='encoding')
    parser.add_option('-f', type='string', dest='file_in',
            help='input file (- for stdin)')
    parser.add_option('-t', type='string', dest='file_out',
            help='output file')
    (options, args) = parser.parse_args()
    if not options.encoding:
        parser.error('encoding must be set')
    # Resolve the input stream.
    if options.file_in:
        if options.file_in == '-':
            file_in = sys.stdin
        else:
            file_in = open(options.file_in)
    else:
        file_in = sys.stdin
    # Resolve the output stream.
    if options.file_out:
        if options.file_out == '-':
            file_out = sys.stdout
        else:
            file_out = open(options.file_out, 'w')
    else:
        file_out = sys.stdout
    c = Converter(options.encoding)
    for line in file_in:
        # Python 2 byte strings: decode, convert, re-encode as UTF-8.
        print >> file_out, c.convert(line.rstrip('\n').decode(
                'utf8')).encode('utf8')
# Command-line entry point.
if __name__ == '__main__':
    run()
| Python |
#coding=utf-8
from ripper.core import EncodingConvert
from ripper.core.Exceptions import DownloadException
import uuid
import urllib,sys,os,re,time
from ripper.core.Utils import enable_proxy, disable_proxy #@UnresolvedImport
from ripper.core.Utils import clear_url #@UnresolvedImport
import socket
import urlparse
import md5
try:
import Image # request PIL 1.1.6
except ImportError:
print 'PIL 1.1.6 required'
socket.setdefaulttimeout(35)
NAME_MAP = ('a b c d e f g h i j k l m n o p q r s t u v w x y z').split()
__doc__='''
http下载相关
'''
class HttpHandler:
''' http下载类 '''
def __init__(self, baseDir, useProxy=False):
self.baseDir = baseDir
self.defaultUseProxy = useProxy
self.useProxy = useProxy
self.proxies = {}
if useProxy == True:
self.enableProxy()
def enableProxy(self):
self.useProxy = True
# for url in Utils.getConfig('httpproxy').split(','):
# self.proxies['http'] = url
enable_proxy()
def disableProxy(self):
self.useProxy = False
self.proxies = {}
disable_proxy()
# 下载html页面
def getContent(self,burl,needConvert=False):
burl = clear_url(burl)
conn = urllib.urlopen(burl,proxies=self.proxies)
tstart = time.time()
content = conn.read()
tcost = str(time.time() - tstart)
noticeText = 'I have parsed '+burl+',It costs'+tcost+' seconds'+ (self.useProxy == True and '(throught proxy)' or '')
encc,content = EncodingConvert.zh2utf8(content)
print 'page encoding:',encc
if float(tcost) > 30.0 :
noticeText = noticeText + ', What the fuck, why takes so long...'
elif float(tcost) > 100.0 :
noticeText = noticeText + 'dude, you may consult 10000 :)'
# 文本格式转换
if needConvert == True:
content = content.replace('\r\n','\n')
return content
def getImage_safe(self,imgUrl, retrys=3):
for i in range(0, retrys):
try:
vals = self.getImage(imgUrl)
except DownloadException, ex:
print '%s, retry %d times remaining...' % (imgUrl, retrys-i)
time.sleep(3)
continue
return vals
return '404.jpg', -1, (0, 0)
# 通过url下载图片,返回保存在本地的filename
def getImage(self,imgUrl,preAssignFilename=None, fixed=False):
# imgUrl = fixurl(imgUrl)
filename = None
if None == preAssignFilename :
filename = get_file_name(imgUrl)
else:
filename = preAssignFilename
try:
opener = urllib.FancyURLopener(self.proxies)
imgDir = self.baseDir
tstart = time.time()
fn,headers = opener.retrieve(imgUrl)
tp = str(headers.gettype())
# 根据header的type判断文件类型并添加扩展名
if re.match('.*?jp[e]*g.*',tp):
filename = filename + '.jpg'
elif re.match('.*?gif.*',tp):
filename = filename + '.gif'
elif re.match('.*?bmp.*',tp):
filename = filename + '.bmp'
elif re.match('.*?png.*',tp):
filename = filename + '.bmp'
elif tp == 'application/octet-stream':
filename = filename + os.path.basename(fn)
elif 'image' not in tp:
# 非图片内容
if fixed == False:
return self.getImage(fixurl(imgUrl),fixed=True)
else:
raise DownloadException(u'%s not a images: %s' %( imgUrl ,tp) )
# 保存文件
absName = os.path.join(imgDir, filename)
ct = open(fn,'rb').read()
if len(ct) < 10:
raise DownloadException('image too small')
f = open(absName,'wb')
f.write(ct)
f.close()
tcost = str(time.time() - tstart)[0:5]
notice = 'Download finished:'+filename+',costs'+tcost+' seconds.'+ (self.useProxy == True and '(throught proxy)' or '')
fl = -1
# compress img
image_size = (0, 0)
# 最宽图片大小
maxwidth = 700
try:
img = Image.open(os.path.join(imgDir, filename))
image_size = img.size
if image_size[0] > maxwidth:
resizeimage(os.path.join(imgDir, filename), maxwidth)
else:
img.save(os.path.join(imgDir, filename))
img.close()
except Exception:
pass
# get file size
try:
fl = os.path.getsize(os.path.join(imgDir, filename))
except Exception:
pass
return filename,fl,image_size
except UnicodeError, err:
print err
if fixed == False:
return self.getImage(fixurl(imgUrl),fixed=True)
if self.useProxy == True:
self.disableProxy()
raise DownloadException(u'Download failed:' + imgUrl)
else:
self.enableProxy()
return self.getImage(imgUrl,filename)
except Exception, ex:
print ex
raise DownloadException(u'Download failed:' + imgUrl)
finally:
if self.useProxy == True:
self.disableProxy()
def getFile(self,imgUrl,preAssignFilename=None, fixed=False):
filename = None
if None == preAssignFilename :
filename = get_file_name(imgUrl)
else:
filename = preAssignFilename
try:
opener = urllib.FancyURLopener(self.proxies)
imgDir = self.baseDir
tstart = time.time()
fn,headers = opener.retrieve(imgUrl)
tp = str(headers.gettype())
# 根据header的type判断文件类型并添加扩展名
if re.match('.*?jp[e]*g.*',tp):
filename = filename + '.jpg'
elif re.match('.*?gif.*',tp):
filename = filename + '.gif'
elif re.match('.*?bmp.*',tp):
filename = filename + '.bmp'
elif re.match('.*?png.*',tp):
filename = filename + '.bmp'
elif re.match('.*?torrent.*',tp):
filename = filename + '.torrent'
elif tp == 'application/octet-stream':
filename = filename + os.path.basename(fn)
elif 'image' not in tp:
# 非图片内容
if fixed == False:
return self.getImage(fixurl(imgUrl),fixed=True)
else:
raise DownloadException(u'not a images:' + imgUrl)
# 保存文件
absName = os.path.join(imgDir, filename)
ct = open(fn,'rb').read()
if len(ct) < 10:
raise DownloadException('image too small')
f = open(absName,'wb')
f.write(ct)
f.close()
tcost = str(time.time() - tstart)[0:5]
notice = 'Download finished:'+filename+',costs'+tcost+' seconds.'+ (self.useProxy == True and '(throught proxy)' or '')
fl = -1
# compress img
try:
Image.open(os.path.join(imgDir, filename)).save(os.path.join(imgDir, filename))
except Exception:
pass
# get file size
try:
fl = os.path.getsize(os.path.join(imgDir, filename))
except Exception:
pass
return filename,fl
except UnicodeError, err:
print err
if fixed == False:
return self.getImage(fixurl(imgUrl),fixed=True)
if self.useProxy == True:
self.disableProxy()
raise DownloadException(u'Download failed:' + imgUrl)
else:
self.enableProxy()
return self.getImage(imgUrl,filename)
except Exception, ex:
print ex
raise DownloadException(u'Download failed:' + imgUrl)
finally:
if self.useProxy == True:
self.disableProxy()
def fixurl(url):
    u'''Re-encode *url* so that every component is safe ASCII.

    The host is IDNA-encoded; user, password, path, query and fragment
    are percent-encoded (path segments individually, so already-encoded
    slashes survive).  Accepts a byte or unicode string; returns a byte
    string URL.
    '''
    # turn string into unicode
    if not isinstance(url, unicode):
        url = url.decode('utf8')
    # parse it
    parsed = urlparse.urlsplit(url)
    # divide the netloc further
    userpass, at, hostport = parsed.netloc.partition('@')
    user, colon1, pass_ = userpass.partition(':')
    host, colon2, port = hostport.partition(':')
    # encode each component
    scheme = parsed.scheme.encode('utf8')
    user = urllib.quote(user.encode('utf8'))
    colon1 = colon1.encode('utf8')
    pass_ = urllib.quote(pass_.encode('utf8'))
    at = at.encode('utf8')
    host = host.encode('idna')
    colon2 = colon2.encode('utf8')
    port = port.encode('utf8')
    path = '/'.join(  # could be encoded slashes!
        urllib.quote(urllib.unquote(pce).encode('utf8'), '')
        for pce in parsed.path.split('/')
    )
    # '=&?/' stay verbatim so the query keeps its key=value structure
    query = urllib.quote(urllib.unquote(parsed.query).encode('utf8'), '=&?/')
    fragment = urllib.quote(urllib.unquote(parsed.fragment).encode('utf8'))
    # put it back together
    netloc = ''.join((user, colon1, pass_, at, host, colon2, port))
    return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
# Build the filename for downloaded images/text files.
# Changed from a generated UUID to a fixed name derived from the URL,
# so the same URL always maps to the same file.
def get_file_name(url):
    """Return a deterministic filename for *url*: its hex MD5 digest.

    The same URL always maps to the same name, so repeated downloads
    overwrite the existing file instead of piling up.
    """
    import hashlib  # replaces the deprecated ``md5`` module
    # Hash bytes, not text, so the digest is well defined for unicode URLs.
    if not isinstance(url, bytes):
        url = url.encode('utf8')
    return hashlib.md5(url).hexdigest()
def getId():
    """Build a mostly-unique identifier: hex timestamp + '-' + UUID1."""
    timestamp_part = str(hex(int(str(time.time()).replace('.', ''))))
    uuid_part = str(uuid.uuid1())
    return '-'.join((timestamp_part, uuid_part))
def resizeimage(imgpath, scaleWidth):
    """Shrink the image at *imgpath* in place to at most *scaleWidth* pixels
    wide, preserving the aspect ratio.  Images already narrow enough are
    left untouched.
    """
    img = Image.open(imgpath)
    orgWidth, orgHeigth = img.size
    if orgWidth > scaleWidth:
        # BUG FIX: under Python 2 ``orgWidth / scaleWidth`` was integer
        # division, so e.g. 700->500 gave ratio 1 and kept the full height,
        # distorting the aspect ratio.  Use true (float) division.
        ratio = float(orgWidth) / scaleWidth
        scaleHeigth = orgHeigth / ratio
        # call PIL to resize the image in place
        size = (int(scaleWidth), int(scaleHeigth))
        img.thumbnail(size, Image.ANTIALIAS)
        img.save(imgpath)
if __name__ == '__main__':
    # Ad-hoc manual test hooks (kept for reference):
    # uri = 'http://img3.douban.com/pview/event_p2ster/large/public/0e46182405722ce.jpg'
    # dd = HttpHandler('e:/datas')
    # print dd.getImage(uri)
    # print get_file_name('http://www.asd.dd.d3/dss.jpg')
    resizeimage('c:/dx/mpl-studios-ava-cup-of-tea-72.jpg', 500)
| Python |
#coding=utf-8
import os, re
from ripper.handler.Policy import torrentProxyPolicy
from ripper.core.Utils import getId, enable_proxy, disable_proxy #@UnresolvedImport
from ripper.core.Utils import clear_url
from urllib import urlopen, urlencode
# Per-site name of the POST form field that carries the 10-character
# download code (see TorrentDownloader.download).
CODE_MAP = {
    'http://www.haoseed.com/' : 'ref',
    'http://www.jandown.com/' : 'code',
    'http://www.mimima.com/' : 'code',
}
# Gateway-link patterns; group 1 captures the full download URL including
# its 10-character code.
# BUG FIX: the original character class "[a-z,A-Z,0-9]" accidentally
# allowed commas inside the code; codes are strictly alphanumeric.
URL_PATTERNS = [
    r'.*?(http\://www[0-9]*\.jandown.com/link\.php\?ref=[a-zA-Z0-9]{10})',
    r'.*?(http\://www[0-9]*\.mimima.com/link\.php\?ref=[a-zA-Z0-9]{10})',
    r'.*?(http\://www[0-9]*\.haoseed.com/ref\.php\?ref=[a-zA-Z0-9]{10})',
]
def info(msg):
    """Minimal logger: echo *msg* to stdout."""
    print(msg)
# Match the torrent gateway link in the text with a regular expression.
def reFindUrl(url):
    """Extract the first recognised torrent-gateway link from *url*.

    Spaces and '@' (common forum obfuscation) are stripped before
    matching.  Returns the matched URL string, or None when no pattern
    matches.
    """
    cleaned = url.replace(' ', '').replace('@', '')
    for pattern in URL_PATTERNS:
        hit = re.search(pattern, cleaned)
        if hit:
            return hit.group(1)
    return None
def readConfig(name):
    """Stub config reader: only 'httpproxy' is known, and it is empty."""
    return '' if name == 'httpproxy' else None
def canDownload(turl):
    """Return True when *turl* looks like a supported torrent-gateway link.

    A minimum length of 45 characters is required so that bare site roots
    (no ``?ref=`` code) are rejected.  Supported gateways: haoseed,
    jandown, mimima.
    """
    # Idiom fixes: identity test for None, ``in`` instead of find() != -1.
    if turl is None or len(turl) < 45:
        return False
    lowered = turl.lower()
    return 'haoseed' in lowered or 'jandown' in lowered or 'mimima' in lowered
class TorrentDownloader:
def __init__(self,torrentUrl,proxy={}):
# 正则表达式匹配种子地址
clearedUrl = reFindUrl(torrentUrl)
if clearedUrl == None:
torrentUrl = clear_url(torrentUrl)
else:
torrentUrl = clearedUrl
self.proxy = proxy
self.torrentUrl = torrentUrl
self.siteName = self.getSiteName(self.torrentUrl)
self.fetchUrl = self.getFetchUrl(self.torrentUrl)
self.code = self.getCode(self.torrentUrl)
self.postName = CODE_MAP[self.siteName] # form input for mimima and jandown
def download(self, dirName):
try:
# check proxy policy for the site
# if torrentProxyPolicy.has_key(self.siteName):
# if torrentProxyPolicy[self.siteName] == True:
# info('Using proxy for '+self.siteName)
# #self.proxy['http'] = readConfig('httpproxy')
# enable_proxy()
conn = urlopen(self.fetchUrl,urlencode({self.postName:self.code}))
# conn = urlopen(self.fetchUrl,urlencode({self.postName:self.code}),\
# proxies=self.proxy)
filename = getDownloadedFileName()
f = os.path.join(dirName , filename)
df = open(f,'wb')
df.write(conn.read())
df.close()
conn.close()
return filename, os.path.getsize(f)
except Exception, ex:
print ex
# finally:
# disable_proxy()
def getCode(self,turl):
if turl.lower().find('haoseed') :
if len(turl) > 10:
return turl[-10:]
if turl.lower().find('jandown') != -1 or turl.lower().index('mimima'):
if len(turl) > 45:
return turl[36:46]
def getSiteName(self,turl):
if turl.lower().find('jandown') != -1:
return 'http://www.jandown.com/'
if turl.lower().find('mimima') != -1:
return 'http://www.mimima.com/'
if turl.lower().find('haoseed') != -1:
return 'http://www.haoseed.com/'
def getFetchUrl(self,turl):
if self.siteName.find('haoseed') != -1:
return self.siteName+'download.php'
if self.siteName.find('jandown') != -1 or turl.lower().index('mimima'):
return self.siteName+'fetch.php'
def getDownloadedFileName():
    """Generate a unique on-disk name for a freshly downloaded torrent."""
    return '%s.torrent' % getId()
def __download(pageUrl, proxy=None, dirName='.'):
    """Download the torrent linked from *pageUrl* into *dirName*.

    Returns whatever ``TorrentDownloader.download`` returns — a
    ``(filename, size)`` tuple — or '' when the download failed.
    ``dirName`` is appended to the signature with a default so existing
    callers keep working.
    """
    info('downloading torrent from ' + pageUrl)
    if proxy is None:
        proxy = {}  # mutable-default fix
    td = TorrentDownloader(pageUrl, proxy)
    filename = ''
    try:
        # BUG FIX: download() requires a target directory; the old call
        # passed no arguments and always raised TypeError.
        filename = td.download(dirName)
    except Exception:
        info('error downloading torrent file:' + str(pageUrl))
    info('Torrent downloaded')
    return filename
if __name__ == '__main__':
    # Ad-hoc smoke test: run each sample URL through canDownload() and
    # then actually download its torrent into c:/temp.
    # import os
    # print os.path.getsize('../torrents/0x1bf588f668L-307d62e1-c75a-11dc-994b-00f0f43057f4.torrent')
    # post = Post.get(2)
    # for u in post.urls:
    #     if u == None : continue
    #     if canDownload(u.name) == True:
    #         downloadForPost(post,u)
    # Sample links as they appear in the wild: obfuscated with spaces,
    # or embedded inside copied forum HTML markup.
    testUrls = [
        '''http: //www6.mimima.com/link.php?ref=TFheYRHz1z'''
        ,"http://www.jandown.com/link.php?ref=oLcbdoYSc8'target=_blank>http://www.jandown.com/link.php?ref=oLcbdoYSc8</a><br><br>东南亚白胖子,谁帮忙发出去<br></span><br>"
        ,'http://www.jandown.com/link.php?ref=pozJ5cXldI'
        ,'''http://pics.dmm.co.jp/mono/movie/h_254yume037/h_254yume037pl.jpg'border=0onclick="window.open('http://www2.lookpipe.com/get.php?filepath=http://pics.dmm.co.jp/mono/movie/h_254yume037/h_254yume037pl.jpg');"><br><br><imgsrc='http://i.pixs.ru/storage/0/4/3/3856312398_4590791_2096043.jpg'border=0onclick="window.open('http://www2.lookpipe.com/get.php?filepath=http://i.pixs.ru/storage/0/4/3/3856312398_4590791_2096043.jpg');"><br><br>LinkURL:<ahref='http://www.jandown.com/link.php?ref=2tRZtgu42Z'target=_blank>http://www.jandown.com/link.php?ref=2tRZtgu42Z</a><br>'''
        ,'''http://i.minus.com/ik50VrnSZLDlx.jpg'border=0onclick="window.open('http://www2.lookpipe.com/get.php?filepath=http://i.minus.com/ik50VrnSZLDlx.jpg');"><br><br> <imgsrc='http://i.minus.com/iDnoLEtqOWI7a.jpg'border=0onclick="window.open('http://www2.lookpipe.com/get.php?filepath=http://i.minus.com/iDnoLEtqOWI7a.jpg');"><br><br> <imgsrc='http://i.minus.com/iBpzcYtJXjwEi.jpg'border=0onclick="window.open('http://www2.lookpipe.com/get.php?filepath=http://i.minus.com/iBpzcYtJXjwEi.jpg');"><br><br> <imgsrc='http://i.minus.com/iplv4IaApHrvJ.jpg'border=0onclick="window.open('http://www2.lookpipe.com/get.php?filepath=http://i.minus.com/iplv4IaApHrvJ.jpg');"><br><br> <imgsrc='http://i.minus.com/iiJwbpzI7ZDgK.jpg'border=0onclick="window.open('http://www2.lookpipe.com/get.php?filepath=http://i.minus.com/iiJwbpzI7ZDgK.jpg');"><br><br> <imgsrc='http://i.minus.com/i8rmu3GuGszEx.jpg'border=0onclick="window.open('http://www2.lookpipe.com/get.php?filepath=http://i.minus.com/i8rmu3GuGszEx.jpg');"><br><br> <imgsrc='http://i.minus.com/iNdf7DMud7h8g.jpg'border=0onclick="window.open('http://www2.lookpipe.com/get.php?filepath=http://i.minus.com/iNdf7DMud7h8g.jpg');"><br><br> <imgsrc='http://i.minus.com/iOoVFZPm3DdTi.jpg'border=0onclick="window.open('http://www2.lookpipe.com/get.php?filepath=http://i.minus.com/iOoVFZPm3DdTi.jpg');"><br><br>LinkURL:<ahref='http://www.jandown.com/link.php?ref=2RwLzeYdBf'target=_blank>http://www.jandown.com/link.php?ref=2RwLzeYdBf</a><br>BBCODE:<ahref='http://www.jandown.com/link.php?ref=2RwLzeYdBf'target=_blank>http://www.jandown.com/link.php?ref=2RwLzeYdBf</a><br></span><br>'''
        ,'http: //www6.mimima.com/link.php?ref=Y45Vtt5gZx<\/a><br><\/span><br>'
    ]
    for url in testUrls:
        if not canDownload(url):
            print 'FUCK'
        name, fl = TorrentDownloader(url).download('c:/temp')
        print name, fl
| Python |
# Whether downloads for each torrent gateway should go through the
# configured HTTP proxy (True) or connect directly (False).
torrentProxyPolicy = {
    'http://www.jandown.com/' : False,
    'http://www.haoseed.com/' : False,
    'http://www.mimima.com/' : True
}
| Python |
import re
import mechanize
import mimetypes
import sys
import os
import getpass
from optparse import OptionParser
from CompressURL import Compress
# _____ _____ _ _ _ _
# | ___| ____| | | |_ __ _ _| | ___ __ _ __| |
# | |_ | _| | | | | '_ \| | | | | / _ \ / _` |/ _` |
# | _| | |___| |_| | |_) | |_| | |__| (_) | (_| | (_| |
# |_| |_____|\___/| .__/ \__, |_____\___/ \__,_|\__,_|
# |_| |___/
# .--.
# |o_o |
# |:_/ |
# // \ \
# (| | )
# /'\_ _/`\
# \___)=(___/
class Client:
    """Browser-backed client for the FEUPload file-sharing site."""

    def __init__(self, url):
        """Initialise the client and open the site's main page."""
        self.browser = mechanize.Browser()
        self.LINK = url
        self.browser.open(url)
        self.TITLE = self.browser.title()
        # Set to True only after login() succeeds, so enviarFicheiro()
        # can refuse to upload for anonymous sessions.
        self.sucess_login = False

    def login(self, username, password):
        """Log in to the site.

        Returns True when the login succeeded, False otherwise.
        """
        self.browser.select_form(nr=0)
        self.browser["user[username]"] = username
        self.browser["user[password]"] = password
        self.browser.submit()
        # A successful login navigates away from the front page, so the
        # page title changes.
        if self.TITLE != self.browser.title():
            self.sucess_login = True
            return True
        else:
            return False

    def enviarFicheiro(self, nomeficheiro):
        """Upload the file at path *nomeficheiro*.

        Returns False when the user is not logged in; otherwise the URL of
        the uploaded file (or None when it cannot be found in the page).
        """
        if not self.sucess_login:
            return False
        # BUG FIX: open in binary mode so uploads do not go through newline
        # translation, and close the handle (the old code leaked it).
        ficheiro = open(nomeficheiro, 'rb')
        try:
            # The server needs the MIME type to know what kind of file
            # is being uploaded.
            mime = mimetypes.guess_type(nomeficheiro)[0]
            self.browser.select_form(nr=0)
            self.browser.form.add_file(ficheiro, mime, nomeficheiro)
            self.browser.submit()
        finally:
            ficheiro.close()
        # Find the link of the file that has just been uploaded.
        for link in self.browser.links():
            if link.text == self._obter_filename(nomeficheiro):
                return self.LINK + link.url

    def _obter_filename(self, nomeficheiro):
        """Return the final path component of *nomeficheiro*."""
        return os.path.basename(nomeficheiro)
def argvParser():
    """Parse the command-line options (filename, username, password, compress).

    When a filename and username were supplied but no password, prompt
    for it interactively with getpass so it need not appear in plain
    text on the command line.
    """
    parser = OptionParser()
    parser.add_option("-f", "--file", action="store", dest="filename", help="Nome do Ficheiro", metavar="FILE")
    parser.add_option("-u", "--username", action="store", dest="username", help="Username do SIFEUP")
    parser.add_option("-p", "--password", action="store", dest="password", help="Password do SIFEUP")
    parser.add_option("-c", "--compress", action="store", dest="compress", help="Comprimir URL no http://is.gd")
    options, _args = parser.parse_args()
    password_missing = not options.password
    if password_missing and options.filename and options.username:
        options.password = getpass.getpass("Por favor insira a password: ")
    return options
if __name__ == "__main__":
argumentos = argvParser()
if not argumentos.filename:
print "POR FAVOR, ENVIE UM FICHEIRO. Para ajuda faca -h ou --help"
else:
client = Client("http://feupload.fe.up.pt")
print "A FAZER LOGIN..."
if argumentos.username:
if client.login(argumentos.username, argumentos.password):
print "LOGIN OK, A ENVIAR FICHEIRO..."
link = client.enviarFicheiro(argumentos.filename)
if argumentos.compress == "y":
link = Compress(link)
print "FICHEIRO ENVIADO PARA: " + link
else:
print "O LOGIN FALHOU! USERNAME E/OU PASSWORD ERRADOS?"
| Python |
import re
import mechanize
def Compress(url):
    """Shorten *url* through http://is.gd and return the short link.

    Returns None when the shortened link cannot be found in the reply.
    """
    shortener = "http://is.gd"
    browser = mechanize.Browser()
    browser.open(shortener)
    browser.select_form(nr=0)
    browser["URL"] = url
    browser.submit()
    # is.gd renders the short URL as a link whose text equals its target.
    for link in browser.links():
        if link.url == link.text:
            return link.url
| Python |
#!/usr/bin/python
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# chromium_extension.py
import array
import hashlib
import logging
import optparse
import os
import random
import re
import shutil
import sys
import zipfile
if sys.version_info < (2, 6):
import simplejson as json
else:
import json
ignore_dirs = [".svn", "CVS"]
ignore_files = [re.compile(".*~")]
MANIFEST_FILENAME = "manifest.json"
class ExtensionDir:
def __init__(self, path):
self._root = os.path.abspath(path)
self._dirs = []
self._files = []
for root, dirs, files in os.walk(path, topdown=True):
for dir in ignore_dirs:
if dir in dirs:
dirs.remove(dir)
root = os.path.abspath(root)
for dir in dirs:
self._dirs.append(os.path.join(root, dir))
for f in files:
for match in ignore_files:
if not match.match(f):
self._files.append(os.path.join(root, f))
def validate(self):
if os.path.join(self._root, MANIFEST_FILENAME) not in self._files:
logging.error("package is missing a valid %s file" % MANIFEST_FILENAME)
return False
return True
def writeToPackage(self, path):
if not self.validate():
return False
try:
f = open(os.path.join(self._root, MANIFEST_FILENAME), "r")
manifest = json.load(f)
f.close()
# Temporary hack: If the manifest doesn't have an ID, generate a random
# one. This is to make it easier for people to play with the extension
# system while we don't have the real ID mechanism in place.
if not "id" in manifest:
random_id = ""
for i in range(0, 40):
random_id += "0123456789ABCDEF"[random.randrange(0, 15)]
logging.info("Generated extension ID: %s" % random_id)
manifest["id"] = random_id;
f = open(os.path.join(self._root, MANIFEST_FILENAME), "w")
f.write(json.dumps(manifest, sort_keys=True, indent=2));
f.close();
zip_path = path + ".zip"
if os.path.exists(zip_path):
os.remove(zip_path)
zip = zipfile.ZipFile(zip_path, "w")
(root, dir) = os.path.split(self._root)
root_len = len(self._root)
for file in self._files:
arcname = file[root_len+1:]
logging.debug("%s: %s" % (arcname, file))
zip.write(file, arcname)
zip.close()
zip = open(zip_path, mode="rb")
hash = hashlib.sha256()
while True:
buf = zip.read(32 * 1024)
if not len(buf):
break
hash.update(buf)
zip.close()
manifest["zip_hash"] = hash.hexdigest()
# This is a bit odd - we're actually appending a new zip file to the end
# of the manifest. Believe it or not, this is actually an explicit
# feature of the zip format, and many zip utilities (this library
# and three others I tried) can still read the underlying zip file.
if os.path.exists(path):
os.remove(path)
out = open(path, "wb")
out.write("Cr24") # Extension file magic number
# The rest of the header is currently made up of three ints:
# version, header size, manifest size
header = array.array("l")
header.append(1) # version
header.append(16) # header size
manifest_json = json.dumps(manifest);
header.append(len(manifest_json)) # manifest size
header.tofile(out)
out.write(manifest_json);
zip = open(zip_path, "rb")
while True:
buf = zip.read(32 * 1024)
if not len(buf):
break
out.write(buf)
zip.close()
out.close()
os.remove(zip_path)
logging.info("created extension package %s" % path)
except IOError, (errno, strerror):
logging.error("error creating extension %s (%d, %s)" % (path, errno,
strerror))
try:
if os.path.exists(path):
os.remove(path)
except:
pass
return False
return True
class ExtensionPackage:
  """Opens a packaged extension zip, verifies it and logs its contents."""

  def __init__(self, path):
    archive = zipfile.ZipFile(path)
    problem = archive.testzip()
    if problem:
      logging.error("error reading extension: %s", problem)
      return
    logging.info("%s contents:" % path)
    for name in archive.namelist():
      logging.info(name)
def Run():
  """Command-line driver: package --indir into --outfile, then verify it."""
  logging.basicConfig(level=logging.INFO, format="[%(levelname)s] %(message)s")
  parser = optparse.OptionParser("usage: %prog --indir=<dir> --outfile=<file>")
  parser.add_option("", "--indir",
                    help="an input directory where the extension lives")
  parser.add_option("", "--outfile",
                    help="extension package filename to create")
  options, args = parser.parse_args()
  # Both options are mandatory; reject missing ones in declaration order.
  for flag in ("indir", "outfile"):
    if not getattr(options, flag):
      parser.error("missing required option --%s" % flag)
  ext = ExtensionDir(options.indir)
  ext.writeToPackage(options.outfile)
  ExtensionPackage(options.outfile)
  return 0
if __name__ == "__main__":
retcode = Run()
sys.exit(retcode)
| Python |
#!/usr/bin/python
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# chromium_extension.py
import array
import hashlib
import logging
import optparse
import os
import random
import re
import shutil
import sys
import zipfile
if sys.version_info < (2, 6):
import simplejson as json
else:
import json
ignore_dirs = [".svn", "CVS"]
ignore_files = [re.compile(".*~")]
MANIFEST_FILENAME = "manifest.json"
class ExtensionDir:
def __init__(self, path):
self._root = os.path.abspath(path)
self._dirs = []
self._files = []
for root, dirs, files in os.walk(path, topdown=True):
for dir in ignore_dirs:
if dir in dirs:
dirs.remove(dir)
root = os.path.abspath(root)
for dir in dirs:
self._dirs.append(os.path.join(root, dir))
for f in files:
for match in ignore_files:
if not match.match(f):
self._files.append(os.path.join(root, f))
def validate(self):
if os.path.join(self._root, MANIFEST_FILENAME) not in self._files:
logging.error("package is missing a valid %s file" % MANIFEST_FILENAME)
return False
return True
def writeToPackage(self, path):
if not self.validate():
return False
try:
f = open(os.path.join(self._root, MANIFEST_FILENAME), "r")
manifest = json.load(f)
f.close()
# Temporary hack: If the manifest doesn't have an ID, generate a random
# one. This is to make it easier for people to play with the extension
# system while we don't have the real ID mechanism in place.
if not "id" in manifest:
random_id = ""
for i in range(0, 40):
random_id += "0123456789ABCDEF"[random.randrange(0, 15)]
logging.info("Generated extension ID: %s" % random_id)
manifest["id"] = random_id;
f = open(os.path.join(self._root, MANIFEST_FILENAME), "w")
f.write(json.dumps(manifest, sort_keys=True, indent=2));
f.close();
zip_path = path + ".zip"
if os.path.exists(zip_path):
os.remove(zip_path)
zip = zipfile.ZipFile(zip_path, "w")
(root, dir) = os.path.split(self._root)
root_len = len(self._root)
for file in self._files:
arcname = file[root_len+1:]
logging.debug("%s: %s" % (arcname, file))
zip.write(file, arcname)
zip.close()
zip = open(zip_path, mode="rb")
hash = hashlib.sha256()
while True:
buf = zip.read(32 * 1024)
if not len(buf):
break
hash.update(buf)
zip.close()
manifest["zip_hash"] = hash.hexdigest()
# This is a bit odd - we're actually appending a new zip file to the end
# of the manifest. Believe it or not, this is actually an explicit
# feature of the zip format, and many zip utilities (this library
# and three others I tried) can still read the underlying zip file.
if os.path.exists(path):
os.remove(path)
out = open(path, "wb")
out.write("Cr24") # Extension file magic number
# The rest of the header is currently made up of three ints:
# version, header size, manifest size
header = array.array("l")
header.append(1) # version
header.append(16) # header size
manifest_json = json.dumps(manifest);
header.append(len(manifest_json)) # manifest size
header.tofile(out)
out.write(manifest_json);
zip = open(zip_path, "rb")
while True:
buf = zip.read(32 * 1024)
if not len(buf):
break
out.write(buf)
zip.close()
out.close()
os.remove(zip_path)
logging.info("created extension package %s" % path)
except IOError, (errno, strerror):
logging.error("error creating extension %s (%d, %s)" % (path, errno,
strerror))
try:
if os.path.exists(path):
os.remove(path)
except:
pass
return False
return True
class ExtensionPackage:
  """Opens a packaged extension zip, verifies it and logs its contents."""

  def __init__(self, path):
    archive = zipfile.ZipFile(path)
    problem = archive.testzip()
    if problem:
      logging.error("error reading extension: %s", problem)
      return
    logging.info("%s contents:" % path)
    for name in archive.namelist():
      logging.info(name)
def Run():
  """Command-line driver: package --indir into --outfile, then verify it."""
  logging.basicConfig(level=logging.INFO, format="[%(levelname)s] %(message)s")
  parser = optparse.OptionParser("usage: %prog --indir=<dir> --outfile=<file>")
  parser.add_option("", "--indir",
                    help="an input directory where the extension lives")
  parser.add_option("", "--outfile",
                    help="extension package filename to create")
  options, args = parser.parse_args()
  # Both options are mandatory; reject missing ones in declaration order.
  for flag in ("indir", "outfile"):
    if not getattr(options, flag):
      parser.error("missing required option --%s" % flag)
  ext = ExtensionDir(options.indir)
  ext.writeToPackage(options.outfile)
  ExtensionPackage(options.outfile)
  return 0
if __name__ == "__main__":
retcode = Run()
sys.exit(retcode)
| Python |
#!/usr/bin/python
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# chromium_extension.py
import array
import hashlib
import logging
import optparse
import os
import random
import re
import shutil
import sys
import zipfile
if sys.version_info < (2, 6):
import simplejson as json
else:
import json
ignore_dirs = [".svn", "CVS"]
ignore_files = [re.compile(".*~")]
MANIFEST_FILENAME = "manifest.json"
class ExtensionDir:
def __init__(self, path):
self._root = os.path.abspath(path)
self._dirs = []
self._files = []
for root, dirs, files in os.walk(path, topdown=True):
for dir in ignore_dirs:
if dir in dirs:
dirs.remove(dir)
root = os.path.abspath(root)
for dir in dirs:
self._dirs.append(os.path.join(root, dir))
for f in files:
for match in ignore_files:
if not match.match(f):
self._files.append(os.path.join(root, f))
def validate(self):
if os.path.join(self._root, MANIFEST_FILENAME) not in self._files:
logging.error("package is missing a valid %s file" % MANIFEST_FILENAME)
return False
return True
def writeToPackage(self, path):
if not self.validate():
return False
try:
f = open(os.path.join(self._root, MANIFEST_FILENAME), "r")
manifest = json.load(f)
f.close()
# Temporary hack: If the manifest doesn't have an ID, generate a random
# one. This is to make it easier for people to play with the extension
# system while we don't have the real ID mechanism in place.
if not "id" in manifest:
random_id = ""
for i in range(0, 40):
random_id += "0123456789ABCDEF"[random.randrange(0, 15)]
logging.info("Generated extension ID: %s" % random_id)
manifest["id"] = random_id;
f = open(os.path.join(self._root, MANIFEST_FILENAME), "w")
f.write(json.dumps(manifest, sort_keys=True, indent=2));
f.close();
zip_path = path + ".zip"
if os.path.exists(zip_path):
os.remove(zip_path)
zip = zipfile.ZipFile(zip_path, "w")
(root, dir) = os.path.split(self._root)
root_len = len(self._root)
for file in self._files:
arcname = file[root_len+1:]
logging.debug("%s: %s" % (arcname, file))
zip.write(file, arcname)
zip.close()
zip = open(zip_path, mode="rb")
hash = hashlib.sha256()
while True:
buf = zip.read(32 * 1024)
if not len(buf):
break
hash.update(buf)
zip.close()
manifest["zip_hash"] = hash.hexdigest()
# This is a bit odd - we're actually appending a new zip file to the end
# of the manifest. Believe it or not, this is actually an explicit
# feature of the zip format, and many zip utilities (this library
# and three others I tried) can still read the underlying zip file.
if os.path.exists(path):
os.remove(path)
out = open(path, "wb")
out.write("Cr24") # Extension file magic number
# The rest of the header is currently made up of three ints:
# version, header size, manifest size
header = array.array("l")
header.append(1) # version
header.append(16) # header size
manifest_json = json.dumps(manifest);
header.append(len(manifest_json)) # manifest size
header.tofile(out)
out.write(manifest_json);
zip = open(zip_path, "rb")
while True:
buf = zip.read(32 * 1024)
if not len(buf):
break
out.write(buf)
zip.close()
out.close()
os.remove(zip_path)
logging.info("created extension package %s" % path)
except IOError, (errno, strerror):
logging.error("error creating extension %s (%d, %s)" % (path, errno,
strerror))
try:
if os.path.exists(path):
os.remove(path)
except:
pass
return False
return True
class ExtensionPackage:
  """Opens a packaged extension zip, verifies it and logs its contents."""

  def __init__(self, path):
    archive = zipfile.ZipFile(path)
    problem = archive.testzip()
    if problem:
      logging.error("error reading extension: %s", problem)
      return
    logging.info("%s contents:" % path)
    for name in archive.namelist():
      logging.info(name)
def Run():
  """Command-line driver: package --indir into --outfile, then verify it."""
  logging.basicConfig(level=logging.INFO, format="[%(levelname)s] %(message)s")
  parser = optparse.OptionParser("usage: %prog --indir=<dir> --outfile=<file>")
  parser.add_option("", "--indir",
                    help="an input directory where the extension lives")
  parser.add_option("", "--outfile",
                    help="extension package filename to create")
  options, args = parser.parse_args()
  # Both options are mandatory; reject missing ones in declaration order.
  for flag in ("indir", "outfile"):
    if not getattr(options, flag):
      parser.error("missing required option --%s" % flag)
  ext = ExtensionDir(options.indir)
  ext.writeToPackage(options.outfile)
  ExtensionPackage(options.outfile)
  return 0
if __name__ == "__main__":
retcode = Run()
sys.exit(retcode)
| Python |
#!/usr/bin/python
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# chromium_extension.py
import array
import hashlib
import logging
import optparse
import os
import random
import re
import shutil
import sys
import zipfile
if sys.version_info < (2, 6):
import simplejson as json
else:
import json
ignore_dirs = [".svn", "CVS"]
ignore_files = [re.compile(".*~")]
MANIFEST_FILENAME = "manifest.json"
class ExtensionDir:
def __init__(self, path):
self._root = os.path.abspath(path)
self._dirs = []
self._files = []
for root, dirs, files in os.walk(path, topdown=True):
for dir in ignore_dirs:
if dir in dirs:
dirs.remove(dir)
root = os.path.abspath(root)
for dir in dirs:
self._dirs.append(os.path.join(root, dir))
for f in files:
for match in ignore_files:
if not match.match(f):
self._files.append(os.path.join(root, f))
def validate(self):
if os.path.join(self._root, MANIFEST_FILENAME) not in self._files:
logging.error("package is missing a valid %s file" % MANIFEST_FILENAME)
return False
return True
def writeToPackage(self, path):
if not self.validate():
return False
try:
f = open(os.path.join(self._root, MANIFEST_FILENAME), "r")
manifest = json.load(f)
f.close()
# Temporary hack: If the manifest doesn't have an ID, generate a random
# one. This is to make it easier for people to play with the extension
# system while we don't have the real ID mechanism in place.
if not "id" in manifest:
random_id = ""
for i in range(0, 40):
random_id += "0123456789ABCDEF"[random.randrange(0, 15)]
logging.info("Generated extension ID: %s" % random_id)
manifest["id"] = random_id;
f = open(os.path.join(self._root, MANIFEST_FILENAME), "w")
f.write(json.dumps(manifest, sort_keys=True, indent=2));
f.close();
zip_path = path + ".zip"
if os.path.exists(zip_path):
os.remove(zip_path)
zip = zipfile.ZipFile(zip_path, "w")
(root, dir) = os.path.split(self._root)
root_len = len(self._root)
for file in self._files:
arcname = file[root_len+1:]
logging.debug("%s: %s" % (arcname, file))
zip.write(file, arcname)
zip.close()
zip = open(zip_path, mode="rb")
hash = hashlib.sha256()
while True:
buf = zip.read(32 * 1024)
if not len(buf):
break
hash.update(buf)
zip.close()
manifest["zip_hash"] = hash.hexdigest()
# This is a bit odd - we're actually appending a new zip file to the end
# of the manifest. Believe it or not, this is actually an explicit
# feature of the zip format, and many zip utilities (this library
# and three others I tried) can still read the underlying zip file.
if os.path.exists(path):
os.remove(path)
out = open(path, "wb")
out.write("Cr24") # Extension file magic number
# The rest of the header is currently made up of three ints:
# version, header size, manifest size
header = array.array("l")
header.append(1) # version
header.append(16) # header size
manifest_json = json.dumps(manifest);
header.append(len(manifest_json)) # manifest size
header.tofile(out)
out.write(manifest_json);
zip = open(zip_path, "rb")
while True:
buf = zip.read(32 * 1024)
if not len(buf):
break
out.write(buf)
zip.close()
out.close()
os.remove(zip_path)
logging.info("created extension package %s" % path)
except IOError, (errno, strerror):
logging.error("error creating extension %s (%d, %s)" % (path, errno,
strerror))
try:
if os.path.exists(path):
os.remove(path)
except:
pass
return False
return True
class ExtensionPackage:
  """Opens a packaged extension zip, verifies it and logs its contents."""

  def __init__(self, path):
    archive = zipfile.ZipFile(path)
    problem = archive.testzip()
    if problem:
      logging.error("error reading extension: %s", problem)
      return
    logging.info("%s contents:" % path)
    for name in archive.namelist():
      logging.info(name)
def Run():
  """Command-line driver: package --indir into --outfile, then verify it."""
  logging.basicConfig(level=logging.INFO, format="[%(levelname)s] %(message)s")
  parser = optparse.OptionParser("usage: %prog --indir=<dir> --outfile=<file>")
  parser.add_option("", "--indir",
                    help="an input directory where the extension lives")
  parser.add_option("", "--outfile",
                    help="extension package filename to create")
  options, args = parser.parse_args()
  # Both options are mandatory; reject missing ones in declaration order.
  for flag in ("indir", "outfile"):
    if not getattr(options, flag):
      parser.error("missing required option --%s" % flag)
  ext = ExtensionDir(options.indir)
  ext.writeToPackage(options.outfile)
  ExtensionPackage(options.outfile)
  return 0
if __name__ == "__main__":
retcode = Run()
sys.exit(retcode)
| Python |
#!/usr/bin/python
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# chromium_extension.py
import array
import hashlib
import logging
import optparse
import os
import random
import re
import shutil
import sys
import zipfile
if sys.version_info < (2, 6):
import simplejson as json
else:
import json
# Directory names pruned from the tree walk (version-control metadata).
ignore_dirs = [".svn", "CVS"]
# Regex patterns for filenames meant to be skipped when packaging
# (e.g. trailing-tilde editor backup files).
ignore_files = [re.compile(".*~")]
# Required manifest filename at the extension root.
MANIFEST_FILENAME = "manifest.json"
class ExtensionDir:
  """An unpacked extension source directory.

  On construction, walks the tree under |path| and records every
  directory and file, skipping directories named in ignore_dirs and
  files matching any pattern in ignore_files.
  """
  def __init__(self, path):
    self._root = os.path.abspath(path)
    self._dirs = []
    self._files = []
    for root, dirs, files in os.walk(path, topdown=True):
      # Prune ignored directory names in place so os.walk never descends
      # into them (effective because topdown=True).
      for dir in ignore_dirs:
        if dir in dirs:
          dirs.remove(dir)
      root = os.path.abspath(root)
      for dir in dirs:
        self._dirs.append(os.path.join(root, dir))
      for f in files:
        # BUG FIX: the old loop appended the file once per NON-matching
        # pattern, so with several ignore patterns a file was duplicated,
        # and a file matching one pattern was still added for the others.
        # Keep a file only when no ignore pattern matches it.
        if not any(m.match(f) for m in ignore_files):
          self._files.append(os.path.join(root, f))

  def validate(self):
    """Return True if the walk found the required manifest file."""
    if os.path.join(self._root, MANIFEST_FILENAME) not in self._files:
      logging.error("package is missing a valid %s file" % MANIFEST_FILENAME)
      return False
    return True

  def writeToPackage(self, path):
    """Write the extension as a single package file at |path|.

    Layout: the "Cr24" magic, three 32-bit ints (version, header size,
    manifest size), the manifest JSON, then a zip of the directory
    contents.  Returns True on success; on failure logs the error,
    removes any partial output, and returns False.
    """
    if not self.validate():
      return False
    try:
      f = open(os.path.join(self._root, MANIFEST_FILENAME), "r")
      manifest = json.load(f)
      f.close()

      # Temporary hack: If the manifest doesn't have an ID, generate a
      # random one. This is to make it easier for people to play with the
      # extension system while we don't have the real ID mechanism in place.
      if not "id" in manifest:
        random_id = ""
        for i in range(0, 40):
          # BUG FIX: randrange(0, 15) excludes 15, so "F" could never
          # appear in generated IDs; randrange(16) covers all hex digits.
          random_id += "0123456789ABCDEF"[random.randrange(16)]
        logging.info("Generated extension ID: %s" % random_id)
        manifest["id"] = random_id
        f = open(os.path.join(self._root, MANIFEST_FILENAME), "w")
        f.write(json.dumps(manifest, sort_keys=True, indent=2))
        f.close()

      # Zip up the extension contents next to the output path.
      zip_path = path + ".zip"
      if os.path.exists(zip_path):
        os.remove(zip_path)
      zip = zipfile.ZipFile(zip_path, "w")
      root_len = len(self._root)
      for file in self._files:
        arcname = file[root_len+1:]  # path relative to the extension root
        logging.debug("%s: %s" % (arcname, file))
        zip.write(file, arcname)
      zip.close()

      # Record the zip's SHA-256 in the in-memory manifest; it is emitted
      # in the package header JSON below.
      zip = open(zip_path, mode="rb")
      hash = hashlib.sha256()
      while True:
        buf = zip.read(32 * 1024)
        if not len(buf):
          break
        hash.update(buf)
      zip.close()
      manifest["zip_hash"] = hash.hexdigest()

      # This is a bit odd - we're actually appending a new zip file to the
      # end of the manifest. Believe it or not, this is actually an explicit
      # feature of the zip format, and many zip utilities (this library
      # and three others I tried) can still read the underlying zip file.
      if os.path.exists(path):
        os.remove(path)
      out = open(path, "wb")
      out.write("Cr24")  # Extension file magic number
      # The rest of the header is three ints: version, header size,
      # manifest size.  BUG FIX: use type code "i" (C int, 4 bytes on all
      # supported platforms) instead of "l", which is 8 bytes on LP64
      # systems and silently produced an oversized header while the
      # "header size" field still claimed 16.
      header = array.array("i")
      header.append(1)   # version
      header.append(16)  # header size
      manifest_json = json.dumps(manifest)
      header.append(len(manifest_json))  # manifest size
      header.tofile(out)  # NOTE(review): machine byte order; assumes little-endian host
      out.write(manifest_json)

      # Append the zip payload after the manifest.
      zip = open(zip_path, "rb")
      while True:
        buf = zip.read(32 * 1024)
        if not len(buf):
          break
        out.write(buf)
      zip.close()
      out.close()
      os.remove(zip_path)
      logging.info("created extension package %s" % path)
    except IOError as e:
      # BUG FIX: "except IOError, (errno, strerror)" breaks when the
      # IOError doesn't carry exactly two args; use the errno/strerror
      # attributes instead (%s because errno can be None).
      logging.error("error creating extension %s (%s, %s)" % (path, e.errno,
          e.strerror))
      try:
        if os.path.exists(path):
          os.remove(path)
      except OSError:  # best-effort cleanup; never mask the failure return
        pass
      return False
    return True
class ExtensionPackage:
  """Opens |path| as a zip archive and logs its entry names.

  Logs an error and stops early if the archive fails its CRC check.
  """
  def __init__(self, path):
    archive = zipfile.ZipFile(path)
    bad_entry = archive.testzip()
    if bad_entry:
      logging.error("error reading extension: %s", bad_entry)
      return
    logging.info("%s contents:" % path)
    for name in archive.namelist():
      logging.info(name)
def Run():
  """Command-line driver: package --indir into --outfile.

  Parses options, packages the directory, then re-opens the result to
  log its contents.  Always returns 0 (option errors exit via
  parser.error).
  """
  logging.basicConfig(level=logging.INFO, format="[%(levelname)s] %(message)s")
  usage = "usage: %prog --indir=<dir> --outfile=<file>"
  parser = optparse.OptionParser(usage)
  parser.add_option("", "--indir",
                    help="an input directory where the extension lives")
  parser.add_option("", "--outfile",
                    help="extension package filename to create")
  options, _args = parser.parse_args()
  for name in ("indir", "outfile"):
    if not getattr(options, name):
      parser.error("missing required option --%s" % name)
  ExtensionDir(options.indir).writeToPackage(options.outfile)
  ExtensionPackage(options.outfile)  # logs the package contents
  return 0
# Script entry point: Run() always returns 0 on this path, so the process
# exits with status 0 (option errors exit earlier via parser.error).
if __name__ == "__main__":
  retcode = Run()
  sys.exit(retcode)
| Python |
#!/usr/bin/python
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# chromium_extension.py
import array
import hashlib
import logging
import optparse
import os
import random
import re
import shutil
import sys
import zipfile
if sys.version_info < (2, 6):
import simplejson as json
else:
import json
# Directory names pruned from the tree walk (version-control metadata).
ignore_dirs = [".svn", "CVS"]
# Regex patterns for filenames meant to be skipped when packaging
# (e.g. trailing-tilde editor backup files).
ignore_files = [re.compile(".*~")]
# Required manifest filename at the extension root.
MANIFEST_FILENAME = "manifest.json"
class ExtensionDir:
  """An unpacked extension source directory.

  On construction, walks the tree under |path| and records every
  directory and file, skipping directories named in ignore_dirs and
  files matching any pattern in ignore_files.
  """
  def __init__(self, path):
    self._root = os.path.abspath(path)
    self._dirs = []
    self._files = []
    for root, dirs, files in os.walk(path, topdown=True):
      # Prune ignored directory names in place so os.walk never descends
      # into them (effective because topdown=True).
      for dir in ignore_dirs:
        if dir in dirs:
          dirs.remove(dir)
      root = os.path.abspath(root)
      for dir in dirs:
        self._dirs.append(os.path.join(root, dir))
      for f in files:
        # BUG FIX: the old loop appended the file once per NON-matching
        # pattern, so with several ignore patterns a file was duplicated,
        # and a file matching one pattern was still added for the others.
        # Keep a file only when no ignore pattern matches it.
        if not any(m.match(f) for m in ignore_files):
          self._files.append(os.path.join(root, f))

  def validate(self):
    """Return True if the walk found the required manifest file."""
    if os.path.join(self._root, MANIFEST_FILENAME) not in self._files:
      logging.error("package is missing a valid %s file" % MANIFEST_FILENAME)
      return False
    return True

  def writeToPackage(self, path):
    """Write the extension as a single package file at |path|.

    Layout: the "Cr24" magic, three 32-bit ints (version, header size,
    manifest size), the manifest JSON, then a zip of the directory
    contents.  Returns True on success; on failure logs the error,
    removes any partial output, and returns False.
    """
    if not self.validate():
      return False
    try:
      f = open(os.path.join(self._root, MANIFEST_FILENAME), "r")
      manifest = json.load(f)
      f.close()

      # Temporary hack: If the manifest doesn't have an ID, generate a
      # random one. This is to make it easier for people to play with the
      # extension system while we don't have the real ID mechanism in place.
      if not "id" in manifest:
        random_id = ""
        for i in range(0, 40):
          # BUG FIX: randrange(0, 15) excludes 15, so "F" could never
          # appear in generated IDs; randrange(16) covers all hex digits.
          random_id += "0123456789ABCDEF"[random.randrange(16)]
        logging.info("Generated extension ID: %s" % random_id)
        manifest["id"] = random_id
        f = open(os.path.join(self._root, MANIFEST_FILENAME), "w")
        f.write(json.dumps(manifest, sort_keys=True, indent=2))
        f.close()

      # Zip up the extension contents next to the output path.
      zip_path = path + ".zip"
      if os.path.exists(zip_path):
        os.remove(zip_path)
      zip = zipfile.ZipFile(zip_path, "w")
      root_len = len(self._root)
      for file in self._files:
        arcname = file[root_len+1:]  # path relative to the extension root
        logging.debug("%s: %s" % (arcname, file))
        zip.write(file, arcname)
      zip.close()

      # Record the zip's SHA-256 in the in-memory manifest; it is emitted
      # in the package header JSON below.
      zip = open(zip_path, mode="rb")
      hash = hashlib.sha256()
      while True:
        buf = zip.read(32 * 1024)
        if not len(buf):
          break
        hash.update(buf)
      zip.close()
      manifest["zip_hash"] = hash.hexdigest()

      # This is a bit odd - we're actually appending a new zip file to the
      # end of the manifest. Believe it or not, this is actually an explicit
      # feature of the zip format, and many zip utilities (this library
      # and three others I tried) can still read the underlying zip file.
      if os.path.exists(path):
        os.remove(path)
      out = open(path, "wb")
      out.write("Cr24")  # Extension file magic number
      # The rest of the header is three ints: version, header size,
      # manifest size.  BUG FIX: use type code "i" (C int, 4 bytes on all
      # supported platforms) instead of "l", which is 8 bytes on LP64
      # systems and silently produced an oversized header while the
      # "header size" field still claimed 16.
      header = array.array("i")
      header.append(1)   # version
      header.append(16)  # header size
      manifest_json = json.dumps(manifest)
      header.append(len(manifest_json))  # manifest size
      header.tofile(out)  # NOTE(review): machine byte order; assumes little-endian host
      out.write(manifest_json)

      # Append the zip payload after the manifest.
      zip = open(zip_path, "rb")
      while True:
        buf = zip.read(32 * 1024)
        if not len(buf):
          break
        out.write(buf)
      zip.close()
      out.close()
      os.remove(zip_path)
      logging.info("created extension package %s" % path)
    except IOError as e:
      # BUG FIX: "except IOError, (errno, strerror)" breaks when the
      # IOError doesn't carry exactly two args; use the errno/strerror
      # attributes instead (%s because errno can be None).
      logging.error("error creating extension %s (%s, %s)" % (path, e.errno,
          e.strerror))
      try:
        if os.path.exists(path):
          os.remove(path)
      except OSError:  # best-effort cleanup; never mask the failure return
        pass
      return False
    return True
class ExtensionPackage:
  """Opens |path| as a zip archive and logs its entry names.

  Logs an error and stops early if the archive fails its CRC check.
  """
  def __init__(self, path):
    archive = zipfile.ZipFile(path)
    bad_entry = archive.testzip()
    if bad_entry:
      logging.error("error reading extension: %s", bad_entry)
      return
    logging.info("%s contents:" % path)
    for name in archive.namelist():
      logging.info(name)
def Run():
  """Command-line driver: package --indir into --outfile.

  Parses options, packages the directory, then re-opens the result to
  log its contents.  Always returns 0 (option errors exit via
  parser.error).
  """
  logging.basicConfig(level=logging.INFO, format="[%(levelname)s] %(message)s")
  usage = "usage: %prog --indir=<dir> --outfile=<file>"
  parser = optparse.OptionParser(usage)
  parser.add_option("", "--indir",
                    help="an input directory where the extension lives")
  parser.add_option("", "--outfile",
                    help="extension package filename to create")
  options, _args = parser.parse_args()
  for name in ("indir", "outfile"):
    if not getattr(options, name):
      parser.error("missing required option --%s" % name)
  ExtensionDir(options.indir).writeToPackage(options.outfile)
  ExtensionPackage(options.outfile)  # logs the package contents
  return 0
# Script entry point: Run() always returns 0 on this path, so the process
# exits with status 0 (option errors exit earlier via parser.error).
if __name__ == "__main__":
  retcode = Run()
  sys.exit(retcode)
| Python |
#!/usr/bin/python
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# chromium_extension.py
import array
import hashlib
import logging
import optparse
import os
import random
import re
import shutil
import sys
import zipfile
if sys.version_info < (2, 6):
import simplejson as json
else:
import json
# Directory names pruned from the tree walk (version-control metadata).
ignore_dirs = [".svn", "CVS"]
# Regex patterns for filenames meant to be skipped when packaging
# (e.g. trailing-tilde editor backup files).
ignore_files = [re.compile(".*~")]
# Required manifest filename at the extension root.
MANIFEST_FILENAME = "manifest.json"
class ExtensionDir:
  """An unpacked extension source directory.

  On construction, walks the tree under |path| and records every
  directory and file, skipping directories named in ignore_dirs and
  files matching any pattern in ignore_files.
  """
  def __init__(self, path):
    self._root = os.path.abspath(path)
    self._dirs = []
    self._files = []
    for root, dirs, files in os.walk(path, topdown=True):
      # Prune ignored directory names in place so os.walk never descends
      # into them (effective because topdown=True).
      for dir in ignore_dirs:
        if dir in dirs:
          dirs.remove(dir)
      root = os.path.abspath(root)
      for dir in dirs:
        self._dirs.append(os.path.join(root, dir))
      for f in files:
        # BUG FIX: the old loop appended the file once per NON-matching
        # pattern, so with several ignore patterns a file was duplicated,
        # and a file matching one pattern was still added for the others.
        # Keep a file only when no ignore pattern matches it.
        if not any(m.match(f) for m in ignore_files):
          self._files.append(os.path.join(root, f))

  def validate(self):
    """Return True if the walk found the required manifest file."""
    if os.path.join(self._root, MANIFEST_FILENAME) not in self._files:
      logging.error("package is missing a valid %s file" % MANIFEST_FILENAME)
      return False
    return True

  def writeToPackage(self, path):
    """Write the extension as a single package file at |path|.

    Layout: the "Cr24" magic, three 32-bit ints (version, header size,
    manifest size), the manifest JSON, then a zip of the directory
    contents.  Returns True on success; on failure logs the error,
    removes any partial output, and returns False.
    """
    if not self.validate():
      return False
    try:
      f = open(os.path.join(self._root, MANIFEST_FILENAME), "r")
      manifest = json.load(f)
      f.close()

      # Temporary hack: If the manifest doesn't have an ID, generate a
      # random one. This is to make it easier for people to play with the
      # extension system while we don't have the real ID mechanism in place.
      if not "id" in manifest:
        random_id = ""
        for i in range(0, 40):
          # BUG FIX: randrange(0, 15) excludes 15, so "F" could never
          # appear in generated IDs; randrange(16) covers all hex digits.
          random_id += "0123456789ABCDEF"[random.randrange(16)]
        logging.info("Generated extension ID: %s" % random_id)
        manifest["id"] = random_id
        f = open(os.path.join(self._root, MANIFEST_FILENAME), "w")
        f.write(json.dumps(manifest, sort_keys=True, indent=2))
        f.close()

      # Zip up the extension contents next to the output path.
      zip_path = path + ".zip"
      if os.path.exists(zip_path):
        os.remove(zip_path)
      zip = zipfile.ZipFile(zip_path, "w")
      root_len = len(self._root)
      for file in self._files:
        arcname = file[root_len+1:]  # path relative to the extension root
        logging.debug("%s: %s" % (arcname, file))
        zip.write(file, arcname)
      zip.close()

      # Record the zip's SHA-256 in the in-memory manifest; it is emitted
      # in the package header JSON below.
      zip = open(zip_path, mode="rb")
      hash = hashlib.sha256()
      while True:
        buf = zip.read(32 * 1024)
        if not len(buf):
          break
        hash.update(buf)
      zip.close()
      manifest["zip_hash"] = hash.hexdigest()

      # This is a bit odd - we're actually appending a new zip file to the
      # end of the manifest. Believe it or not, this is actually an explicit
      # feature of the zip format, and many zip utilities (this library
      # and three others I tried) can still read the underlying zip file.
      if os.path.exists(path):
        os.remove(path)
      out = open(path, "wb")
      out.write("Cr24")  # Extension file magic number
      # The rest of the header is three ints: version, header size,
      # manifest size.  BUG FIX: use type code "i" (C int, 4 bytes on all
      # supported platforms) instead of "l", which is 8 bytes on LP64
      # systems and silently produced an oversized header while the
      # "header size" field still claimed 16.
      header = array.array("i")
      header.append(1)   # version
      header.append(16)  # header size
      manifest_json = json.dumps(manifest)
      header.append(len(manifest_json))  # manifest size
      header.tofile(out)  # NOTE(review): machine byte order; assumes little-endian host
      out.write(manifest_json)

      # Append the zip payload after the manifest.
      zip = open(zip_path, "rb")
      while True:
        buf = zip.read(32 * 1024)
        if not len(buf):
          break
        out.write(buf)
      zip.close()
      out.close()
      os.remove(zip_path)
      logging.info("created extension package %s" % path)
    except IOError as e:
      # BUG FIX: "except IOError, (errno, strerror)" breaks when the
      # IOError doesn't carry exactly two args; use the errno/strerror
      # attributes instead (%s because errno can be None).
      logging.error("error creating extension %s (%s, %s)" % (path, e.errno,
          e.strerror))
      try:
        if os.path.exists(path):
          os.remove(path)
      except OSError:  # best-effort cleanup; never mask the failure return
        pass
      return False
    return True
class ExtensionPackage:
  """Opens |path| as a zip archive and logs its entry names.

  Logs an error and stops early if the archive fails its CRC check.
  """
  def __init__(self, path):
    archive = zipfile.ZipFile(path)
    bad_entry = archive.testzip()
    if bad_entry:
      logging.error("error reading extension: %s", bad_entry)
      return
    logging.info("%s contents:" % path)
    for name in archive.namelist():
      logging.info(name)
def Run():
  """Command-line driver: package --indir into --outfile.

  Parses options, packages the directory, then re-opens the result to
  log its contents.  Always returns 0 (option errors exit via
  parser.error).
  """
  logging.basicConfig(level=logging.INFO, format="[%(levelname)s] %(message)s")
  usage = "usage: %prog --indir=<dir> --outfile=<file>"
  parser = optparse.OptionParser(usage)
  parser.add_option("", "--indir",
                    help="an input directory where the extension lives")
  parser.add_option("", "--outfile",
                    help="extension package filename to create")
  options, _args = parser.parse_args()
  for name in ("indir", "outfile"):
    if not getattr(options, name):
      parser.error("missing required option --%s" % name)
  ExtensionDir(options.indir).writeToPackage(options.outfile)
  ExtensionPackage(options.outfile)  # logs the package contents
  return 0
# Script entry point: Run() always returns 0 on this path, so the process
# exits with status 0 (option errors exit earlier via parser.error).
if __name__ == "__main__":
  retcode = Run()
  sys.exit(retcode)
| Python |
#!/usr/bin/python
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# chromium_extension.py
import array
import hashlib
import logging
import optparse
import os
import random
import re
import shutil
import sys
import zipfile
if sys.version_info < (2, 6):
import simplejson as json
else:
import json
# Directory names pruned from the tree walk (version-control metadata).
ignore_dirs = [".svn", "CVS"]
# Regex patterns for filenames meant to be skipped when packaging
# (e.g. trailing-tilde editor backup files).
ignore_files = [re.compile(".*~")]
# Required manifest filename at the extension root.
MANIFEST_FILENAME = "manifest.json"
class ExtensionDir:
  """An unpacked extension source directory.

  On construction, walks the tree under |path| and records every
  directory and file, skipping directories named in ignore_dirs and
  files matching any pattern in ignore_files.
  """
  def __init__(self, path):
    self._root = os.path.abspath(path)
    self._dirs = []
    self._files = []
    for root, dirs, files in os.walk(path, topdown=True):
      # Prune ignored directory names in place so os.walk never descends
      # into them (effective because topdown=True).
      for dir in ignore_dirs:
        if dir in dirs:
          dirs.remove(dir)
      root = os.path.abspath(root)
      for dir in dirs:
        self._dirs.append(os.path.join(root, dir))
      for f in files:
        # BUG FIX: the old loop appended the file once per NON-matching
        # pattern, so with several ignore patterns a file was duplicated,
        # and a file matching one pattern was still added for the others.
        # Keep a file only when no ignore pattern matches it.
        if not any(m.match(f) for m in ignore_files):
          self._files.append(os.path.join(root, f))

  def validate(self):
    """Return True if the walk found the required manifest file."""
    if os.path.join(self._root, MANIFEST_FILENAME) not in self._files:
      logging.error("package is missing a valid %s file" % MANIFEST_FILENAME)
      return False
    return True

  def writeToPackage(self, path):
    """Write the extension as a single package file at |path|.

    Layout: the "Cr24" magic, three 32-bit ints (version, header size,
    manifest size), the manifest JSON, then a zip of the directory
    contents.  Returns True on success; on failure logs the error,
    removes any partial output, and returns False.
    """
    if not self.validate():
      return False
    try:
      f = open(os.path.join(self._root, MANIFEST_FILENAME), "r")
      manifest = json.load(f)
      f.close()

      # Temporary hack: If the manifest doesn't have an ID, generate a
      # random one. This is to make it easier for people to play with the
      # extension system while we don't have the real ID mechanism in place.
      if not "id" in manifest:
        random_id = ""
        for i in range(0, 40):
          # BUG FIX: randrange(0, 15) excludes 15, so "F" could never
          # appear in generated IDs; randrange(16) covers all hex digits.
          random_id += "0123456789ABCDEF"[random.randrange(16)]
        logging.info("Generated extension ID: %s" % random_id)
        manifest["id"] = random_id
        f = open(os.path.join(self._root, MANIFEST_FILENAME), "w")
        f.write(json.dumps(manifest, sort_keys=True, indent=2))
        f.close()

      # Zip up the extension contents next to the output path.
      zip_path = path + ".zip"
      if os.path.exists(zip_path):
        os.remove(zip_path)
      zip = zipfile.ZipFile(zip_path, "w")
      root_len = len(self._root)
      for file in self._files:
        arcname = file[root_len+1:]  # path relative to the extension root
        logging.debug("%s: %s" % (arcname, file))
        zip.write(file, arcname)
      zip.close()

      # Record the zip's SHA-256 in the in-memory manifest; it is emitted
      # in the package header JSON below.
      zip = open(zip_path, mode="rb")
      hash = hashlib.sha256()
      while True:
        buf = zip.read(32 * 1024)
        if not len(buf):
          break
        hash.update(buf)
      zip.close()
      manifest["zip_hash"] = hash.hexdigest()

      # This is a bit odd - we're actually appending a new zip file to the
      # end of the manifest. Believe it or not, this is actually an explicit
      # feature of the zip format, and many zip utilities (this library
      # and three others I tried) can still read the underlying zip file.
      if os.path.exists(path):
        os.remove(path)
      out = open(path, "wb")
      out.write("Cr24")  # Extension file magic number
      # The rest of the header is three ints: version, header size,
      # manifest size.  BUG FIX: use type code "i" (C int, 4 bytes on all
      # supported platforms) instead of "l", which is 8 bytes on LP64
      # systems and silently produced an oversized header while the
      # "header size" field still claimed 16.
      header = array.array("i")
      header.append(1)   # version
      header.append(16)  # header size
      manifest_json = json.dumps(manifest)
      header.append(len(manifest_json))  # manifest size
      header.tofile(out)  # NOTE(review): machine byte order; assumes little-endian host
      out.write(manifest_json)

      # Append the zip payload after the manifest.
      zip = open(zip_path, "rb")
      while True:
        buf = zip.read(32 * 1024)
        if not len(buf):
          break
        out.write(buf)
      zip.close()
      out.close()
      os.remove(zip_path)
      logging.info("created extension package %s" % path)
    except IOError as e:
      # BUG FIX: "except IOError, (errno, strerror)" breaks when the
      # IOError doesn't carry exactly two args; use the errno/strerror
      # attributes instead (%s because errno can be None).
      logging.error("error creating extension %s (%s, %s)" % (path, e.errno,
          e.strerror))
      try:
        if os.path.exists(path):
          os.remove(path)
      except OSError:  # best-effort cleanup; never mask the failure return
        pass
      return False
    return True
class ExtensionPackage:
  """Opens |path| as a zip archive and logs its entry names.

  Logs an error and stops early if the archive fails its CRC check.
  """
  def __init__(self, path):
    archive = zipfile.ZipFile(path)
    bad_entry = archive.testzip()
    if bad_entry:
      logging.error("error reading extension: %s", bad_entry)
      return
    logging.info("%s contents:" % path)
    for name in archive.namelist():
      logging.info(name)
def Run():
  """Command-line driver: package --indir into --outfile.

  Parses options, packages the directory, then re-opens the result to
  log its contents.  Always returns 0 (option errors exit via
  parser.error).
  """
  logging.basicConfig(level=logging.INFO, format="[%(levelname)s] %(message)s")
  usage = "usage: %prog --indir=<dir> --outfile=<file>"
  parser = optparse.OptionParser(usage)
  parser.add_option("", "--indir",
                    help="an input directory where the extension lives")
  parser.add_option("", "--outfile",
                    help="extension package filename to create")
  options, _args = parser.parse_args()
  for name in ("indir", "outfile"):
    if not getattr(options, name):
      parser.error("missing required option --%s" % name)
  ExtensionDir(options.indir).writeToPackage(options.outfile)
  ExtensionPackage(options.outfile)  # logs the package contents
  return 0
# Script entry point: Run() always returns 0 on this path, so the process
# exits with status 0 (option errors exit earlier via parser.error).
if __name__ == "__main__":
  retcode = Run()
  sys.exit(retcode)
| Python |
#!/usr/bin/python
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# chromium_extension.py
import array
import hashlib
import logging
import optparse
import os
import random
import re
import shutil
import sys
import zipfile
if sys.version_info < (2, 6):
import simplejson as json
else:
import json
# Directory names pruned from the tree walk (version-control metadata).
ignore_dirs = [".svn", "CVS"]
# Regex patterns for filenames meant to be skipped when packaging
# (e.g. trailing-tilde editor backup files).
ignore_files = [re.compile(".*~")]
# Required manifest filename at the extension root.
MANIFEST_FILENAME = "manifest.json"
class ExtensionDir:
  """An unpacked extension source directory.

  On construction, walks the tree under |path| and records every
  directory and file, skipping directories named in ignore_dirs and
  files matching any pattern in ignore_files.
  """
  def __init__(self, path):
    self._root = os.path.abspath(path)
    self._dirs = []
    self._files = []
    for root, dirs, files in os.walk(path, topdown=True):
      # Prune ignored directory names in place so os.walk never descends
      # into them (effective because topdown=True).
      for dir in ignore_dirs:
        if dir in dirs:
          dirs.remove(dir)
      root = os.path.abspath(root)
      for dir in dirs:
        self._dirs.append(os.path.join(root, dir))
      for f in files:
        # BUG FIX: the old loop appended the file once per NON-matching
        # pattern, so with several ignore patterns a file was duplicated,
        # and a file matching one pattern was still added for the others.
        # Keep a file only when no ignore pattern matches it.
        if not any(m.match(f) for m in ignore_files):
          self._files.append(os.path.join(root, f))

  def validate(self):
    """Return True if the walk found the required manifest file."""
    if os.path.join(self._root, MANIFEST_FILENAME) not in self._files:
      logging.error("package is missing a valid %s file" % MANIFEST_FILENAME)
      return False
    return True

  def writeToPackage(self, path):
    """Write the extension as a single package file at |path|.

    Layout: the "Cr24" magic, three 32-bit ints (version, header size,
    manifest size), the manifest JSON, then a zip of the directory
    contents.  Returns True on success; on failure logs the error,
    removes any partial output, and returns False.
    """
    if not self.validate():
      return False
    try:
      f = open(os.path.join(self._root, MANIFEST_FILENAME), "r")
      manifest = json.load(f)
      f.close()

      # Temporary hack: If the manifest doesn't have an ID, generate a
      # random one. This is to make it easier for people to play with the
      # extension system while we don't have the real ID mechanism in place.
      if not "id" in manifest:
        random_id = ""
        for i in range(0, 40):
          # BUG FIX: randrange(0, 15) excludes 15, so "F" could never
          # appear in generated IDs; randrange(16) covers all hex digits.
          random_id += "0123456789ABCDEF"[random.randrange(16)]
        logging.info("Generated extension ID: %s" % random_id)
        manifest["id"] = random_id
        f = open(os.path.join(self._root, MANIFEST_FILENAME), "w")
        f.write(json.dumps(manifest, sort_keys=True, indent=2))
        f.close()

      # Zip up the extension contents next to the output path.
      zip_path = path + ".zip"
      if os.path.exists(zip_path):
        os.remove(zip_path)
      zip = zipfile.ZipFile(zip_path, "w")
      root_len = len(self._root)
      for file in self._files:
        arcname = file[root_len+1:]  # path relative to the extension root
        logging.debug("%s: %s" % (arcname, file))
        zip.write(file, arcname)
      zip.close()

      # Record the zip's SHA-256 in the in-memory manifest; it is emitted
      # in the package header JSON below.
      zip = open(zip_path, mode="rb")
      hash = hashlib.sha256()
      while True:
        buf = zip.read(32 * 1024)
        if not len(buf):
          break
        hash.update(buf)
      zip.close()
      manifest["zip_hash"] = hash.hexdigest()

      # This is a bit odd - we're actually appending a new zip file to the
      # end of the manifest. Believe it or not, this is actually an explicit
      # feature of the zip format, and many zip utilities (this library
      # and three others I tried) can still read the underlying zip file.
      if os.path.exists(path):
        os.remove(path)
      out = open(path, "wb")
      out.write("Cr24")  # Extension file magic number
      # The rest of the header is three ints: version, header size,
      # manifest size.  BUG FIX: use type code "i" (C int, 4 bytes on all
      # supported platforms) instead of "l", which is 8 bytes on LP64
      # systems and silently produced an oversized header while the
      # "header size" field still claimed 16.
      header = array.array("i")
      header.append(1)   # version
      header.append(16)  # header size
      manifest_json = json.dumps(manifest)
      header.append(len(manifest_json))  # manifest size
      header.tofile(out)  # NOTE(review): machine byte order; assumes little-endian host
      out.write(manifest_json)

      # Append the zip payload after the manifest.
      zip = open(zip_path, "rb")
      while True:
        buf = zip.read(32 * 1024)
        if not len(buf):
          break
        out.write(buf)
      zip.close()
      out.close()
      os.remove(zip_path)
      logging.info("created extension package %s" % path)
    except IOError as e:
      # BUG FIX: "except IOError, (errno, strerror)" breaks when the
      # IOError doesn't carry exactly two args; use the errno/strerror
      # attributes instead (%s because errno can be None).
      logging.error("error creating extension %s (%s, %s)" % (path, e.errno,
          e.strerror))
      try:
        if os.path.exists(path):
          os.remove(path)
      except OSError:  # best-effort cleanup; never mask the failure return
        pass
      return False
    return True
class ExtensionPackage:
  """Opens |path| as a zip archive and logs its entry names.

  Logs an error and stops early if the archive fails its CRC check.
  """
  def __init__(self, path):
    archive = zipfile.ZipFile(path)
    bad_entry = archive.testzip()
    if bad_entry:
      logging.error("error reading extension: %s", bad_entry)
      return
    logging.info("%s contents:" % path)
    for name in archive.namelist():
      logging.info(name)
def Run():
  """Command-line driver: package --indir into --outfile.

  Parses options, packages the directory, then re-opens the result to
  log its contents.  Always returns 0 (option errors exit via
  parser.error).
  """
  logging.basicConfig(level=logging.INFO, format="[%(levelname)s] %(message)s")
  usage = "usage: %prog --indir=<dir> --outfile=<file>"
  parser = optparse.OptionParser(usage)
  parser.add_option("", "--indir",
                    help="an input directory where the extension lives")
  parser.add_option("", "--outfile",
                    help="extension package filename to create")
  options, _args = parser.parse_args()
  for name in ("indir", "outfile"):
    if not getattr(options, name):
      parser.error("missing required option --%s" % name)
  ExtensionDir(options.indir).writeToPackage(options.outfile)
  ExtensionPackage(options.outfile)  # logs the package contents
  return 0
# Script entry point: Run() always returns 0 on this path, so the process
# exits with status 0 (option errors exit earlier via parser.error).
if __name__ == "__main__":
  retcode = Run()
  sys.exit(retcode)
| Python |
#!/usr/bin/python
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# chromium_extension.py
import array
import hashlib
import logging
import optparse
import os
import random
import re
import shutil
import sys
import zipfile
if sys.version_info < (2, 6):
import simplejson as json
else:
import json
# Directory names pruned from the tree walk (version-control metadata).
ignore_dirs = [".svn", "CVS"]
# Regex patterns for filenames meant to be skipped when packaging
# (e.g. trailing-tilde editor backup files).
ignore_files = [re.compile(".*~")]
# Required manifest filename at the extension root.
MANIFEST_FILENAME = "manifest.json"
class ExtensionDir:
  """An unpacked extension source directory.

  On construction, walks the tree under |path| and records every
  directory and file, skipping directories named in ignore_dirs and
  files matching any pattern in ignore_files.
  """
  def __init__(self, path):
    self._root = os.path.abspath(path)
    self._dirs = []
    self._files = []
    for root, dirs, files in os.walk(path, topdown=True):
      # Prune ignored directory names in place so os.walk never descends
      # into them (effective because topdown=True).
      for dir in ignore_dirs:
        if dir in dirs:
          dirs.remove(dir)
      root = os.path.abspath(root)
      for dir in dirs:
        self._dirs.append(os.path.join(root, dir))
      for f in files:
        # BUG FIX: the old loop appended the file once per NON-matching
        # pattern, so with several ignore patterns a file was duplicated,
        # and a file matching one pattern was still added for the others.
        # Keep a file only when no ignore pattern matches it.
        if not any(m.match(f) for m in ignore_files):
          self._files.append(os.path.join(root, f))

  def validate(self):
    """Return True if the walk found the required manifest file."""
    if os.path.join(self._root, MANIFEST_FILENAME) not in self._files:
      logging.error("package is missing a valid %s file" % MANIFEST_FILENAME)
      return False
    return True

  def writeToPackage(self, path):
    """Write the extension as a single package file at |path|.

    Layout: the "Cr24" magic, three 32-bit ints (version, header size,
    manifest size), the manifest JSON, then a zip of the directory
    contents.  Returns True on success; on failure logs the error,
    removes any partial output, and returns False.
    """
    if not self.validate():
      return False
    try:
      f = open(os.path.join(self._root, MANIFEST_FILENAME), "r")
      manifest = json.load(f)
      f.close()

      # Temporary hack: If the manifest doesn't have an ID, generate a
      # random one. This is to make it easier for people to play with the
      # extension system while we don't have the real ID mechanism in place.
      if not "id" in manifest:
        random_id = ""
        for i in range(0, 40):
          # BUG FIX: randrange(0, 15) excludes 15, so "F" could never
          # appear in generated IDs; randrange(16) covers all hex digits.
          random_id += "0123456789ABCDEF"[random.randrange(16)]
        logging.info("Generated extension ID: %s" % random_id)
        manifest["id"] = random_id
        f = open(os.path.join(self._root, MANIFEST_FILENAME), "w")
        f.write(json.dumps(manifest, sort_keys=True, indent=2))
        f.close()

      # Zip up the extension contents next to the output path.
      zip_path = path + ".zip"
      if os.path.exists(zip_path):
        os.remove(zip_path)
      zip = zipfile.ZipFile(zip_path, "w")
      root_len = len(self._root)
      for file in self._files:
        arcname = file[root_len+1:]  # path relative to the extension root
        logging.debug("%s: %s" % (arcname, file))
        zip.write(file, arcname)
      zip.close()

      # Record the zip's SHA-256 in the in-memory manifest; it is emitted
      # in the package header JSON below.
      zip = open(zip_path, mode="rb")
      hash = hashlib.sha256()
      while True:
        buf = zip.read(32 * 1024)
        if not len(buf):
          break
        hash.update(buf)
      zip.close()
      manifest["zip_hash"] = hash.hexdigest()

      # This is a bit odd - we're actually appending a new zip file to the
      # end of the manifest. Believe it or not, this is actually an explicit
      # feature of the zip format, and many zip utilities (this library
      # and three others I tried) can still read the underlying zip file.
      if os.path.exists(path):
        os.remove(path)
      out = open(path, "wb")
      out.write("Cr24")  # Extension file magic number
      # The rest of the header is three ints: version, header size,
      # manifest size.  BUG FIX: use type code "i" (C int, 4 bytes on all
      # supported platforms) instead of "l", which is 8 bytes on LP64
      # systems and silently produced an oversized header while the
      # "header size" field still claimed 16.
      header = array.array("i")
      header.append(1)   # version
      header.append(16)  # header size
      manifest_json = json.dumps(manifest)
      header.append(len(manifest_json))  # manifest size
      header.tofile(out)  # NOTE(review): machine byte order; assumes little-endian host
      out.write(manifest_json)

      # Append the zip payload after the manifest.
      zip = open(zip_path, "rb")
      while True:
        buf = zip.read(32 * 1024)
        if not len(buf):
          break
        out.write(buf)
      zip.close()
      out.close()
      os.remove(zip_path)
      logging.info("created extension package %s" % path)
    except IOError as e:
      # BUG FIX: "except IOError, (errno, strerror)" breaks when the
      # IOError doesn't carry exactly two args; use the errno/strerror
      # attributes instead (%s because errno can be None).
      logging.error("error creating extension %s (%s, %s)" % (path, e.errno,
          e.strerror))
      try:
        if os.path.exists(path):
          os.remove(path)
      except OSError:  # best-effort cleanup; never mask the failure return
        pass
      return False
    return True
class ExtensionPackage:
  """Opens |path| as a zip archive and logs its entry names.

  Logs an error and stops early if the archive fails its CRC check.
  """
  def __init__(self, path):
    archive = zipfile.ZipFile(path)
    bad_entry = archive.testzip()
    if bad_entry:
      logging.error("error reading extension: %s", bad_entry)
      return
    logging.info("%s contents:" % path)
    for name in archive.namelist():
      logging.info(name)
def Run():
  """Command-line driver: package --indir into --outfile.

  Parses options, packages the directory, then re-opens the result to
  log its contents.  Always returns 0 (option errors exit via
  parser.error).
  """
  logging.basicConfig(level=logging.INFO, format="[%(levelname)s] %(message)s")
  usage = "usage: %prog --indir=<dir> --outfile=<file>"
  parser = optparse.OptionParser(usage)
  parser.add_option("", "--indir",
                    help="an input directory where the extension lives")
  parser.add_option("", "--outfile",
                    help="extension package filename to create")
  options, _args = parser.parse_args()
  for name in ("indir", "outfile"):
    if not getattr(options, name):
      parser.error("missing required option --%s" % name)
  ExtensionDir(options.indir).writeToPackage(options.outfile)
  ExtensionPackage(options.outfile)  # logs the package contents
  return 0
# Script entry point: Run() always returns 0 on this path, so the process
# exits with status 0 (option errors exit earlier via parser.error).
if __name__ == "__main__":
  retcode = Run()
  sys.exit(retcode)
| Python |
#!/usr/bin/python
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# chromium_extension.py
import array
import hashlib
import logging
import optparse
import os
import random
import re
import shutil
import sys
import zipfile
if sys.version_info < (2, 6):
import simplejson as json
else:
import json
# Directory names pruned from the tree walk (version-control metadata).
ignore_dirs = [".svn", "CVS"]
# Regex patterns for filenames meant to be skipped when packaging
# (e.g. trailing-tilde editor backup files).
ignore_files = [re.compile(".*~")]
# Required manifest filename at the extension root.
MANIFEST_FILENAME = "manifest.json"
class ExtensionDir:
  """An unpacked extension source directory.

  On construction, walks the tree under |path| and records every
  directory and file, skipping directories named in ignore_dirs and
  files matching any pattern in ignore_files.
  """
  def __init__(self, path):
    self._root = os.path.abspath(path)
    self._dirs = []
    self._files = []
    for root, dirs, files in os.walk(path, topdown=True):
      # Prune ignored directory names in place so os.walk never descends
      # into them (effective because topdown=True).
      for dir in ignore_dirs:
        if dir in dirs:
          dirs.remove(dir)
      root = os.path.abspath(root)
      for dir in dirs:
        self._dirs.append(os.path.join(root, dir))
      for f in files:
        # BUG FIX: the old loop appended the file once per NON-matching
        # pattern, so with several ignore patterns a file was duplicated,
        # and a file matching one pattern was still added for the others.
        # Keep a file only when no ignore pattern matches it.
        if not any(m.match(f) for m in ignore_files):
          self._files.append(os.path.join(root, f))

  def validate(self):
    """Return True if the walk found the required manifest file."""
    if os.path.join(self._root, MANIFEST_FILENAME) not in self._files:
      logging.error("package is missing a valid %s file" % MANIFEST_FILENAME)
      return False
    return True

  def writeToPackage(self, path):
    """Write the extension as a single package file at |path|.

    Layout: the "Cr24" magic, three 32-bit ints (version, header size,
    manifest size), the manifest JSON, then a zip of the directory
    contents.  Returns True on success; on failure logs the error,
    removes any partial output, and returns False.
    """
    if not self.validate():
      return False
    try:
      f = open(os.path.join(self._root, MANIFEST_FILENAME), "r")
      manifest = json.load(f)
      f.close()

      # Temporary hack: If the manifest doesn't have an ID, generate a
      # random one. This is to make it easier for people to play with the
      # extension system while we don't have the real ID mechanism in place.
      if not "id" in manifest:
        random_id = ""
        for i in range(0, 40):
          # BUG FIX: randrange(0, 15) excludes 15, so "F" could never
          # appear in generated IDs; randrange(16) covers all hex digits.
          random_id += "0123456789ABCDEF"[random.randrange(16)]
        logging.info("Generated extension ID: %s" % random_id)
        manifest["id"] = random_id
        f = open(os.path.join(self._root, MANIFEST_FILENAME), "w")
        f.write(json.dumps(manifest, sort_keys=True, indent=2))
        f.close()

      # Zip up the extension contents next to the output path.
      zip_path = path + ".zip"
      if os.path.exists(zip_path):
        os.remove(zip_path)
      zip = zipfile.ZipFile(zip_path, "w")
      root_len = len(self._root)
      for file in self._files:
        arcname = file[root_len+1:]  # path relative to the extension root
        logging.debug("%s: %s" % (arcname, file))
        zip.write(file, arcname)
      zip.close()

      # Record the zip's SHA-256 in the in-memory manifest; it is emitted
      # in the package header JSON below.
      zip = open(zip_path, mode="rb")
      hash = hashlib.sha256()
      while True:
        buf = zip.read(32 * 1024)
        if not len(buf):
          break
        hash.update(buf)
      zip.close()
      manifest["zip_hash"] = hash.hexdigest()

      # This is a bit odd - we're actually appending a new zip file to the
      # end of the manifest. Believe it or not, this is actually an explicit
      # feature of the zip format, and many zip utilities (this library
      # and three others I tried) can still read the underlying zip file.
      if os.path.exists(path):
        os.remove(path)
      out = open(path, "wb")
      out.write("Cr24")  # Extension file magic number
      # The rest of the header is three ints: version, header size,
      # manifest size.  BUG FIX: use type code "i" (C int, 4 bytes on all
      # supported platforms) instead of "l", which is 8 bytes on LP64
      # systems and silently produced an oversized header while the
      # "header size" field still claimed 16.
      header = array.array("i")
      header.append(1)   # version
      header.append(16)  # header size
      manifest_json = json.dumps(manifest)
      header.append(len(manifest_json))  # manifest size
      header.tofile(out)  # NOTE(review): machine byte order; assumes little-endian host
      out.write(manifest_json)

      # Append the zip payload after the manifest.
      zip = open(zip_path, "rb")
      while True:
        buf = zip.read(32 * 1024)
        if not len(buf):
          break
        out.write(buf)
      zip.close()
      out.close()
      os.remove(zip_path)
      logging.info("created extension package %s" % path)
    except IOError as e:
      # BUG FIX: "except IOError, (errno, strerror)" breaks when the
      # IOError doesn't carry exactly two args; use the errno/strerror
      # attributes instead (%s because errno can be None).
      logging.error("error creating extension %s (%s, %s)" % (path, e.errno,
          e.strerror))
      try:
        if os.path.exists(path):
          os.remove(path)
      except OSError:  # best-effort cleanup; never mask the failure return
        pass
      return False
    return True
class ExtensionPackage:
  """Opens |path| as a zip archive and logs its entry names.

  Logs an error and stops early if the archive fails its CRC check.
  """
  def __init__(self, path):
    archive = zipfile.ZipFile(path)
    bad_entry = archive.testzip()
    if bad_entry:
      logging.error("error reading extension: %s", bad_entry)
      return
    logging.info("%s contents:" % path)
    for name in archive.namelist():
      logging.info(name)
def Run():
  """Command-line driver: package --indir into --outfile.

  Parses options, packages the directory, then re-opens the result to
  log its contents.  Always returns 0 (option errors exit via
  parser.error).
  """
  logging.basicConfig(level=logging.INFO, format="[%(levelname)s] %(message)s")
  usage = "usage: %prog --indir=<dir> --outfile=<file>"
  parser = optparse.OptionParser(usage)
  parser.add_option("", "--indir",
                    help="an input directory where the extension lives")
  parser.add_option("", "--outfile",
                    help="extension package filename to create")
  options, _args = parser.parse_args()
  for name in ("indir", "outfile"):
    if not getattr(options, name):
      parser.error("missing required option --%s" % name)
  ExtensionDir(options.indir).writeToPackage(options.outfile)
  ExtensionPackage(options.outfile)  # logs the package contents
  return 0
# Script entry point: Run() always returns 0 on this path, so the process
# exits with status 0 (option errors exit earlier via parser.error).
if __name__ == "__main__":
  retcode = Run()
  sys.exit(retcode)
| Python |
#!/usr/bin/python
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# chromium_extension.py
import array
import hashlib
import logging
import optparse
import os
import random
import re
import shutil
import sys
import zipfile
if sys.version_info < (2, 6):
import simplejson as json
else:
import json
# Directory names pruned from the tree walk (version-control metadata).
ignore_dirs = [".svn", "CVS"]
# Regex patterns for filenames meant to be skipped when packaging
# (e.g. trailing-tilde editor backup files).
ignore_files = [re.compile(".*~")]
# Required manifest filename at the extension root.
MANIFEST_FILENAME = "manifest.json"
class ExtensionDir:
def __init__(self, path):
self._root = os.path.abspath(path)
self._dirs = []
self._files = []
for root, dirs, files in os.walk(path, topdown=True):
for dir in ignore_dirs:
if dir in dirs:
dirs.remove(dir)
root = os.path.abspath(root)
for dir in dirs:
self._dirs.append(os.path.join(root, dir))
for f in files:
for match in ignore_files:
if not match.match(f):
self._files.append(os.path.join(root, f))
def validate(self):
if os.path.join(self._root, MANIFEST_FILENAME) not in self._files:
logging.error("package is missing a valid %s file" % MANIFEST_FILENAME)
return False
return True
def writeToPackage(self, path):
if not self.validate():
return False
try:
f = open(os.path.join(self._root, MANIFEST_FILENAME), "r")
manifest = json.load(f)
f.close()
# Temporary hack: If the manifest doesn't have an ID, generate a random
# one. This is to make it easier for people to play with the extension
# system while we don't have the real ID mechanism in place.
if not "id" in manifest:
random_id = ""
for i in range(0, 40):
random_id += "0123456789ABCDEF"[random.randrange(0, 15)]
logging.info("Generated extension ID: %s" % random_id)
manifest["id"] = random_id;
f = open(os.path.join(self._root, MANIFEST_FILENAME), "w")
f.write(json.dumps(manifest, sort_keys=True, indent=2));
f.close();
zip_path = path + ".zip"
if os.path.exists(zip_path):
os.remove(zip_path)
zip = zipfile.ZipFile(zip_path, "w")
(root, dir) = os.path.split(self._root)
root_len = len(self._root)
for file in self._files:
arcname = file[root_len+1:]
logging.debug("%s: %s" % (arcname, file))
zip.write(file, arcname)
zip.close()
zip = open(zip_path, mode="rb")
hash = hashlib.sha256()
while True:
buf = zip.read(32 * 1024)
if not len(buf):
break
hash.update(buf)
zip.close()
manifest["zip_hash"] = hash.hexdigest()
# This is a bit odd - we're actually appending a new zip file to the end
# of the manifest. Believe it or not, this is actually an explicit
# feature of the zip format, and many zip utilities (this library
# and three others I tried) can still read the underlying zip file.
if os.path.exists(path):
os.remove(path)
out = open(path, "wb")
out.write("Cr24") # Extension file magic number
# The rest of the header is currently made up of three ints:
# version, header size, manifest size
header = array.array("l")
header.append(1) # version
header.append(16) # header size
manifest_json = json.dumps(manifest);
header.append(len(manifest_json)) # manifest size
header.tofile(out)
out.write(manifest_json);
zip = open(zip_path, "rb")
while True:
buf = zip.read(32 * 1024)
if not len(buf):
break
out.write(buf)
zip.close()
out.close()
os.remove(zip_path)
logging.info("created extension package %s" % path)
except IOError, (errno, strerror):
logging.error("error creating extension %s (%d, %s)" % (path, errno,
strerror))
try:
if os.path.exists(path):
os.remove(path)
except:
pass
return False
return True
class ExtensionPackage:
    """Opens a packaged extension and logs the names of its zip contents."""

    def __init__(self, path):
        archive = zipfile.ZipFile(path)
        bad_entry = archive.testzip()
        if bad_entry:
            logging.error("error reading extension: %s", bad_entry)
            return
        logging.info("%s contents:" % path)
        for entry in archive.namelist():
            logging.info(entry)
def Run():
    """CLI entry point: build an extension package from command-line options."""
    logging.basicConfig(level=logging.INFO, format="[%(levelname)s] %(message)s")
    parser = optparse.OptionParser("usage: %prog --indir=<dir> --outfile=<file>")
    parser.add_option("", "--indir",
                      help="an input directory where the extension lives")
    parser.add_option("", "--outfile",
                      help="extension package filename to create")
    options, args = parser.parse_args()
    if not options.indir:
        parser.error("missing required option --indir")
    if not options.outfile:
        parser.error("missing required option --outfile")
    source_dir = ExtensionDir(options.indir)
    source_dir.writeToPackage(options.outfile)
    # Re-open the freshly written package so its contents get logged.
    ExtensionPackage(options.outfile)
    return 0
if __name__ == "__main__":
    # Script entry point: propagate Run()'s status to the shell.
    retcode = Run()
    sys.exit(retcode)
| Python |
#! python
from elixir import *
from model import *
import datetime
class Hub(object):
    """Creates the FEX demo schema and fills it with sample fixtures.

    The original pop_* methods repeated the same create-assign-commit
    boilerplate for every record; that duplication is factored into the
    private _add_* helpers below.  Records are created in the same order,
    with the same field values and one session.commit() per record, as
    before.
    """

    def __init__(self, name):
        self.name = name

    def _add_entite(self, codent, nom, typ):
        # Create one Entite row and commit it immediately.
        e = Entite()
        e.codent = codent
        e.nom = nom
        e.typ = typ
        session.commit()
        return e

    def _add_user(self, codusr, name, entite, is_admin):
        # Create one User row attached to an Entite and commit it.
        u = User()
        u.codusr = codusr
        u.name = name
        u.entite = entite
        u.IsAdmin = is_admin
        session.commit()
        return u

    def _add_fex(self, nom, description, expediteur, destinataire):
        # Create one file-exchange record in the initial "CREE" state.
        f = Fex()
        f.nom = nom
        f.description = description
        f.status = u"CREE"
        f.expediteur = expediteur
        f.destinataire = destinataire
        session.commit()
        return f

    def pop_entite(self):
        """Create the root company plus two client entities."""
        # SRA is the root entity.
        self._add_entite(u"SRA", u"SRA INFORMATIQUE", u'ROOT')
        # Two client entities.
        self._add_entite(u"CLIENT1", u"CLIENT No 1", u'CLIENT')
        self._add_entite(u"CLIENT2", u"CLIENT No 2", u'CLIENT')

    def pop_user(self):
        """Create an admin and regular users for each entity."""
        SRA = Entite.get_by(codent=u"SRA")
        self._add_user(u"SRA_ADM1", u"Admin SRA", SRA, True)
        self._add_user(u"SRA_USR1", u"USER SRA", SRA, False)
        CLI1 = Entite.get_by(codent=u"CLIENT1")
        self._add_user(u"CLI1_ADM", u"Admin CLIENT1", CLI1, True)
        self._add_user(u"CLI1_USR1", u"USER1 CLIENT1", CLI1, False)
        self._add_user(u"CLI1_USR2", u"USER2 CLIENT1", CLI1, False)
        CLI2 = Entite.get_by(codent=u"CLIENT2")
        self._add_user(u"CLI2_ADM", u"Admin CLIENT2", CLI2, True)
        # NOTE(review): display name u"USER2 CLIENT2" for CLI2_USR1 is kept
        # verbatim from the original fixtures (possibly a copy-paste typo).
        self._add_user(u"CLI2_USR1", u"USER2 CLIENT2", CLI2, False)

    def pop_fex(self):
        """Create two sample file exchanges between existing users."""
        self._add_fex(u"FEX1", u"1er echange de fichier",
                      User.get_by(codusr=u'SRA_USR1'),
                      User.get_by(codusr=u'CLI1_USR1'))
        self._add_fex(u"FEX2", u"2eme echange de fichier",
                      User.get_by(codusr=u'CLI2_USR1'),
                      User.get_by(codusr=u'SRA_USR1'))

    def populate(self):
        """Insert all fixtures: entities, then users, then exchanges."""
        self.pop_entite()
        self.pop_user()
        self.pop_fex()

    def init_base(self):
        """Set up the elixir mappings, create the tables, and populate them."""
        setup_all(True)
        create_all()
        self.populate()
def test():
    """Create the demo database schema and fill it with the sample data."""
    H = Hub('HUB1')
    H.init_base()

if __name__ == '__main__':
    test()
| Python |
from elixir import *
import datetime
# Bind the elixir metadata to a local SQLite database file; SQL echo disabled.
metadata.bind = "sqlite:///fex.dbf"
metadata.bind.echo = False
class Entite(Entity):
    # An organizational entity (the root company or a client), table ENTITE.
    using_options(tablename=u'ENTITE')
    codent = Field(Unicode(10))                   # short code, e.g. u"SRA"
    nom = Field(Unicode(30))                      # display name
    description = Field(UnicodeText, default=u"" )
    typ = Field(Unicode(10), default=u'CLIENT')   # u'ROOT' or u'CLIENT' in the fixtures
    parent = ManyToOne(u'Entite')                 # optional parent (tree structure)
    def __repr__(self):
        return '<Entite : %s : %s : %s>' % (self.codent, self.typ, self.parent)
class User(Entity):
    # An account belonging to an Entite; IsAdmin marks entity administrators.
    using_options(tablename=u'USER')
    codusr = Field(Unicode(10))    # short login code, e.g. u"SRA_ADM1"
    name = Field(Unicode(30))      # display name
    entite = ManyToOne(u'Entite')  # owning entity
    email = Field(Unicode(20))     # NOTE(review): 20 chars is tight for emails -- confirm
    IsAdmin = Field(Boolean, default=False)
    def __repr__(self):
        return '<User : %s : %s : %s>' % (self.codusr, self.entite, self.IsAdmin)
class Fex(Entity):
    # A file exchange between two users, with lifecycle timestamps.
    using_options(tablename=u'FEX')
    nom = Field(Unicode(30))
    description = Field(UnicodeText)
    status = Field(Unicode(10))            # e.g. u"CREE" in the fixtures
    expediteur = ManyToOne(u'User')        # sender
    destinataire = ManyToOne(u'User')      # recipient
    date_creation = Field(DateTime, default=datetime.datetime.now)
    date_modif = Field(DateTime)           # last modification (unset at creation)
    date_depot = Field(DateTime)           # when the file was deposited
    date_retrait = Field(DateTime)         # when the file was retrieved
    def __repr__(self):
        return '<File Exchange %s %s %s %s>' % (self.nom, self.status, self.expediteur, self.destinataire)
| Python |
import os
import sys
import warnings
import opcode # opcode is not a virtualenv module, so we can use it to find the stdlib
# Important! To work on pypy, this must be a module that resides in the
# lib-python/modified-x.y.z directory
dirname = os.path.dirname
distutils_path = os.path.join(os.path.dirname(opcode.__file__), 'distutils')
if os.path.normpath(distutils_path) == os.path.dirname(os.path.normpath(__file__)):
    # Bug fix: the message contained a bare '%s' that was never substituted;
    # interpolate the offending path so the warning is actually informative.
    warnings.warn(
        "The virtualenv distutils package at %s appears to be in the same location as the system distutils?"
        % distutils_path)
else:
    # Become the real distutils package: put the stdlib distutils directory
    # on this package's __path__ and execute its __init__ in this namespace.
    __path__.insert(0, distutils_path)
    exec(open(os.path.join(distutils_path, '__init__.py')).read())
# After the exec() above we may be running inside the real distutils package,
# where plain-name imports resolve; otherwise fall back to the stdlib package.
try:
    import dist
    import sysconfig
except ImportError:
    from distutils import dist, sysconfig
# Python 3 compatibility: basestring only exists on Python 2.
try:
    basestring
except NameError:
    basestring = str
## patch build_ext (distutils doesn't know how to get the libs directory
## path on windows - it hardcodes the paths around the patched sys.prefix)
if sys.platform == 'win32':
    from distutils.command.build_ext import build_ext as old_build_ext
    class build_ext(old_build_ext):
        def finalize_options (self):
            # Prepend the real (non-virtualenv) interpreter's Libs directory
            # so extension builds can find the Python import libraries.
            if self.library_dirs is None:
                self.library_dirs = []
            elif isinstance(self.library_dirs, basestring):
                self.library_dirs = self.library_dirs.split(os.pathsep)
            self.library_dirs.insert(0, os.path.join(sys.real_prefix, "Libs"))
            old_build_ext.finalize_options(self)
    # Replace the class in the command module so distutils picks it up.
    from distutils.command import build_ext as build_ext_module
    build_ext_module.build_ext = build_ext
## distutils.dist patches:
old_find_config_files = dist.Distribution.find_config_files
def find_config_files(self):
    # Wrap Distribution.find_config_files so the per-user pydistutils.cfg is
    # looked up under the virtualenv's sys.prefix instead of the home dir.
    found = old_find_config_files(self)
    system_distutils = os.path.join(distutils_path, 'distutils.cfg')
    #if os.path.exists(system_distutils):
    #    found.insert(0, system_distutils)
    # What to call the per-user config file
    if os.name == 'posix':
        user_filename = ".pydistutils.cfg"
    else:
        user_filename = "pydistutils.cfg"
    user_filename = os.path.join(sys.prefix, user_filename)
    if os.path.isfile(user_filename):
        # Replace any home-directory pydistutils.cfg with the virtualenv one.
        for item in list(found):
            if item.endswith('pydistutils.cfg'):
                found.remove(item)
        found.append(user_filename)
    return found
dist.Distribution.find_config_files = find_config_files
## distutils.sysconfig patches:
# Redirect include/lib lookups to the real interpreter prefix so compiling
# extensions inside the virtualenv finds the actual headers and libraries.
old_get_python_inc = sysconfig.get_python_inc
def sysconfig_get_python_inc(plat_specific=0, prefix=None):
    if prefix is None:
        prefix = sys.real_prefix
    return old_get_python_inc(plat_specific, prefix)
sysconfig_get_python_inc.__doc__ = old_get_python_inc.__doc__
sysconfig.get_python_inc = sysconfig_get_python_inc
old_get_python_lib = sysconfig.get_python_lib
def sysconfig_get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
    # Only the standard library lives in the real prefix; site-packages
    # stays inside the virtualenv.
    if standard_lib and prefix is None:
        prefix = sys.real_prefix
    return old_get_python_lib(plat_specific, standard_lib, prefix)
sysconfig_get_python_lib.__doc__ = old_get_python_lib.__doc__
sysconfig.get_python_lib = sysconfig_get_python_lib
old_get_config_vars = sysconfig.get_config_vars
def sysconfig_get_config_vars(*args):
    real_vars = old_get_config_vars(*args)
    if sys.platform == 'win32':
        # distutils needs LIBDIR on Windows but the stock config omits it.
        lib_dir = os.path.join(sys.real_prefix, "libs")
        if isinstance(real_vars, dict) and 'LIBDIR' not in real_vars:
            real_vars['LIBDIR'] = lib_dir # asked for all
        elif isinstance(real_vars, list) and 'LIBDIR' in args:
            real_vars = real_vars + [lib_dir] # asked for list
    return real_vars
sysconfig_get_config_vars.__doc__ = old_get_config_vars.__doc__
sysconfig.get_config_vars = sysconfig_get_config_vars
| Python |
"""Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix, it starts with sys.prefix and sys.exec_prefix (if different) and
appends lib/python<version>/site-packages as well as lib/site-python.
It also supports the Debian convention of
lib/python<version>/dist-packages. On other platforms (mainly Mac and
Windows), it uses just sys.prefix (and sys.exec_prefix, if different,
but this is unlikely). The resulting directories, if they exist, are
appended to sys.path, and also inspected for path configuration files.
FOR DEBIAN, this sys.path is augmented with directories in /usr/local.
Local addons go into /usr/local/lib/python<version>/site-packages
(resp. /usr/local/lib/site-python), Debian addons install into
/usr/{lib,share}/python<version>/dist-packages.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.X/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.X/site-packages/bar
/usr/local/lib/python2.X/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
try:
import __builtin__ as builtins
except ImportError:
import builtins
try:
set
except NameError:
from sets import Set as set
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None
# for distutils.commands.install
USER_SITE = None
USER_BASE = None
# Interpreter detection flags used throughout for platform-specific layout.
_is_pypy = hasattr(sys, 'pypy_version_info')
_is_jython = sys.platform[:4] == 'java'
if _is_jython:
    # Jython needs a module-type check in abs__file__ below.
    ModuleType = type(os)
def makepath(*paths):
    """Join *paths* and return (absolute path, case-normalized path)."""
    joined = os.path.join(*paths)
    # Jython's pseudo-entries must pass through untouched.
    if _is_jython and (joined == '__classpath__' or
                       joined.startswith('__pyclasspath__')):
        return joined, joined
    joined = os.path.abspath(joined)
    return joined, os.path.normcase(joined)
def abs__file__():
    """Make every loaded module's __file__ attribute an absolute path."""
    for m in sys.modules.values():
        if ((_is_jython and not isinstance(m, ModuleType)) or
            hasattr(m, '__loader__')):
            # only modules need the abspath in Jython. and don't mess
            # with a PEP 302-supplied __file__
            continue
        f = getattr(m, '__file__', None)
        if f is None:
            continue
        m.__file__ = os.path.abspath(f)
def removeduppaths():
    """Absolutize sys.path entries, drop duplicates, and return the seen set.

    The interpreter's initial path may contain relative entries (e.g. when
    running from the build directory) and, on case-insensitive file systems,
    case-variant duplicates; both are normalized here.
    """
    deduped = []
    seen = set()
    for entry in sys.path:
        entry, normed = makepath(entry)
        if normed not in seen:
            deduped.append(entry)
            seen.add(normed)
    sys.path[:] = deduped
    return seen
# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python. See http://www.python.org/sf/586680
def addbuilddir():
    """Append ./build/lib.<platform> in case we're running in the build dir
    (especially for Guido :-)"""
    from distutils.util import get_platform
    s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
    if hasattr(sys, 'gettotalrefcount'):
        # sys.gettotalrefcount only exists in --with-pydebug builds.
        s += '-pydebug'
    s = os.path.join(os.path.dirname(sys.path[-1]), s)
    sys.path.append(s)
def _init_pathinfo():
    """Return a set containing all existing directory entries from sys.path."""
    existing = set()
    for entry in sys.path:
        try:
            if os.path.isdir(entry):
                entry, normed = makepath(entry)
                existing.add(normed)
        except TypeError:
            # Non-string sys.path entries (e.g. import hooks) are skipped.
            continue
    return existing
def addpackage(sitedir, name, known_paths):
    """Process the .pth file 'name' inside 'sitedir'.

    Each non-comment line is either executed (if it starts with 'import') or
    treated as a directory to append to sys.path.  Returns the updated
    known_paths set, or None when called standalone (known_paths was None)
    so state does not leak between calls.
    """
    if known_paths is None:
        # Bug fix: the result of _init_pathinfo() was previously discarded,
        # leaving known_paths as None and crashing on the membership test
        # below whenever addpackage() was called without a known_paths set.
        known_paths = _init_pathinfo()
        reset = 1
    else:
        reset = 0
    fullname = os.path.join(sitedir, name)
    try:
        f = open(fullname, "rU")
    except IOError:
        return
    try:
        for line in f:
            if line.startswith("#"):
                continue
            if line.startswith("import"):
                # Executable lines let packages hook into sys.path setup.
                exec(line)
                continue
            line = line.rstrip()
            dir, dircase = makepath(sitedir, line)
            if not dircase in known_paths and os.path.exists(dir):
                sys.path.append(dir)
                known_paths.add(dircase)
    finally:
        f.close()
    if reset:
        known_paths = None
    return known_paths
def addsitedir(sitedir, known_paths=None):
    """Add 'sitedir' argument to sys.path if missing and handle .pth files in
    'sitedir'"""
    if known_paths is None:
        known_paths = _init_pathinfo()
        reset = 1
    else:
        reset = 0
    sitedir, sitedircase = makepath(sitedir)
    if not sitedircase in known_paths:
        sys.path.append(sitedir)        # Add path component
    try:
        names = os.listdir(sitedir)
    except os.error:
        # Nonexistent or unreadable directory: nothing to scan.
        return
    names.sort()
    for name in names:
        if name.endswith(os.extsep + "pth"):
            addpackage(sitedir, name, known_paths)
    if reset:
        # Called standalone: don't leak the locally built set to the caller.
        known_paths = None
    return known_paths
def addsitepackages(known_paths, sys_prefix=sys.prefix, exec_prefix=sys.exec_prefix):
    """Add site-packages (and possibly site-python) to sys.path"""
    prefixes = [os.path.join(sys_prefix, "local"), sys_prefix]
    if exec_prefix != sys_prefix:
        prefixes.append(os.path.join(exec_prefix, "local"))
    for prefix in prefixes:
        if prefix:
            # Compute the candidate site directories for this platform.
            if sys.platform in ('os2emx', 'riscos') or _is_jython:
                sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
            elif _is_pypy:
                sitedirs = [os.path.join(prefix, 'site-packages')]
            elif sys.platform == 'darwin' and prefix == sys_prefix:
                if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
                    sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
                                os.path.join(prefix, "Extras", "lib", "python")]
                else: # any other Python distros on OSX work this way
                    sitedirs = [os.path.join(prefix, "lib",
                                             "python" + sys.version[:3], "site-packages")]
            elif os.sep == '/':
                sitedirs = [os.path.join(prefix,
                                         "lib",
                                         "python" + sys.version[:3],
                                         "site-packages"),
                            os.path.join(prefix, "lib", "site-python"),
                            os.path.join(prefix, "python" + sys.version[:3], "lib-dynload")]
                # Only add lib64 when it is a real, distinct directory.
                lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
                if (os.path.exists(lib64_dir) and
                    os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
                    sitedirs.append(lib64_dir)
                try:
                    # sys.getobjects only available in --with-pydebug build
                    sys.getobjects
                    sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
                except AttributeError:
                    pass
                # Debian-specific dist-packages directories:
                sitedirs.append(os.path.join(prefix, "lib",
                                             "python" + sys.version[:3],
                                             "dist-packages"))
                sitedirs.append(os.path.join(prefix, "local/lib",
                                             "python" + sys.version[:3],
                                             "dist-packages"))
                sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
            else:
                sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
            if sys.platform == 'darwin':
                # for framework builds *only* we add the standard Apple
                # locations. Currently only per-user, but /Library and
                # /Network/Library could be added too
                if 'Python.framework' in prefix:
                    home = os.environ.get('HOME')
                    if home:
                        sitedirs.append(
                            os.path.join(home,
                                         'Library',
                                         'Python',
                                         sys.version[:3],
                                         'site-packages'))
            for sitedir in sitedirs:
                if os.path.isdir(sitedir):
                    addsitedir(sitedir, known_paths)
    return None
def check_enableusersite():
    """Decide whether the per-user site directory may be used.

    Returns None when disabled for security reasons (effective uid/gid
    differs from the real uid/gid), False when disabled via the command
    line or environment, and True when safe and enabled.
    """
    if hasattr(sys, 'flags') and getattr(sys.flags, 'no_user_site', False):
        return False
    # Refuse when running setuid/setgid: real and effective ids must match.
    for real_fn, effective_fn in (("getuid", "geteuid"), ("getgid", "getegid")):
        if hasattr(os, real_fn) and hasattr(os, effective_fn):
            if getattr(os, effective_fn)() != getattr(os, real_fn)():
                return None
    return True
def addusersitepackages(known_paths):
    """Add a per user site-package to sys.path

    Each user has its own python directory with site-packages in the
    home directory.
    USER_BASE is the root directory for all Python versions
    USER_SITE is the user specific site-packages directory
    USER_SITE/.. can be used for data.
    """
    global USER_BASE, USER_SITE, ENABLE_USER_SITE
    env_base = os.environ.get("PYTHONUSERBASE", None)
    def joinuser(*args):
        # Expand "~" after joining so "~"-based bases resolve correctly.
        return os.path.expanduser(os.path.join(*args))
    #if sys.platform in ('os2emx', 'riscos'):
    #    # Don't know what to put here
    #    USER_BASE = ''
    #    USER_SITE = ''
    if os.name == "nt":
        base = os.environ.get("APPDATA") or "~"
        if env_base:
            USER_BASE = env_base
        else:
            USER_BASE = joinuser(base, "Python")
        USER_SITE = os.path.join(USER_BASE,
                                 "Python" + sys.version[0] + sys.version[2],
                                 "site-packages")
    else:
        if env_base:
            USER_BASE = env_base
        else:
            USER_BASE = joinuser("~", ".local")
        USER_SITE = os.path.join(USER_BASE, "lib",
                                 "python" + sys.version[:3],
                                 "site-packages")
    if ENABLE_USER_SITE and os.path.isdir(USER_SITE):
        addsitedir(USER_SITE, known_paths)
    if ENABLE_USER_SITE:
        # Debian layout: also honor per-user dist-packages directories.
        for dist_libdir in ("lib", "local/lib"):
            user_site = os.path.join(USER_BASE, dist_libdir,
                                     "python" + sys.version[:3],
                                     "dist-packages")
            if os.path.isdir(user_site):
                addsitedir(user_site, known_paths)
    return known_paths
def setBEGINLIBPATH():
    """The OS/2 EMX port has optional extension modules that do double duty
    as DLLs (and must use the .DLL file extension) for other extensions.
    The library search path needs to be amended so these will be found
    during module import. Use BEGINLIBPATH so that these are at the start
    of the library search path.
    """
    dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
    libpath = os.environ['BEGINLIBPATH'].split(';')
    if libpath[-1]:
        libpath.append(dllpath)
    else:
        # A trailing ';' left an empty slot: fill it instead of appending.
        libpath[-1] = dllpath
    os.environ['BEGINLIBPATH'] = ';'.join(libpath)
def setquit():
    """Define new built-ins 'quit' and 'exit'.

    Both objects print a platform-appropriate exit hint when repr()'d and
    raise SystemExit when called (closing stdin first so IDLE-style shells
    notice).
    """
    eof = {':': 'Cmd-Q', '\\': 'Ctrl-Z plus Return'}.get(os.sep, 'Ctrl-D (i.e. EOF)')

    class Quitter(object):
        def __init__(self, name):
            self.name = name

        def __repr__(self):
            return 'Use %s() or %s to exit' % (self.name, eof)

        def __call__(self, code=None):
            # Shells like IDLE catch the SystemExit, but listen when their
            # stdin wrapper is closed.
            try:
                sys.stdin.close()
            except:
                pass
            raise SystemExit(code)

    builtins.quit = Quitter('quit')
    builtins.exit = Quitter('exit')
class _Printer(object):
"""interactive prompt objects for printing the license text, a list of
contributors and the copyright notice."""
MAXLINES = 23
def __init__(self, name, data, files=(), dirs=()):
self.__name = name
self.__data = data
self.__files = files
self.__dirs = dirs
self.__lines = None
def __setup(self):
if self.__lines:
return
data = None
for dir in self.__dirs:
for filename in self.__files:
filename = os.path.join(dir, filename)
try:
fp = file(filename, "rU")
data = fp.read()
fp.close()
break
except IOError:
pass
if data:
break
if not data:
data = self.__data
self.__lines = data.split('\n')
self.__linecnt = len(self.__lines)
def __repr__(self):
self.__setup()
if len(self.__lines) <= self.MAXLINES:
return "\n".join(self.__lines)
else:
return "Type %s() to see the full %s text" % ((self.__name,)*2)
def __call__(self):
self.__setup()
prompt = 'Hit Return for more, or q (and Return) to quit: '
lineno = 0
while 1:
try:
for i in range(lineno, lineno + self.MAXLINES):
print(self.__lines[i])
except IndexError:
break
else:
lineno += self.MAXLINES
key = None
while key is None:
try:
key = raw_input(prompt)
except NameError:
key = input(prompt)
if key not in ('', 'q'):
key = None
if key == 'q':
break
def setcopyright():
    """Set 'copyright' and 'credits' in __builtin__"""
    builtins.copyright = _Printer("copyright", sys.copyright)
    # Credits text depends on the interpreter implementation.
    if _is_jython:
        builtins.credits = _Printer(
            "credits",
            "Jython is maintained by the Jython developers (www.jython.org).")
    elif _is_pypy:
        builtins.credits = _Printer(
            "credits",
            "PyPy is maintained by the PyPy developers: http://codespeak.net/pypy")
    else:
        builtins.credits = _Printer("credits", """\
    Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
    for supporting Python development. See www.python.org for more information.""")
    # license falls back to a URL when no LICENSE file is found on disk.
    here = os.path.dirname(os.__file__)
    builtins.license = _Printer(
        "license", "See http://www.python.org/%.3s/license.html" % sys.version,
        ["LICENSE.txt", "LICENSE"],
        [os.path.join(here, os.pardir), here, os.curdir])
class _Helper(object):
"""Define the built-in 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
    # Install the interactive help() built-in (delegates to pydoc via _Helper).
    builtins.help = _Helper()
def aliasmbcs():
    """On Windows, some default encodings are not provided by Python,
    while they are always available as "mbcs" in each locale. Make
    them usable by aliasing to "mbcs" in such a case."""
    if sys.platform == 'win32':
        import locale, codecs
        enc = locale.getdefaultlocale()[1]
        # NOTE(review): assumes getdefaultlocale() returned an encoding name;
        # enc would be None with an unset locale -- confirm on target systems.
        if enc.startswith('cp'):            # "cp***" ?
            try:
                codecs.lookup(enc)
            except LookupError:
                # Unknown code page: route it to the "mbcs" codec.
                import encodings
                encodings._cache[enc] = encodings._unknown
                encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
    """Set the string encoding used by the Unicode implementation. The
    default is 'ascii', but if you're willing to experiment, you can
    change this."""
    encoding = "ascii" # Default value set by _PyUnicode_Init()
    if 0:
        # Enable to support locale aware default string encodings.
        import locale
        loc = locale.getdefaultlocale()
        if loc[1]:
            encoding = loc[1]
    if 0:
        # Enable to switch off string to Unicode coercion and implicit
        # Unicode to string conversion.
        encoding = "undefined"
    if encoding != "ascii":
        # On Non-Unicode builds this will raise an AttributeError...
        sys.setdefaultencoding(encoding) # Needs Python Unicode build !
def execsitecustomize():
    """Import the optional sitecustomize hook module, ignoring its absence."""
    try:
        # Imported purely for its side effects; a missing module is fine.
        import sitecustomize
    except ImportError:
        pass
def virtual_install_main_packages():
    """Expose the real interpreter's standard library inside the virtualenv.

    Reads the original interpreter prefix from orig-prefix.txt (written by
    virtualenv next to this file) and appends its stdlib directories to
    sys.path.
    """
    f = open(os.path.join(os.path.dirname(__file__), 'orig-prefix.txt'))
    sys.real_prefix = f.read().strip()
    f.close()
    # NOTE(review): pos is computed but never used below; looks like a
    # leftover from a version that inserted rather than extended sys.path.
    pos = 2
    hardcoded_relative_dirs = []
    if sys.path[0] == '':
        pos += 1
    if sys.platform == 'win32':
        paths = [os.path.join(sys.real_prefix, 'Lib'), os.path.join(sys.real_prefix, 'DLLs')]
    elif _is_jython:
        paths = [os.path.join(sys.real_prefix, 'Lib')]
    elif _is_pypy:
        if sys.pypy_version_info >= (1, 5):
            cpyver = '%d.%d' % sys.version_info[:2]
        else:
            cpyver = '%d.%d.%d' % sys.version_info[:3]
        paths = [os.path.join(sys.real_prefix, 'lib_pypy'),
                 os.path.join(sys.real_prefix, 'lib-python', 'modified-%s' % cpyver),
                 os.path.join(sys.real_prefix, 'lib-python', cpyver)]
        hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
        #
        # This is hardcoded in the Python executable, but relative to sys.prefix:
        for path in paths[:]:
            plat_path = os.path.join(path, 'plat-%s' % sys.platform)
            if os.path.exists(plat_path):
                paths.append(plat_path)
    else:
        paths = [os.path.join(sys.real_prefix, 'lib', 'python'+sys.version[:3])]
        hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
        lib64_path = os.path.join(sys.real_prefix, 'lib64', 'python'+sys.version[:3])
        if os.path.exists(lib64_path):
            paths.append(lib64_path)
        # This is hardcoded in the Python executable, but relative to sys.prefix:
        plat_path = os.path.join(sys.real_prefix, 'lib', 'python'+sys.version[:3],
                                 'plat-%s' % sys.platform)
        if os.path.exists(plat_path):
            paths.append(plat_path)
    # This is hardcoded in the Python executable, but
    # relative to sys.prefix, so we have to fix up:
    for path in list(paths):
        tk_dir = os.path.join(path, 'lib-tk')
        if os.path.exists(tk_dir):
            paths.append(tk_dir)
    # These are hardcoded in the Apple's Python executable,
    # but relative to sys.prefix, so we have to fix them up:
    if sys.platform == 'darwin':
        hardcoded_paths = [os.path.join(relative_dir, module)
                           for relative_dir in hardcoded_relative_dirs
                           for module in ('plat-darwin', 'plat-mac', 'plat-mac/lib-scriptpackages')]
        for path in hardcoded_paths:
            if os.path.exists(path):
                paths.append(path)
    sys.path.extend(paths)
def force_global_eggs_after_local_site_packages():
    """
    Force easy_installed eggs in the global environment to get placed
    in sys.path after all packages inside the virtualenv. This
    maintains the "least surprise" result that packages in the
    virtualenv always mask global packages, never the other way
    around.
    """
    egginsert = getattr(sys, '__egginsert', 0)
    # Move the egg insertion point past the last sys.prefix-local entry.
    for i, path in enumerate(sys.path):
        if i > egginsert and path.startswith(sys.prefix):
            egginsert = i
    sys.__egginsert = egginsert + 1
def virtual_addsitepackages(known_paths):
    # Add the global (real-prefix) site-packages, first demoting global eggs
    # so virtualenv packages always win over system ones.
    force_global_eggs_after_local_site_packages()
    return addsitepackages(known_paths, sys_prefix=sys.real_prefix)
def fixclasspath():
    """Move Jython's special classpath entries behind the virtualenv dirs.

    '__classpath__' and '__pyclasspath__...' entries should follow the base
    virtualenv lib directories in sys.path.
    """
    regular = []
    special = []
    for entry in sys.path:
        if entry == '__classpath__' or entry.startswith('__pyclasspath__'):
            special.append(entry)
        else:
            regular.append(entry)
    sys.path = regular
    sys.path.extend(special)
def execusercustomize():
    """Import the optional usercustomize hook module, ignoring its absence."""
    try:
        # Imported purely for its side effects; a missing module is fine.
        import usercustomize
    except ImportError:
        pass
def main():
    """Run the full virtualenv-aware site initialization sequence."""
    global ENABLE_USER_SITE
    virtual_install_main_packages()
    abs__file__()
    paths_in_sys = removeduppaths()
    if (os.name == "posix" and sys.path and
        os.path.basename(sys.path[-1]) == "Modules"):
        addbuilddir()
    if _is_jython:
        fixclasspath()
    # A marker file written by virtualenv decides whether the global
    # site-packages are visible inside this environment.
    GLOBAL_SITE_PACKAGES = not os.path.exists(os.path.join(os.path.dirname(__file__), 'no-global-site-packages.txt'))
    if not GLOBAL_SITE_PACKAGES:
        ENABLE_USER_SITE = False
    if ENABLE_USER_SITE is None:
        ENABLE_USER_SITE = check_enableusersite()
    paths_in_sys = addsitepackages(paths_in_sys)
    paths_in_sys = addusersitepackages(paths_in_sys)
    if GLOBAL_SITE_PACKAGES:
        paths_in_sys = virtual_addsitepackages(paths_in_sys)
    if sys.platform == 'os2emx':
        setBEGINLIBPATH()
    setquit()
    setcopyright()
    sethelper()
    aliasmbcs()
    setencoding()
    execsitecustomize()
    if ENABLE_USER_SITE:
        execusercustomize()
    # Remove sys.setdefaultencoding() so that users cannot change the
    # encoding after initialization. The test for presence is needed when
    # this module is run as a script, because this code is executed twice.
    if hasattr(sys, "setdefaultencoding"):
        del sys.setdefaultencoding

# site.py performs its initialization at import time.
main()
def _script():
    """Command-line interface: print site info, or USER_BASE/USER_SITE.

    With no arguments, dumps sys.path and the user-site configuration.
    With --user-base/--user-site, prints the requested value(s) and exits
    with a status code describing whether the user site dir is enabled.
    """
    help = """\
    %s [--user-base] [--user-site]

    Without arguments print some useful information
    With arguments print the value of USER_BASE and/or USER_SITE separated
    by '%s'.

    Exit codes with --user-base or --user-site:
      0 - user site directory is enabled
      1 - user site directory is disabled by user
      2 - uses site directory is disabled by super user
          or for security reasons
     >2 - unknown error
    """
    args = sys.argv[1:]
    if not args:
        print("sys.path = [")
        for dir in sys.path:
            print("    %r," % (dir,))
        print("]")
        def exists(path):
            if os.path.isdir(path):
                return "exists"
            else:
                return "doesn't exist"
        print("USER_BASE: %r (%s)" % (USER_BASE, exists(USER_BASE)))
        # Bug fix: report the existence of USER_SITE itself; this line
        # previously checked exists(USER_BASE) again.
        print("USER_SITE: %r (%s)" % (USER_SITE, exists(USER_SITE)))
        print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE)
        sys.exit(0)
    buffer = []
    if '--user-base' in args:
        buffer.append(USER_BASE)
    if '--user-site' in args:
        buffer.append(USER_SITE)
    if buffer:
        print(os.pathsep.join(buffer))
        # Translate the enable state into the documented exit codes.
        if ENABLE_USER_SITE:
            sys.exit(0)
        elif ENABLE_USER_SITE is False:
            sys.exit(1)
        elif ENABLE_USER_SITE is None:
            sys.exit(2)
        else:
            sys.exit(3)
    else:
        import textwrap
        print(textwrap.dedent(help % (sys.argv[0], os.pathsep)))
        sys.exit(10)

if __name__ == '__main__':
    _script()
| Python |
#!/bin/python
#
# Copyright (C) 2012 Gedare Bloom
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# $Id$
#
from datetime import date, timedelta
import email
import getopt
import getpass
import os
import re
import sys
## get imapclient at http://imapclient.freshfoo.com/
from imapclient import IMAPClient
def usage():
  """Print the command-line help text to stdout.

  The quadruple backslashes render as a literal \\Flagged in the output;
  the single trailing backslashes are line continuations of one print.
  """
  print "\
fetch-flagged-email.py retrieves email that has been \\\\Flagged (gmail starred).\n\
Specify a folder (gmail label) and it will retrieve all flagged emails\n\
in that folder using imap and your account/password. The imap support\n\
relies on a non-standard python library imapclient which is available at\n\
http://imapclient.freshfoo.com/. By default fetched emails remain flagged\n\
on the server, but passing the -r flag unflags (unstars) them.\n\
Emails are saved in text files based on email sequence numbers.\n\
\n\
You can customize the default arguments in fetch-flagged-email.py:main().\n\
Your email account and password can be entered interactively in addition to\n\
passed on the command line or hard-coded as default arguments.\n\
\n\
Usage: fetch-flagged-email.py -[hi:f:u:p:d:o:r]\n\
-h --help print this help\n\
-i --imap imap server [imap.gmail.com]\n\
-f --folder folder to use [INBOX]\n\
-u --username email account []\n\
-p --password account password []\n\
-d --days days back to look (0 for all) [0]\n\
-o --output output directory for messages [.]\n\
-r --resetflag reset \\\\Flagged (starred) attribute [False]\n"
def connect(server_address):
  """Open an SSL IMAP connection (UID mode enabled) to server_address."""
  client = IMAPClient(server_address, use_uid=True, ssl=True)
  return client
def searchmail(imap_server, search, output, resetflag):
  """Download every message matching `search` and save each raw RFC822
  source to <output>/<SEQ>.mail, optionally clearing the \\Flagged flag.

  imap_server -- connected, logged-in IMAPClient with a folder selected
  search      -- list of IMAP SEARCH criteria (RFC 3501 sec. 6.4.4)
  output      -- existing directory to write the .mail files into
  resetflag   -- when True, remove \\Flagged (unstar) from all matches
  """
  messages = imap_server.search(search)
  # One round trip fetches the full raw source of every matched message.
  messages_contents = imap_server.fetch(messages, ['RFC822'])
  for message_id, data in messages_contents.iteritems():
    message_string = data['RFC822']
    # Parsed form is only used to report the Subject line below.
    message = email.message_from_string(message_string)
    # File name is the zero-padded message sequence number.
    filename = "{0:04}.mail".format(data['SEQ'])
    outfile = os.path.join(output,filename)
    f = open(outfile, 'w')
    f.write(message_string)
    f.close()
    print "{0} -> {1}\n".format(message['subject'], outfile)
  if resetflag:
    # Unflag (unstar) everything we just downloaded, in one call.
    imap_server.remove_flags(messages, '\\Flagged')
def main():
## Customize these default args and stop using any command line arguments.
# imap server
imap = 'imap.gmail.com'
# restrict searching to a folder (label) name you use
folder = 'INBOX'
# username and password can be entered on command line, interactively,
# or hard-coded here. username is of the form user@domain.com
username = ''
password = ''
# For search keywords see http://tools.ietf.org/html/rfc3501#section-6.4.4
## To get flagged (starred) email that has PATCH in the subject line
## and is not a reply:
## search = ['FLAGGED', 'SUBJECT \"PATCH\"', 'NOT SUBJECT \"Re\"']
## Default behavior: get flagged email
search = ['FLAGGED']
# set to '0' to get all
days = '0'
# where search result emails get written
output = "."
# Set to True to reset the Flagged flag (starred) automatically
resetflag = False
# Process args
try:
opts, args = getopt.getopt(sys.argv[1:], "hi:f:u:p:d:o:r",
["help", "imap=", "folder", "username=", "password=",
"days=", "output=", "resetflag="])
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-i", "--imap"):
imap = True
elif opt in ("-f", "--folder"):
folder = arg
elif opt in ("-u", "--username"):
username = arg
elif opt in ("-p", "--password"):
password = arg
elif opt in ("-d", "--days"):
days = arg
elif opt in ("-o", "--output"):
output = arg
elif opt in ("-r", "--resetflag"):
resetflag = True
else:
assert False, "unhandled option"
if not os.path.exists(output):
print("Invalid output directory: " + output)
sys.exit(1)
if not username:
username = raw_input("Enter email account: ")
if not password:
password = getpass.getpass("Enter email account password: ")
if not days == "0":
cutoff = date.today() - timedelta(int(days))
search.append("SINCE {0}".format(cutoff.strftime('%d-%b-%Y')))
imap_server = connect(imap)
imap_server.login(username, password)
imap_server.select_folder(folder)
searchmail(imap_server, search, output, resetflag)
# Script entry point.
if __name__ == "__main__":
  main()
| Python |
#!/bin/python
#
# Copyright (C) 2012 Gedare Bloom
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# $Id: fetch-flagged-email.py 2 2012-04-19 00:40:58Z gedarebloom@gmail.com $
#
from datetime import date, timedelta
import email
import getopt
import getpass
import os
import re
import sys
## get imapclient at http://imapclient.freshfoo.com/
from imapclient import IMAPClient
def usage():
  """Print the command-line help text to stdout.

  The quadruple backslashes render as a literal \\Flagged in the output;
  the single trailing backslashes are line continuations of one print.
  """
  print "\
fetch-flagged-email.py retrieves email that has been \\\\Flagged (gmail starred).\n\
Specify a folder (gmail label) and it will retrieve all flagged emails\n\
in that folder using imap and your account/password. The imap support\n\
relies on a non-standard python library imapclient which is available at\n\
http://imapclient.freshfoo.com/. By default fetched emails remain flagged\n\
on the server, but passing the -r flag unflags (unstars) them.\n\
Emails are saved in text files based on email sequence numbers.\n\
\n\
You can customize the default arguments in fetch-flagged-email.py:main().\n\
Your email account and password can be entered interactively in addition to\n\
passed on the command line or hard-coded as default arguments.\n\
\n\
Usage: fetch-flagged-email.py -[hi:f:u:p:d:o:r]\n\
-h --help print this help\n\
-i --imap imap server [imap.gmail.com]\n\
-f --folder folder to use [INBOX]\n\
-u --username email account []\n\
-p --password account password []\n\
-d --days days back to look (0 for all) [0]\n\
-o --output output directory for messages [.]\n\
-r --resetflag reset \\\\Flagged (starred) attribute [False]\n"
def connect(server_address):
  """Open an SSL IMAP connection (UID mode enabled) to server_address."""
  client = IMAPClient(server_address, use_uid=True, ssl=True)
  return client
def searchmail(imap_server, search, output, resetflag):
  """Download every message matching `search` and save each raw RFC822
  source to <output>/<SEQ>.mail, optionally clearing the \\Flagged flag.

  imap_server -- connected, logged-in IMAPClient with a folder selected
  search      -- list of IMAP SEARCH criteria (RFC 3501 sec. 6.4.4)
  output      -- existing directory to write the .mail files into
  resetflag   -- when True, remove \\Flagged (unstar) from all matches
  """
  messages = imap_server.search(search)
  # One round trip fetches the full raw source of every matched message.
  messages_contents = imap_server.fetch(messages, ['RFC822'])
  for message_id, data in messages_contents.iteritems():
    message_string = data['RFC822']
    # Parsed form is only used to report the Subject line below.
    message = email.message_from_string(message_string)
    # File name is the zero-padded message sequence number.
    filename = "{0:04}.mail".format(data['SEQ'])
    outfile = os.path.join(output,filename)
    f = open(outfile, 'w')
    f.write(message_string)
    f.close()
    print "{0} -> {1}\n".format(message['subject'], outfile)
  if resetflag:
    # Unflag (unstar) everything we just downloaded, in one call.
    imap_server.remove_flags(messages, '\\Flagged')
def main():
## Customize these default args and stop using any command line arguments.
# imap server
imap = 'imap.gmail.com'
# restrict searching to a folder (label) name you use
folder = 'INBOX'
# username and password can be entered on command line, interactively,
# or hard-coded here. username is of the form user@domain.com
username = ''
password = ''
# For search keywords see http://tools.ietf.org/html/rfc3501#section-6.4.4
## To get flagged (starred) email that has PATCH in the subject line
## and is not a reply:
## search = ['FLAGGED', 'SUBJECT \"PATCH\"', 'NOT SUBJECT \"Re\"']
## Default behavior: get flagged email
search = ['FLAGGED']
# set to '0' to get all
days = '0'
# where search result emails get written
output = "."
# Set to True to reset the Flagged flag (starred) automatically
resetflag = False
# Process args
try:
opts, args = getopt.getopt(sys.argv[1:], "hi:f:u:p:d:o:r",
["help", "imap=", "folder", "username=", "password=",
"days=", "output=", "resetflag="])
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-i", "--imap"):
imap = True
elif opt in ("-f", "--folder"):
folder = arg
elif opt in ("-u", "--username"):
username = arg
elif opt in ("-p", "--password"):
password = arg
elif opt in ("-d", "--days"):
days = arg
elif opt in ("-o", "--output"):
output = arg
elif opt in ("-r", "--resetflag"):
resetflag = True
else:
assert False, "unhandled option"
if not os.path.exists(output):
print("Invalid output directory: " + output)
sys.exit(1)
if not username:
username = raw_input("Enter email account: ")
if not password:
password = getpass.getpass("Enter email account password: ")
if not days == "0":
cutoff = date.today() - timedelta(int(days))
search.append("SINCE {0}".format(cutoff.strftime('%d-%b-%Y')))
imap_server = connect(imap)
imap_server.login(username, password)
imap_server.select_folder(folder)
searchmail(imap_server, search, output, resetflag)
# Script entry point.
if __name__ == "__main__":
  main()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for internet
Copyright (C) 2003-2006 Frederico Caldeira Knabben
Licensed under the terms of the GNU Lesser General Public License:
http://www.opensource.org/licenses/lgpl-license.php
For further information visit:
http://www.fckeditor.net/
"Support Open Source software. What about a donation today?"
File Name: connector.py
Connector for Python.
Tested With:
Standard:
Python 2.3.3
Zope:
Zope Version: (Zope 2.8.1-final, python 2.3.5, linux2)
Python Version: 2.3.5 (#4, Mar 10 2005, 01:40:25)
[GCC 3.3.3 20040412 (Red Hat Linux 3.3.3-7)]
System Platform: linux2
File Authors:
Andrew Liu (andrew@liuholdings.com)
"""
"""
Author Notes (04 December 2005):
This module has gone through quite a few phases of change. Obviously,
I am only supporting that part of the code that I use. Initially
I had the upload directory as a part of zope (ie. uploading files
directly into Zope), before realising that there were too many
complex intricacies within Zope to deal with. Zope is one ugly piece
of code. So I decided to complement Zope by an Apache server (which
I had running anyway, and doing nothing). So I mapped all uploads
from an arbitrary server directory to an arbitrary web directory.
All the FCKeditor uploading occurred this way, and I didn't have to
stuff around with fiddling with Zope objects and the like (which are
terribly complex and something you don't want to do - trust me).
Maybe a Zope expert can touch up the Zope components. In the end,
I had FCKeditor loaded in Zope (probably a bad idea as well), and
I replaced the connector.py with an alias to a server module.
Right now, all Zope components will simple remain as is because
I've had enough of Zope.
See notes right at the end of this file for how I aliased out of Zope.
Anyway, most of you probably wont use Zope, so things are pretty
simple in that regard.
Typically, SERVER_DIR is the root of WEB_DIR (not necessarily).
Most definitely, SERVER_USERFILES_DIR points to WEB_USERFILES_DIR.
"""
import cgi
import re
import os
import string
"""
escape
Converts the special characters '<', '>', and '&'.
RFC 1866 specifies that these characters be represented
in HTML as "&amp;lt;", "&amp;gt;" and "&amp;amp;" respectively. In Python
1.5 we use the new string.replace() function for speed.
"""
def escape(text, replace=None):
	"""Return `text` with the HTML-special characters escaped as character
	entity references (&amp; &lt; &gt; &quot;), per RFC 1866.

	BUGFIX: the replacement strings had been de-entitized in this copy
	(e.g. '&' was "replaced" with '&'), making every call a no-op and
	leaving XML attribute values unescaped; the entities are restored.
	The default replacer is now resolved lazily instead of binding
	string.replace at definition time (behaviorally identical).
	"""
	if replace is None:
		replace = lambda s, old, new: s.replace(old, new)
	text = replace(text, '&', '&amp;') # must be done 1st
	text = replace(text, '<', '&lt;')
	text = replace(text, '>', '&gt;')
	text = replace(text, '"', '&quot;')
	return text
"""
getFCKeditorConnector
Creates a new instance of an FCKeditorConnector, and runs it
"""
def getFCKeditorConnector(context=None):
	"""Build an FCKeditorConnector bound to the (optional) Zope context
	and return the result of running it."""
	return FCKeditorConnector(context=context).run()
"""
FCKeditorRequest
A wrapper around the request object
Can handle normal CGI request, or a Zope request
Extend as required
"""
class FCKeditorRequest(object):
	"""Uniform accessor over either a Zope REQUEST object or a plain CGI
	FieldStorage, selected by whether a Zope context was supplied."""
	def __init__(self, context=None):
		self.context = context
		if context is not None:
			self.request = context.REQUEST
		else:
			self.request = cgi.FieldStorage()
	def isZope(self):
		# A wrapped Zope context implies a Zope-style request object.
		return self.context is not None
	def has_key(self, key):
		return self.request.has_key(key)
	def get(self, key, default=None):
		# Zope's REQUEST offers dict-style get(); CGI FieldStorage items
		# must be unwrapped through their .value attribute.
		if self.isZope():
			return self.request.get(key, default)
		if key in self.request.keys():
			return self.request[key].value
		return default
"""
FCKeditorConnector
The connector class
"""
class FCKeditorConnector(object):
	"""File-manager backend ("connector") for FCKeditor.

	Handles the GetFolders / GetFoldersAndFiles / CreateFolder / FileUpload
	commands either against the local filesystem (plain CGI) or against a
	Zope folder tree, chosen by whether a Zope `context` was supplied.
	Responses are XML fragments, except FileUpload which returns a
	<script> block for the upload iframe. The connector is disabled by
	default (see self.enabled in __init__).
	"""
	# Configuration for FCKEditor
	# can point to another server here, if linked correctly
	#WEB_HOST = "http://127.0.0.1/"
	WEB_HOST = ""
	SERVER_DIR = "/var/www/html/"
	WEB_USERFILES_FOLDER = WEB_HOST + "upload/"
	SERVER_USERFILES_FOLDER = SERVER_DIR + "upload/"
	# Allow access (Zope)
	__allow_access_to_unprotected_subobjects__ = 1
	# Class Attributes
	# Matches the last path component (with optional trailing slash);
	# getParentFolder() strips it to obtain the parent directory.
	parentFolderRe = re.compile("[\/][^\/]+[\/]?$")
	"""
	Constructor
	"""
	def __init__(self, context=None):
		# The given root path will NOT be shown to the user
		# Only the userFilesPath will be shown
		# Instance Attributes
		self.context = context
		self.request = FCKeditorRequest(context=context)
		self.rootPath = self.SERVER_DIR
		self.userFilesFolder = self.SERVER_USERFILES_FOLDER
		self.webUserFilesFolder = self.WEB_USERFILES_FOLDER
		# Enables / Disables the connector
		self.enabled = False # Set to True to enable this connector
		# These are instance variables
		self.zopeRootContext = None
		self.zopeUploadContext = None
		# Copied from php module =)
		# None means "no allow-list": anything not denied is accepted.
		self.allowedExtensions = {
			"File": None,
			"Image": None,
			"Flash": None,
			"Media": None
		}
		self.deniedExtensions = {
			"File": [ "php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess" ],
			"Image": [ "php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess" ],
			"Flash": [ "php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess" ],
			"Media": [ "php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess" ]
		}
	"""
	Zope specific functions
	"""
	def isZope(self):
		# The context object is the zope object
		if (self.context is not None):
			return True
		return False
	def getZopeRootContext(self):
		# Cached lookup of the Zope physical root.
		if self.zopeRootContext is None:
			self.zopeRootContext = self.context.getPhysicalRoot()
		return self.zopeRootContext
	def getZopeUploadContext(self):
		# Walk from the Zope root down the userFilesFolder path, caching
		# the resulting folder object.
		if self.zopeUploadContext is None:
			folderNames = self.userFilesFolder.split("/")
			c = self.getZopeRootContext()
			for folderName in folderNames:
				if (folderName <> ""):
					c = c[folderName]
			self.zopeUploadContext = c
		return self.zopeUploadContext
	"""
	Generic manipulation functions
	"""
	def getUserFilesFolder(self):
		return self.userFilesFolder
	def getWebUserFilesFolder(self):
		return self.webUserFilesFolder
	def getAllowedExtensions(self, resourceType):
		return self.allowedExtensions[resourceType]
	def getDeniedExtensions(self, resourceType):
		return self.deniedExtensions[resourceType]
	def removeFromStart(self, string, char):
		# NOTE(review): lstrip strips ALL leading occurrences of char,
		# not just one; callers rely on that for "/" normalization.
		return string.lstrip(char)
	def removeFromEnd(self, string, char):
		return string.rstrip(char)
	def convertToXmlAttribute(self, value):
		if (value is None):
			value = ""
		return escape(value)
	def convertToPath(self, path):
		# Ensure a trailing slash.
		if (path[-1] <> "/"):
			return path + "/"
		else:
			return path
	def getUrlFromPath(self, resourceType, path):
		# Server-side URL of `path` under the resource-type directory.
		if (resourceType is None) or (resourceType == ''):
			url = "%s%s" % (
				self.removeFromEnd(self.getUserFilesFolder(), '/'),
				path
			)
		else:
			url = "%s%s%s" % (
				self.getUserFilesFolder(),
				resourceType,
				path
			)
		return url
	def getWebUrlFromPath(self, resourceType, path):
		# Browser-visible URL of `path` under the resource-type directory.
		if (resourceType is None) or (resourceType == ''):
			url = "%s%s" % (
				self.removeFromEnd(self.getWebUserFilesFolder(), '/'),
				path
			)
		else:
			url = "%s%s%s" % (
				self.getWebUserFilesFolder(),
				resourceType,
				path
			)
		return url
	def removeExtension(self, fileName):
		# NOTE(review): rindex raises ValueError when fileName has no ".".
		index = fileName.rindex(".")
		newFileName = fileName[0:index]
		return newFileName
	def getExtension(self, fileName):
		index = fileName.rindex(".") + 1
		fileExtension = fileName[index:]
		return fileExtension
	def getParentFolder(self, folderPath):
		# Strip the last path component (see parentFolderRe above).
		parentFolderPath = self.parentFolderRe.sub('', folderPath)
		return parentFolderPath
	"""
	serverMapFolder
	Purpose: works out the folder map on the server
	"""
	def serverMapFolder(self, resourceType, folderPath):
		# Get the resource type directory
		resourceTypeFolder = "%s%s/" % (
			self.getUserFilesFolder(),
			resourceType
		)
		# Ensure that the directory exists
		self.createServerFolder(resourceTypeFolder)
		# Return the resource type directory combined with the
		# required path
		return "%s%s" % (
			resourceTypeFolder,
			self.removeFromStart(folderPath, '/')
		)
	"""
	createServerFolder
	Purpose: physically creates a folder on the server
	"""
	def createServerFolder(self, folderPath):
		# Recursively creates missing ancestors, then folderPath itself.
		# Check if the parent exists
		parentFolderPath = self.getParentFolder(folderPath)
		if not(os.path.exists(parentFolderPath)):
			errorMsg = self.createServerFolder(parentFolderPath)
			if errorMsg is not None:
				return errorMsg
		# Check if this exists
		if not(os.path.exists(folderPath)):
			os.mkdir(folderPath)
			os.chmod(folderPath, 0755)
			errorMsg = None
		else:
			if os.path.isdir(folderPath):
				errorMsg = None
			else:
				# NOTE(review): Python 2 string exception — deprecated
				# even in py2; a real exception class would be better.
				raise "createServerFolder: Non-folder of same name already exists"
		return errorMsg
	"""
	getRootPath
	Purpose: returns the root path on the server
	"""
	def getRootPath(self):
		return self.rootPath
	"""
	setXmlHeaders
	Purpose: to prepare the headers for the xml to return
	"""
	def setXmlHeaders(self):
		#now = self.context.BS_get_now()
		#yesterday = now - 1
		self.setHeader("Content-Type", "text/xml")
		#self.setHeader("Expires", yesterday)
		#self.setHeader("Last-Modified", now)
		#self.setHeader("Cache-Control", "no-store, no-cache, must-revalidate")
		self.printHeaders()
		return
	def setHeader(self, key, value):
		# Zope: set a response header; CGI: print it to stdout.
		if (self.isZope()):
			self.context.REQUEST.RESPONSE.setHeader(key, value)
		else:
			print "%s: %s" % (key, value)
		return
	def printHeaders(self):
		# For non-Zope requests, we need to print an empty line
		# to denote the end of headers
		if (not(self.isZope())):
			print ""
	"""
	createXmlFooter
	Purpose: returns the xml header
	"""
	# NOTE(review): the string block above is mislabeled; this method
	# builds the XML *header* and opening <Connector> node.
	def createXmlHeader(self, command, resourceType, currentFolder):
		self.setXmlHeaders()
		s = ""
		# Create the XML document header
		s += """<?xml version="1.0" encoding="utf-8" ?>"""
		# Create the main connector node
		s += """<Connector command="%s" resourceType="%s">""" % (
			command,
			resourceType
		)
		# Add the current folder node
		s += """<CurrentFolder path="%s" url="%s" />""" % (
			self.convertToXmlAttribute(currentFolder),
			self.convertToXmlAttribute(
				self.getWebUrlFromPath(
					resourceType,
					currentFolder
				)
			),
		)
		return s
	"""
	createXmlFooter
	Purpose: returns the xml footer
	"""
	def createXmlFooter(self):
		s = """</Connector>"""
		return s
	"""
	sendError
	Purpose: in the event of an error, return an xml based error
	"""
	def sendError(self, number, text):
		self.setXmlHeaders()
		s = ""
		# Create the XML document header
		s += """<?xml version="1.0" encoding="utf-8" ?>"""
		s += """<Connector>"""
		s += """<Error number="%s" text="%s" />""" % (number, text)
		s += """</Connector>"""
		return s
	"""
	getFolders
	Purpose: command to recieve a list of folders
	"""
	def getFolders(self, resourceType, currentFolder):
		if (self.isZope()):
			return self.getZopeFolders(resourceType, currentFolder)
		else:
			return self.getNonZopeFolders(resourceType, currentFolder)
	def getZopeFolders(self, resourceType, currentFolder):
		# Open the folders node
		s = ""
		s += """<Folders>"""
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		for (name, o) in zopeFolder.objectItems(["Folder"]):
			s += """<Folder name="%s" />""" % (
				self.convertToXmlAttribute(name)
			)
		# Close the folders node
		s += """</Folders>"""
		return s
	def getNonZopeFolders(self, resourceType, currentFolder):
		# Map the virtual path to our local server
		serverPath = self.serverMapFolder(resourceType, currentFolder)
		# Open the folders node
		s = ""
		s += """<Folders>"""
		for someObject in os.listdir(serverPath):
			someObjectPath = os.path.join(serverPath, someObject)
			if os.path.isdir(someObjectPath):
				s += """<Folder name="%s" />""" % (
					self.convertToXmlAttribute(someObject)
				)
		# Close the folders node
		s += """</Folders>"""
		return s
	"""
	getFoldersAndFiles
	Purpose: command to recieve a list of folders and files
	"""
	def getFoldersAndFiles(self, resourceType, currentFolder):
		if (self.isZope()):
			return self.getZopeFoldersAndFiles(resourceType, currentFolder)
		else:
			return self.getNonZopeFoldersAndFiles(resourceType, currentFolder)
	def getNonZopeFoldersAndFiles(self, resourceType, currentFolder):
		# Map the virtual path to our local server
		serverPath = self.serverMapFolder(resourceType, currentFolder)
		# Open the folders / files node
		folders = """<Folders>"""
		files = """<Files>"""
		for someObject in os.listdir(serverPath):
			someObjectPath = os.path.join(serverPath, someObject)
			if os.path.isdir(someObjectPath):
				folders += """<Folder name="%s" />""" % (
					self.convertToXmlAttribute(someObject)
				)
			elif os.path.isfile(someObjectPath):
				# NOTE(review): `size` is computed but unused; the format
				# below calls getsize() a second time.
				size = os.path.getsize(someObjectPath)
				files += """<File name="%s" size="%s" />""" % (
					self.convertToXmlAttribute(someObject),
					os.path.getsize(someObjectPath)
				)
		# Close the folders / files node
		folders += """</Folders>"""
		files += """</Files>"""
		# Return it
		s = folders + files
		return s
	def getZopeFoldersAndFiles(self, resourceType, currentFolder):
		folders = self.getZopeFolders(resourceType, currentFolder)
		files = self.getZopeFiles(resourceType, currentFolder)
		s = folders + files
		return s
	def getZopeFiles(self, resourceType, currentFolder):
		# Open the files node
		s = ""
		s += """<Files>"""
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		for (name, o) in zopeFolder.objectItems(["File","Image"]):
			# Size is reported in KB, rounded up.
			s += """<File name="%s" size="%s" />""" % (
				self.convertToXmlAttribute(name),
				((o.get_size() / 1024) + 1)
			)
		# Close the files node
		s += """</Files>"""
		return s
	def findZopeFolder(self, resourceType, folderName):
		# returns the context of the resource / folder
		zopeFolder = self.getZopeUploadContext()
		folderName = self.removeFromStart(folderName, "/")
		folderName = self.removeFromEnd(folderName, "/")
		if (resourceType <> ""):
			try:
				zopeFolder = zopeFolder[resourceType]
			except:
				# Resource-type folder missing: create it on demand.
				zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType)
				zopeFolder = zopeFolder[resourceType]
		if (folderName <> ""):
			folderNames = folderName.split("/")
			for folderName in folderNames:
				zopeFolder = zopeFolder[folderName]
		return zopeFolder
	"""
	createFolder
	Purpose: command to create a new folder
	"""
	def createFolder(self, resourceType, currentFolder):
		if (self.isZope()):
			return self.createZopeFolder(resourceType, currentFolder)
		else:
			return self.createNonZopeFolder(resourceType, currentFolder)
	def createZopeFolder(self, resourceType, currentFolder):
		# Find out where we are
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		errorNo = 0
		errorMsg = ""
		if self.request.has_key("NewFolderName"):
			newFolder = self.request.get("NewFolderName", None)
			zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder)
		else:
			# 102: invalid folder name (none supplied).
			errorNo = 102
		error = """<Error number="%s" originalDescription="%s" />""" % (
			errorNo,
			self.convertToXmlAttribute(errorMsg)
		)
		return error
	def createNonZopeFolder(self, resourceType, currentFolder):
		errorNo = 0
		errorMsg = ""
		if self.request.has_key("NewFolderName"):
			newFolder = self.request.get("NewFolderName", None)
			currentFolderPath = self.serverMapFolder(
				resourceType,
				currentFolder
			)
			try:
				newFolderPath = currentFolderPath + newFolder
				errorMsg = self.createServerFolder(newFolderPath)
				if (errorMsg is not None):
					errorNo = 110
			except:
				# 103: no permission / creation failed.
				errorNo = 103
		else:
			errorNo = 102
		error = """<Error number="%s" originalDescription="%s" />""" % (
			errorNo,
			self.convertToXmlAttribute(errorMsg)
		)
		return error
	"""
	getFileName
	Purpose: helper function to extrapolate the filename
	"""
	def getFileName(self, filename):
		# Strip any client-supplied directory part (both separators).
		for splitChar in ["/", "\\"]:
			array = filename.split(splitChar)
			if (len(array) > 1):
				filename = array[-1]
		return filename
	"""
	fileUpload
	Purpose: command to upload files to server
	"""
	def fileUpload(self, resourceType, currentFolder):
		if (self.isZope()):
			return self.zopeFileUpload(resourceType, currentFolder)
		else:
			return self.nonZopeFileUpload(resourceType, currentFolder)
	def zopeFileUpload(self, resourceType, currentFolder, count=None):
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		file = self.request.get("NewFile", None)
		fileName = self.getFileName(file.filename)
		fileNameOnly = self.removeExtension(fileName)
		fileExtension = self.getExtension(fileName).lower()
		# On an id collision, retry as name.<count>.ext with an
		# incrementing counter (see the except branch below).
		if (count):
			nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension)
		else:
			nid = fileName
		title = nid
		try:
			zopeFolder.manage_addProduct['OFSP'].manage_addFile(
				id=nid,
				title=title,
				file=file.read()
			)
		except:
			if (count):
				count += 1
			else:
				count = 1
			self.zopeFileUpload(resourceType, currentFolder, count)
		return
	def nonZopeFileUpload(self, resourceType, currentFolder):
		errorNo = 0
		errorMsg = ""
		if self.request.has_key("NewFile"):
			# newFile has all the contents we need
			newFile = self.request.get("NewFile", "")
			# Get the file name
			newFileName = newFile.filename
			newFileNameOnly = self.removeExtension(newFileName)
			newFileExtension = self.getExtension(newFileName).lower()
			allowedExtensions = self.getAllowedExtensions(resourceType)
			deniedExtensions = self.getDeniedExtensions(resourceType)
			if (allowedExtensions is not None):
				# Check for allowed
				isAllowed = False
				if (newFileExtension in allowedExtensions):
					isAllowed = True
			elif (deniedExtensions is not None):
				# Check for denied
				isAllowed = True
				if (newFileExtension in deniedExtensions):
					isAllowed = False
			else:
				# No extension limitations
				isAllowed = True
			if (isAllowed):
				if (self.isZope()):
					# Upload into zope
					self.zopeFileUpload(resourceType, currentFolder)
				else:
					# Upload to operating system
					# Map the virtual path to the local server path
					currentFolderPath = self.serverMapFolder(
						resourceType,
						currentFolder
					)
					i = 0
					while (True):
						newFilePath = "%s%s" % (
							currentFolderPath,
							newFileName
						)
						if os.path.exists(newFilePath):
							# 201: file existed; a "(i)" name is computed
							# but NOTE(review): the loop breaks without
							# writing under that new name.
							i += 1
							newFilePath = "%s%s(%s).%s" % (
								currentFolderPath,
								newFileNameOnly,
								i,
								newFileExtension
							)
							errorNo = 201
							break
						else:
							fileHandle = open(newFilePath,'w')
							linecount = 0
							while (1):
								#line = newFile.file.readline()
								line = newFile.readline()
								if not line: break
								fileHandle.write("%s" % line)
								linecount += 1
							os.chmod(newFilePath, 0777)
							break
			else:
				newFileName = "Extension not allowed"
				errorNo = 203
		else:
			newFileName = "No File"
			errorNo = 202
		string = """
<script type="text/javascript">
	window.parent.frames["frmUpload"].OnUploadCompleted(%s,"%s");
</script>
""" % (
			errorNo,
			newFileName.replace('"',"'")
		)
		return string
	def run(self):
		# Main dispatch: validate the request, then route the Command
		# parameter to the matching handler and wrap the result in XML.
		s = ""
		try:
			# Check if this is disabled
			if not(self.enabled):
				return self.sendError(1, "This connector is disabled.  Please check the connector configurations and try again")
			# Make sure we have valid inputs
			if not(
					(self.request.has_key("Command")) and
					(self.request.has_key("Type")) and
					(self.request.has_key("CurrentFolder"))
					):
				return
			# Get command
			command = self.request.get("Command", None)
			# Get resource type
			resourceType = self.request.get("Type", None)
			# folder syntax must start and end with "/"
			currentFolder = self.request.get("CurrentFolder", None)
			if (currentFolder[-1] <> "/"):
				currentFolder += "/"
			if (currentFolder[0] <> "/"):
				currentFolder = "/" + currentFolder
			# Check for invalid paths
			if (".." in currentFolder):
				return self.sendError(102, "")
			# File upload doesn't have to return XML, so intercept
			# here
			if (command == "FileUpload"):
				return self.fileUpload(resourceType, currentFolder)
			# Begin XML
			s += self.createXmlHeader(command, resourceType, currentFolder)
			# Execute the command
			if (command == "GetFolders"):
				f = self.getFolders
			elif (command == "GetFoldersAndFiles"):
				f = self.getFoldersAndFiles
			elif (command == "CreateFolder"):
				f = self.createFolder
			else:
				f = None
			if (f is not None):
				s += f(resourceType, currentFolder)
			s += self.createXmlFooter()
		except Exception, e:
			s = "ERROR: %s" % e
		return s
# Running from command line (CGI entry point): emit the connector response.
if __name__ == '__main__':
	# To test the output, uncomment the standard headers
	#print "Content-Type: text/html"
	#print ""
	print getFCKeditorConnector()
"""
Running from zope, you will need to modify this connector.
If you have uploaded the FCKeditor into Zope (like me), you need to
move this connector out of Zope, and replace the "connector" with an
alias as below. The key to it is to pass the Zope context in, as
we then have a like to the Zope context.
## Script (Python) "connector.py"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=*args, **kws
##title=ALIAS
##
import Products.connector as connector
return connector.getFCKeditorConnector(context=context).run()
"""
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for internet
Copyright (C) 2003-2006 Frederico Caldeira Knabben
Licensed under the terms of the GNU Lesser General Public License:
http://www.opensource.org/licenses/lgpl-license.php
For further information visit:
http://www.fckeditor.net/
"Support Open Source software. What about a donation today?"
File Name: connector.py
Connector for Python.
Tested With:
Standard:
Python 2.3.3
Zope:
Zope Version: (Zope 2.8.1-final, python 2.3.5, linux2)
Python Version: 2.3.5 (#4, Mar 10 2005, 01:40:25)
[GCC 3.3.3 20040412 (Red Hat Linux 3.3.3-7)]
System Platform: linux2
File Authors:
Andrew Liu (andrew@liuholdings.com)
"""
"""
Author Notes (04 December 2005):
This module has gone through quite a few phases of change. Obviously,
I am only supporting that part of the code that I use. Initially
I had the upload directory as a part of zope (ie. uploading files
directly into Zope), before realising that there were too many
complex intricacies within Zope to deal with. Zope is one ugly piece
of code. So I decided to complement Zope by an Apache server (which
I had running anyway, and doing nothing). So I mapped all uploads
from an arbitrary server directory to an arbitrary web directory.
All the FCKeditor uploading occurred this way, and I didn't have to
stuff around with fiddling with Zope objects and the like (which are
terribly complex and something you don't want to do - trust me).
Maybe a Zope expert can touch up the Zope components. In the end,
I had FCKeditor loaded in Zope (probably a bad idea as well), and
I replaced the connector.py with an alias to a server module.
Right now, all Zope components will simple remain as is because
I've had enough of Zope.
See notes right at the end of this file for how I aliased out of Zope.
Anyway, most of you probably wont use Zope, so things are pretty
simple in that regard.
Typically, SERVER_DIR is the root of WEB_DIR (not necessarily).
Most definitely, SERVER_USERFILES_DIR points to WEB_USERFILES_DIR.
"""
import cgi
import re
import os
import string
"""
escape
Converts the special characters '<', '>', and '&'.
RFC 1866 specifies that these characters be represented
in HTML as "&amp;lt;", "&amp;gt;" and "&amp;amp;" respectively. In Python
1.5 we use the new string.replace() function for speed.
"""
def escape(text, replace=None):
	"""Return `text` with the HTML-special characters escaped as character
	entity references (&amp; &lt; &gt; &quot;), per RFC 1866.

	BUGFIX: the replacement strings had been de-entitized in this copy
	(e.g. '&' was "replaced" with '&'), making every call a no-op and
	leaving XML attribute values unescaped; the entities are restored.
	The default replacer is now resolved lazily instead of binding
	string.replace at definition time (behaviorally identical).
	"""
	if replace is None:
		replace = lambda s, old, new: s.replace(old, new)
	text = replace(text, '&', '&amp;') # must be done 1st
	text = replace(text, '<', '&lt;')
	text = replace(text, '>', '&gt;')
	text = replace(text, '"', '&quot;')
	return text
"""
getFCKeditorConnector
Creates a new instance of an FCKeditorConnector, and runs it
"""
def getFCKeditorConnector(context=None):
	"""Build an FCKeditorConnector bound to the (optional) Zope context
	and return the result of running it."""
	return FCKeditorConnector(context=context).run()
"""
FCKeditorRequest
A wrapper around the request object
Can handle normal CGI request, or a Zope request
Extend as required
"""
class FCKeditorRequest(object):
    """Wrapper around the incoming request.

    Handles either a Zope REQUEST (when a context is supplied) or a plain
    CGI ``FieldStorage``.
    """

    def __init__(self, context=None):
        self.context = context
        self.request = context.REQUEST if context is not None else cgi.FieldStorage()

    def isZope(self):
        """Return True when a Zope context was supplied."""
        if self.context is not None:
            return True
        return False

    def has_key(self, key):
        # Delegates to the wrapped request (Zope REQUEST / FieldStorage).
        return self.request.has_key(key)

    def get(self, key, default=None):
        """Return the value for *key*, or *default* when absent."""
        if self.isZope():
            return self.request.get(key, default)
        # CGI: FieldStorage entries carry the payload in .value
        if key in self.request.keys():
            return self.request[key].value
        return default
"""
FCKeditorConnector
The connector class
"""
class FCKeditorConnector(object):
    """FCKeditor file-browser connector, callable from plain CGI or from Zope.

    Implements the GetFolders / GetFoldersAndFiles / CreateFolder /
    FileUpload commands against either the local filesystem or a Zope
    folder tree, and renders the XML responses the editor expects.
    NOTE(review): Python 2 code throughout (print statements, `<>`,
    octal 0755 literals, string exceptions).
    """
    # Configuration for FCKEditor
    # can point to another server here, if linked correctly
    #WEB_HOST = "http://127.0.0.1/"
    WEB_HOST = ""
    SERVER_DIR = "/var/www/html/"
    WEB_USERFILES_FOLDER = WEB_HOST + "upload/"
    SERVER_USERFILES_FOLDER = SERVER_DIR + "upload/"
    # Allow access (Zope)
    __allow_access_to_unprotected_subobjects__ = 1
    # Class Attributes
    # Matches the last "/name[/]" path component (used to find a parent dir)
    parentFolderRe = re.compile("[\/][^\/]+[\/]?$")
    """
    Constructor
    """
    def __init__(self, context=None):
        # The given root path will NOT be shown to the user
        # Only the userFilesPath will be shown
        # Instance Attributes
        self.context = context
        self.request = FCKeditorRequest(context=context)
        self.rootPath = self.SERVER_DIR
        self.userFilesFolder = self.SERVER_USERFILES_FOLDER
        self.webUserFilesFolder = self.WEB_USERFILES_FOLDER
        # Enables / Disables the connector
        self.enabled = False # Set to True to enable this connector
        # These are instance variables
        self.zopeRootContext = None
        self.zopeUploadContext = None
        # Copied from php module =)
        # None means "no whitelist"; the denied lists below then apply.
        self.allowedExtensions = {
                "File": None,
                "Image": None,
                "Flash": None,
                "Media": None
                }
        self.deniedExtensions = {
                "File": [ "php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess" ],
                "Image": [ "php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess" ],
                "Flash": [ "php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess" ],
                "Media": [ "php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess" ]
                }
    """
    Zope specific functions
    """
    def isZope(self):
        # The context object is the zope object
        if (self.context is not None):
            return True
        return False
    def getZopeRootContext(self):
        # Lazily resolve and cache the Zope physical root.
        if self.zopeRootContext is None:
            self.zopeRootContext = self.context.getPhysicalRoot()
        return self.zopeRootContext
    def getZopeUploadContext(self):
        # Walk from the Zope root down to the configured upload folder,
        # caching the resulting context object.
        if self.zopeUploadContext is None:
            folderNames = self.userFilesFolder.split("/")
            c = self.getZopeRootContext()
            for folderName in folderNames:
                if (folderName <> ""):
                    c = c[folderName]
            self.zopeUploadContext = c
        return self.zopeUploadContext
    """
    Generic manipulation functions
    """
    def getUserFilesFolder(self):
        return self.userFilesFolder
    def getWebUserFilesFolder(self):
        return self.webUserFilesFolder
    def getAllowedExtensions(self, resourceType):
        return self.allowedExtensions[resourceType]
    def getDeniedExtensions(self, resourceType):
        return self.deniedExtensions[resourceType]
    def removeFromStart(self, string, char):
        # lstrip removes every leading occurrence of char
        return string.lstrip(char)
    def removeFromEnd(self, string, char):
        return string.rstrip(char)
    def convertToXmlAttribute(self, value):
        # Escape a value for embedding inside an XML attribute ('' for None).
        if (value is None):
            value = ""
        return escape(value)
    def convertToPath(self, path):
        # Ensure a trailing slash on the path.
        if (path[-1] <> "/"):
            return path + "/"
        else:
            return path
    def getUrlFromPath(self, resourceType, path):
        """Map a virtual folder path to its server-side URL for *resourceType*."""
        if (resourceType is None) or (resourceType == ''):
            url = "%s%s" % (
                    self.removeFromEnd(self.getUserFilesFolder(), '/'),
                    path
                    )
        else:
            url = "%s%s%s" % (
                    self.getUserFilesFolder(),
                    resourceType,
                    path
                    )
        return url
    def getWebUrlFromPath(self, resourceType, path):
        """Same as getUrlFromPath, but against the web-visible folder."""
        if (resourceType is None) or (resourceType == ''):
            url = "%s%s" % (
                    self.removeFromEnd(self.getWebUserFilesFolder(), '/'),
                    path
                    )
        else:
            url = "%s%s%s" % (
                    self.getWebUserFilesFolder(),
                    resourceType,
                    path
                    )
        return url
    def removeExtension(self, fileName):
        # Strip the text after the last dot (ValueError when no dot).
        index = fileName.rindex(".")
        newFileName = fileName[0:index]
        return newFileName
    def getExtension(self, fileName):
        index = fileName.rindex(".") + 1
        fileExtension = fileName[index:]
        return fileExtension
    def getParentFolder(self, folderPath):
        # Drop the last path component via parentFolderRe.
        parentFolderPath = self.parentFolderRe.sub('', folderPath)
        return parentFolderPath
    """
    serverMapFolder
    Purpose: works out the folder map on the server
    """
    def serverMapFolder(self, resourceType, folderPath):
        # Get the resource type directory
        resourceTypeFolder = "%s%s/" % (
                self.getUserFilesFolder(),
                resourceType
                )
        # Ensure that the directory exists
        self.createServerFolder(resourceTypeFolder)
        # Return the resource type directory combined with the
        # required path
        return "%s%s" % (
                resourceTypeFolder,
                self.removeFromStart(folderPath, '/')
                )
    """
    createServerFolder
    Purpose: physically creates a folder on the server
    """
    def createServerFolder(self, folderPath):
        """Create *folderPath* (and missing parents, recursively).

        Returns an error message string or None on success.
        """
        # Check if the parent exists
        parentFolderPath = self.getParentFolder(folderPath)
        if not(os.path.exists(parentFolderPath)):
            # Recurse to create missing ancestors first.
            errorMsg = self.createServerFolder(parentFolderPath)
            if errorMsg is not None:
                return errorMsg
        # Check if this exists
        if not(os.path.exists(folderPath)):
            os.mkdir(folderPath)
            os.chmod(folderPath, 0755)
            errorMsg = None
        else:
            if os.path.isdir(folderPath):
                errorMsg = None
            else:
                # NOTE(review): string exception — Python 2 only; a file of
                # the same name blocks folder creation.
                raise "createServerFolder: Non-folder of same name already exists"
        return errorMsg
    """
    getRootPath
    Purpose: returns the root path on the server
    """
    def getRootPath(self):
        return self.rootPath
    """
    setXmlHeaders
    Purpose: to prepare the headers for the xml to return
    """
    def setXmlHeaders(self):
        #now = self.context.BS_get_now()
        #yesterday = now - 1
        self.setHeader("Content-Type", "text/xml")
        #self.setHeader("Expires", yesterday)
        #self.setHeader("Last-Modified", now)
        #self.setHeader("Cache-Control", "no-store, no-cache, must-revalidate")
        self.printHeaders()
        return
    def setHeader(self, key, value):
        # Zope: set on the RESPONSE object; CGI: print directly (Python 2).
        if (self.isZope()):
            self.context.REQUEST.RESPONSE.setHeader(key, value)
        else:
            print "%s: %s" % (key, value)
        return
    def printHeaders(self):
        # For non-Zope requests, we need to print an empty line
        # to denote the end of headers
        if (not(self.isZope())):
            print ""
    """
    createXmlHeader
    Purpose: returns the xml header
    """
    def createXmlHeader(self, command, resourceType, currentFolder):
        self.setXmlHeaders()
        s = ""
        # Create the XML document header
        s += """<?xml version="1.0" encoding="utf-8" ?>"""
        # Create the main connector node
        s += """<Connector command="%s" resourceType="%s">""" % (
                command,
                resourceType
                )
        # Add the current folder node
        s += """<CurrentFolder path="%s" url="%s" />""" % (
                self.convertToXmlAttribute(currentFolder),
                self.convertToXmlAttribute(
                    self.getWebUrlFromPath(
                        resourceType,
                        currentFolder
                        )
                    ),
                )
        return s
    """
    createXmlFooter
    Purpose: returns the xml footer
    """
    def createXmlFooter(self):
        s = """</Connector>"""
        return s
    """
    sendError
    Purpose: in the event of an error, return an xml based error
    """
    def sendError(self, number, text):
        self.setXmlHeaders()
        s = ""
        # Create the XML document header
        s += """<?xml version="1.0" encoding="utf-8" ?>"""
        s += """<Connector>"""
        s += """<Error number="%s" text="%s" />""" % (number, text)
        s += """</Connector>"""
        return s
    """
    getFolders
    Purpose: command to receive a list of folders
    """
    def getFolders(self, resourceType, currentFolder):
        if (self.isZope()):
            return self.getZopeFolders(resourceType, currentFolder)
        else:
            return self.getNonZopeFolders(resourceType, currentFolder)
    def getZopeFolders(self, resourceType, currentFolder):
        # Open the folders node
        s = ""
        s += """<Folders>"""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        for (name, o) in zopeFolder.objectItems(["Folder"]):
            s += """<Folder name="%s" />""" % (
                    self.convertToXmlAttribute(name)
                    )
        # Close the folders node
        s += """</Folders>"""
        return s
    def getNonZopeFolders(self, resourceType, currentFolder):
        # Map the virtual path to our local server
        serverPath = self.serverMapFolder(resourceType, currentFolder)
        # Open the folders node
        s = ""
        s += """<Folders>"""
        for someObject in os.listdir(serverPath):
            someObjectPath = os.path.join(serverPath, someObject)
            if os.path.isdir(someObjectPath):
                s += """<Folder name="%s" />""" % (
                        self.convertToXmlAttribute(someObject)
                        )
        # Close the folders node
        s += """</Folders>"""
        return s
    """
    getFoldersAndFiles
    Purpose: command to receive a list of folders and files
    """
    def getFoldersAndFiles(self, resourceType, currentFolder):
        if (self.isZope()):
            return self.getZopeFoldersAndFiles(resourceType, currentFolder)
        else:
            return self.getNonZopeFoldersAndFiles(resourceType, currentFolder)
    def getNonZopeFoldersAndFiles(self, resourceType, currentFolder):
        # Map the virtual path to our local server
        serverPath = self.serverMapFolder(resourceType, currentFolder)
        # Open the folders / files node
        folders = """<Folders>"""
        files = """<Files>"""
        for someObject in os.listdir(serverPath):
            someObjectPath = os.path.join(serverPath, someObject)
            if os.path.isdir(someObjectPath):
                folders += """<Folder name="%s" />""" % (
                        self.convertToXmlAttribute(someObject)
                        )
            elif os.path.isfile(someObjectPath):
                size = os.path.getsize(someObjectPath)
                files += """<File name="%s" size="%s" />""" % (
                        self.convertToXmlAttribute(someObject),
                        os.path.getsize(someObjectPath)
                        )
        # Close the folders / files node
        folders += """</Folders>"""
        files += """</Files>"""
        # Return it
        s = folders + files
        return s
    def getZopeFoldersAndFiles(self, resourceType, currentFolder):
        folders = self.getZopeFolders(resourceType, currentFolder)
        files = self.getZopeFiles(resourceType, currentFolder)
        s = folders + files
        return s
    def getZopeFiles(self, resourceType, currentFolder):
        # Open the files node
        s = ""
        s += """<Files>"""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        for (name, o) in zopeFolder.objectItems(["File","Image"]):
            # Size reported in KB, rounded up.
            s += """<File name="%s" size="%s" />""" % (
                    self.convertToXmlAttribute(name),
                    ((o.get_size() / 1024) + 1)
                    )
        # Close the files node
        s += """</Files>"""
        return s
    def findZopeFolder(self, resourceType, folderName):
        # returns the context of the resource / folder
        zopeFolder = self.getZopeUploadContext()
        folderName = self.removeFromStart(folderName, "/")
        folderName = self.removeFromEnd(folderName, "/")
        if (resourceType <> ""):
            try:
                zopeFolder = zopeFolder[resourceType]
            except:
                # Missing resource-type folder: create it on demand.
                zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType)
                zopeFolder = zopeFolder[resourceType]
        if (folderName <> ""):
            folderNames = folderName.split("/")
            for folderName in folderNames:
                zopeFolder = zopeFolder[folderName]
        return zopeFolder
    """
    createFolder
    Purpose: command to create a new folder
    """
    def createFolder(self, resourceType, currentFolder):
        if (self.isZope()):
            return self.createZopeFolder(resourceType, currentFolder)
        else:
            return self.createNonZopeFolder(resourceType, currentFolder)
    def createZopeFolder(self, resourceType, currentFolder):
        # Find out where we are
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        errorNo = 0
        errorMsg = ""
        if self.request.has_key("NewFolderName"):
            newFolder = self.request.get("NewFolderName", None)
            zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder)
        else:
            # 102: no NewFolderName supplied
            errorNo = 102
        error = """<Error number="%s" originalDescription="%s" />""" % (
                errorNo,
                self.convertToXmlAttribute(errorMsg)
                )
        return error
    def createNonZopeFolder(self, resourceType, currentFolder):
        errorNo = 0
        errorMsg = ""
        if self.request.has_key("NewFolderName"):
            newFolder = self.request.get("NewFolderName", None)
            currentFolderPath = self.serverMapFolder(
                    resourceType,
                    currentFolder
                    )
            try:
                newFolderPath = currentFolderPath + newFolder
                errorMsg = self.createServerFolder(newFolderPath)
                if (errorMsg is not None):
                    errorNo = 110
            except:
                # 103: folder creation failed (permissions, bad name, ...)
                errorNo = 103
        else:
            errorNo = 102
        error = """<Error number="%s" originalDescription="%s" />""" % (
                errorNo,
                self.convertToXmlAttribute(errorMsg)
                )
        return error
    """
    getFileName
    Purpose: helper function to extrapolate the filename
    """
    def getFileName(self, filename):
        # Keep only the part after the last '/' or '\' separator.
        for splitChar in ["/", "\\"]:
            array = filename.split(splitChar)
            if (len(array) > 1):
                filename = array[-1]
        return filename
    """
    fileUpload
    Purpose: command to upload files to server
    """
    def fileUpload(self, resourceType, currentFolder):
        if (self.isZope()):
            return self.zopeFileUpload(resourceType, currentFolder)
        else:
            return self.nonZopeFileUpload(resourceType, currentFolder)
    def zopeFileUpload(self, resourceType, currentFolder, count=None):
        # Add the file to the Zope folder; on an id collision, recurse with
        # an incremented `count` to pick a "name.N.ext" id.
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        file = self.request.get("NewFile", None)
        fileName = self.getFileName(file.filename)
        fileNameOnly = self.removeExtension(fileName)
        fileExtension = self.getExtension(fileName).lower()
        if (count):
            nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension)
        else:
            nid = fileName
        title = nid
        try:
            zopeFolder.manage_addProduct['OFSP'].manage_addFile(
                    id=nid,
                    title=title,
                    file=file.read()
                    )
        except:
            if (count):
                count += 1
            else:
                count = 1
            self.zopeFileUpload(resourceType, currentFolder, count)
        return
    def nonZopeFileUpload(self, resourceType, currentFolder):
        """Store the uploaded file on disk; returns the JS callback snippet."""
        errorNo = 0
        errorMsg = ""
        if self.request.has_key("NewFile"):
            # newFile has all the contents we need
            newFile = self.request.get("NewFile", "")
            # Get the file name
            newFileName = newFile.filename
            newFileNameOnly = self.removeExtension(newFileName)
            newFileExtension = self.getExtension(newFileName).lower()
            allowedExtensions = self.getAllowedExtensions(resourceType)
            deniedExtensions = self.getDeniedExtensions(resourceType)
            if (allowedExtensions is not None):
                # Check for allowed
                isAllowed = False
                if (newFileExtension in allowedExtensions):
                    isAllowed = True
            elif (deniedExtensions is not None):
                # Check for denied
                isAllowed = True
                if (newFileExtension in deniedExtensions):
                    isAllowed = False
            else:
                # No extension limitations
                isAllowed = True
            if (isAllowed):
                if (self.isZope()):
                    # Upload into zope
                    self.zopeFileUpload(resourceType, currentFolder)
                else:
                    # Upload to operating system
                    # Map the virtual path to the local server path
                    currentFolderPath = self.serverMapFolder(
                            resourceType,
                            currentFolder
                            )
                    i = 0
                    while (True):
                        newFilePath = "%s%s" % (
                                currentFolderPath,
                                newFileName
                                )
                        if os.path.exists(newFilePath):
                            # 201: a file of that name exists already
                            # NOTE(review): the renamed path is computed but
                            # never written before breaking — TODO confirm
                            i += 1
                            newFilePath = "%s%s(%s).%s" % (
                                    currentFolderPath,
                                    newFileNameOnly,
                                    i,
                                    newFileExtension
                                    )
                            errorNo = 201
                            break
                        else:
                            fileHandle = open(newFilePath,'w')
                            linecount = 0
                            while (1):
                                #line = newFile.file.readline()
                                line = newFile.readline()
                                if not line: break
                                fileHandle.write("%s" % line)
                                linecount += 1
                            os.chmod(newFilePath, 0777)
                            break
            else:
                newFileName = "Extension not allowed"
                errorNo = 203
        else:
            newFileName = "No File"
            errorNo = 202
        string = """
<script type="text/javascript">
window.parent.frames["frmUpload"].OnUploadCompleted(%s,"%s");
</script>
""" % (
                errorNo,
                newFileName.replace('"',"'")
                )
        return string
    def run(self):
        """Main dispatch: validate the request and route to the command."""
        s = ""
        try:
            # Check if this is disabled
            if not(self.enabled):
                return self.sendError(1, "This connector is disabled. Please check the connector configurations and try again")
            # Make sure we have valid inputs
            if not(
                    (self.request.has_key("Command")) and
                    (self.request.has_key("Type")) and
                    (self.request.has_key("CurrentFolder"))
                    ):
                return
            # Get command
            command = self.request.get("Command", None)
            # Get resource type
            resourceType = self.request.get("Type", None)
            # folder syntax must start and end with "/"
            currentFolder = self.request.get("CurrentFolder", None)
            if (currentFolder[-1] <> "/"):
                currentFolder += "/"
            if (currentFolder[0] <> "/"):
                currentFolder = "/" + currentFolder
            # Check for invalid paths
            if (".." in currentFolder):
                return self.sendError(102, "")
            # File upload doesn't have to return XML, so intercept
            # here
            if (command == "FileUpload"):
                return self.fileUpload(resourceType, currentFolder)
            # Begin XML
            s += self.createXmlHeader(command, resourceType, currentFolder)
            # Execute the command
            if (command == "GetFolders"):
                f = self.getFolders
            elif (command == "GetFoldersAndFiles"):
                f = self.getFoldersAndFiles
            elif (command == "CreateFolder"):
                f = self.createFolder
            else:
                f = None
            if (f is not None):
                s += f(resourceType, currentFolder)
            s += self.createXmlFooter()
        except Exception, e:
            s = "ERROR: %s" % e
        return s
# Running from command line
if __name__ == '__main__':
    # To test the output, uncomment the standard headers
    #print "Content-Type: text/html"
    #print ""
    # CGI entry point: emits the HTTP headers plus the XML body (Python 2 print).
    print getFCKeditorConnector()
"""
Running from zope, you will need to modify this connector.
If you have uploaded the FCKeditor into Zope (like me), you need to
move this connector out of Zope, and replace the "connector" with an
alias as below. The key to it is to pass the Zope context in, as
we then have a link to the Zope context.
## Script (Python) "connector.py"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=*args, **kws
##title=ALIAS
##
import Products.connector as connector
return connector.getFCKeditorConnector(context=context).run()
"""
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the "File Uploader" for Python
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorQuickUpload(FCKeditorConnectorBase,
                           UploadFileCommandMixin,
                           BaseHttpMixin, BaseHtmlMixin):
    """Handles the FCKeditor "QuickUpload" command: validates the request and
    stores the posted file under the configured upload folder."""

    def doResponse(self):
        """Process the request, set headers and return a string as response."""
        # Check if this connector is disabled
        if not(Config.Enabled):
            return self.sendUploadResults(1, "This file uploader is disabled. Please check the \"editor/filemanager/connectors/py/config.py\"")
        command = 'QuickUpload'
        # The file type (from the QueryString, by default 'File').
        resourceType = self.request.get('Type','File')
        currentFolder = getCurrentFolder(self.request.get("CurrentFolder",""))
        # Check for invalid paths (getCurrentFolder returns None for "..")
        if currentFolder is None:
            return self.sendUploadResults(102, '', '', "")
        # Check if it is an allowed command
        if ( not command in Config.ConfigAllowedCommands ):
            return self.sendUploadResults( 1, '', '', 'The %s command isn\'t allowed' % command )
        if ( not resourceType in Config.ConfigAllowedTypes ):
            return self.sendUploadResults( 1, '', '', 'Invalid type specified' )
        # Setup paths
        self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
        self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
        if not self.userFilesFolder: # no absolute path given (dangerous...)
            self.userFilesFolder = mapServerPath(self.environ,
                                                 self.webUserFilesFolder)
        # Ensure that the directory exists.
        if not os.path.exists(self.userFilesFolder):
            try:
                # BUG FIX: was `self.createServerFoldercreateServerFolder(...)`,
                # a garbled method name that raised AttributeError on first use.
                self.createServerFolder( self.userFilesFolder )
            except Exception:
                return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
        # File upload doesn't have to return XML, so intercept here
        return self.uploadFile(resourceType, currentFolder)
# Running from command line (plain old CGI)
if __name__ == '__main__':
    try:
        # Create a Connector Instance
        conn = FCKeditorQuickUpload()
        data = conn.doResponse()
        # Emit queued headers, a blank separator line, then the body.
        for header in conn.headers:
            if not header is None:
                print '%s: %s' % header
        print
        print data
    except:
        # Last-resort handler: report the traceback as plain text.
        print "Content-Type: text/plain"
        print
        import cgi
        cgi.print_exception()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
from time import gmtime, strftime
import string
def escape(text, replace=None):
    """Escape '&', '<', '>' and '"' as HTML/XML character entities.

    RFC 1866 specifies that these characters be represented in HTML as
    &amp;lt; &amp;gt; and &amp;amp; respectively.

    Fixes two defects: the replacement strings had been HTML-unescaped
    into identity replacements (replacing '&' with '&' etc.), and the old
    default ``string.replace`` no longer exists in Python 3.  *replace*
    is kept for backward compatibility; any ``replace(s, old, new)``
    callable may be passed.
    """
    if replace is None:
        replace = lambda s, old, new: s.replace(old, new)
    text = replace(text, '&', '&amp;') # must be done 1st
    text = replace(text, '<', '&lt;')
    text = replace(text, '>', '&gt;')
    text = replace(text, '"', '&quot;')
    return text
def convertToXmlAttribute(value):
    """Return *value* escaped for use as an XML attribute ('' when None)."""
    return escape("" if value is None else value)
class BaseHttpMixin(object):
    """Mixin that queues the standard response headers via ``self.setHeader``."""

    def setHttpHeaders(self, content_type='text/xml'):
        """Prepare the headers for the response (XML by default).

        Sets aggressive anti-caching headers (a date in the past, an
        always-fresh Last-Modified stamp, HTTP/1.1 and HTTP/1.0 cache
        directives), then the Content-Type with a utf-8 charset.
        """
        header_pairs = (
            ('Expires', 'Mon, 26 Jul 1997 05:00:00 GMT'),
            ('Last-Modified', strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())),
            ('Cache-Control', 'no-store, no-cache, must-revalidate'),
            ('Cache-Control', 'post-check=0, pre-check=0'),
            ('Pragma', 'no-cache'),
            ('Content-Type', content_type + '; charset=utf-8'),
        )
        for name, value in header_pairs:
            self.setHeader(name, value)
        return
class BaseXmlMixin(object):
    """Mixin producing the XML envelope of connector responses."""

    def createXmlHeader(self, command, resourceType, currentFolder, url):
        """Return the XML prologue, the <Connector> opening tag and the
        <CurrentFolder> node."""
        self.setHttpHeaders()
        pieces = [
            """<?xml version="1.0" encoding="utf-8" ?>""",
            """<Connector command="%s" resourceType="%s">""" % (command, resourceType),
            """<CurrentFolder path="%s" url="%s" />""" % (
                convertToXmlAttribute(currentFolder),
                convertToXmlAttribute(url),
            ),
        ]
        return "".join(pieces)

    def createXmlFooter(self):
        """Close the <Connector> element."""
        return """</Connector>"""

    def sendError(self, number, text):
        """Return a complete XML error document."""
        self.setHttpHeaders()
        return ("""<?xml version="1.0" encoding="utf-8" ?>""" +
                """<Connector>""" +
                self.sendErrorNode(number, text) +
                """</Connector>""")

    def sendErrorNode(self, number, text):
        """Return a single <Error> node with the escaped message text."""
        return """<Error number="%s" text="%s" />""" % (number, convertToXmlAttribute(text))
class BaseHtmlMixin(object):
    """Mixin producing the HTML/JS response used by the upload commands."""

    def sendUploadResults(self, errorNo=0, fileUrl='', fileName='', customMsg=''):
        """Send the results of the uploading process as a <script> block.

        The minified prologue is the document.domain automatic fix (#1919);
        the original script lives at _dev/domain_fix_template.js.
        """
        self.setHttpHeaders("text/html")
        fields = {
            'errorNumber': errorNo,
            'fileUrl': fileUrl.replace('"', '\\"'),
            'fileName': fileName.replace('"', '\\"'),
            'customMsg': customMsg.replace('"', '\\"'),
        }
        return """<script type="text/javascript">
(function(){var d=document.domain;while (true){try{var A=window.parent.document.domain;break;}catch(e) {};d=d.replace(/.*?(?:\.|$)/,'');if (d.length==0) break;try{document.domain=d;}catch (e){break;}}})();
window.parent.OnUploadCompleted(%(errorNumber)s,"%(fileUrl)s","%(fileName)s","%(customMsg)s");
</script>""" % fields
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Utilitys functions for the File Manager Connector for Python
"""
import string, re
import os
import config as Config
# Generic manipulation functions
def removeExtension(fileName):
    """Return *fileName* without its final extension (ValueError when no dot)."""
    return fileName[:fileName.rindex(".")]
def getExtension(fileName):
    """Return the text after the last dot (ValueError when no dot)."""
    return fileName[fileName.rindex(".") + 1:]
def removeFromStart(string, char):
    """Strip every leading occurrence of the character(s) in *char*."""
    stripped = string.lstrip(char)
    return stripped
def removeFromEnd(string, char):
    """Strip every trailing occurrence of the character(s) in *char*."""
    stripped = string.rstrip(char)
    return stripped
# Path functions
def combinePaths( basePath, folder ):
    """Join *basePath* and *folder* with exactly one '/' between them
    (removeFromEnd/removeFromStart inlined as rstrip/lstrip)."""
    return basePath.rstrip('/') + '/' + folder.lstrip('/')
def getFileName(filename):
    """Extrapolate the bare file name: the part after the last '/' or '\\'."""
    for sep in ("/", "\\"):
        parts = filename.split(sep)
        if len(parts) > 1:
            filename = parts[-1]
    return filename
def sanitizeFolderName( newFolderName ):
    """Do a cleanup of the folder name to avoid possible problems.

    Replaces . \\ / | : ? * " < > and control characters with '_'.

    Fix: the original pattern negated the control-character class
    ([^\\u0000-...]), which replaced every ordinary character with '_'
    instead of only the dangerous ones.
    """
    return re.sub( u'(?u)\\.|\\\\|\\/|\\||\\:|\\?|\\*|"|<|>|[\u0000-\u001f\u007f-\u009f]', '_', newFolderName )
def sanitizeFileName( newFileName ):
    """Do a cleanup of the file name to avoid possible problems.

    Fix: the original regexes kept PHP-style '/' delimiters when ported,
    so they matched literal slashes instead of the intended characters,
    and the final class negation replaced ordinary characters.
    """
    # Replace dots in the name with underscores (only one dot can be
    # there... security issue).
    if ( Config.ForceSingleExtension ): # remove all dots except the last one
        newFileName = re.sub( r'\.(?![^.]*$)', '_', newFileName )
    newFileName = newFileName.replace('\\','/')  # convert windows to unix path
    newFileName = os.path.basename (newFileName) # strip directories
    # Remove \ / | : ? * " < > and control characters
    return re.sub ( u'(?u)\\\\|\\/|\\||\\:|\\?|\\*|"|<|>|[\u0000-\u001f\u007f-\u009f]', '_', newFileName )
def getCurrentFolder(currentFolder):
    """Normalise the CurrentFolder request value.

    Guarantees a leading and trailing '/', collapses duplicate slashes,
    and returns None for invalid paths (containing '..' or '\\').
    """
    folder = currentFolder or '/'
    # Folder syntax must begin and end with a slash.
    if not folder.endswith("/"):
        folder = folder + "/"
    if not folder.startswith("/"):
        folder = "/" + folder
    # Ensure the folder path has no double-slashes.
    while '//' in folder:
        folder = folder.replace('//', '/')
    # Reject traversal attempts and backslashes.
    if ('..' in folder) or ('\\' in folder):
        return None
    return folder
def mapServerPath( environ, url):
    """Emulate ASP's Server.mapPath: map a URL path to a physical directory.

    This isn't fully correct: if the script runs under a virtual directory
    or symlink, getRootPath() detects the problem and aborts.
    """
    return combinePaths( getRootPath(environ), url )
def mapServerFolder(resourceTypePath, folderPath):
    """Join the resource-type root with a folder path (combinePaths inlined)."""
    return resourceTypePath.rstrip('/') + '/' + folderPath.lstrip('/')
def getRootPath(environ):
    """Return the server's document-root physical path.

    WARNING: this may not be thread safe, and doesn't work with
    VirtualServer/mod_python — use Config.UserFilesAbsolutePath instead.

    Fixes: a leftover debugging statement (``raise realPath`` — raising a
    bare string) made the fallback branch always fail and hid the virtual-
    directory check below it; ``environ.has_key`` (gone in Python 3) is
    replaced by the ``in`` operator.
    """
    if 'DOCUMENT_ROOT' in environ:
        return environ['DOCUMENT_ROOT']
    # Derive the root from the script's own location.
    realPath = os.path.realpath( './' )
    selfPath = environ['SCRIPT_FILENAME']
    selfPath = selfPath [ : selfPath.rfind( '/' ) ]
    selfPath = selfPath.replace( '/', os.path.sep)
    position = realPath.find(selfPath)
    # This can check only that this script isn't run from a virtual dir
    # But it avoids the problems that arise if it isn't checked
    if ( position < 0 or position != len(realPath) - len(selfPath) or realPath[ : position ]==''):
        raise Exception('Sorry, can\'t map "UserFilesPath" to a physical path. You must set the "UserFilesAbsolutePath" value in "editor/filemanager/connectors/py/config.py".')
    return realPath[ : position ]
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the "File Uploader" for Python
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorQuickUpload(FCKeditorConnectorBase,
                           UploadFileCommandMixin,
                           BaseHttpMixin, BaseHtmlMixin):
    """Handles the FCKeditor "QuickUpload" command: validates the request and
    stores the posted file under the configured upload folder."""

    def doResponse(self):
        """Process the request, set headers and return a string as response."""
        # Check if this connector is disabled
        if not(Config.Enabled):
            return self.sendUploadResults(1, "This file uploader is disabled. Please check the \"editor/filemanager/connectors/py/config.py\"")
        command = 'QuickUpload'
        # The file type (from the QueryString, by default 'File').
        resourceType = self.request.get('Type','File')
        currentFolder = getCurrentFolder(self.request.get("CurrentFolder",""))
        # Check for invalid paths (getCurrentFolder returns None for "..")
        if currentFolder is None:
            return self.sendUploadResults(102, '', '', "")
        # Check if it is an allowed command
        if ( not command in Config.ConfigAllowedCommands ):
            return self.sendUploadResults( 1, '', '', 'The %s command isn\'t allowed' % command )
        if ( not resourceType in Config.ConfigAllowedTypes ):
            return self.sendUploadResults( 1, '', '', 'Invalid type specified' )
        # Setup paths
        self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
        self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
        if not self.userFilesFolder: # no absolute path given (dangerous...)
            self.userFilesFolder = mapServerPath(self.environ,
                                                 self.webUserFilesFolder)
        # Ensure that the directory exists.
        if not os.path.exists(self.userFilesFolder):
            try:
                # BUG FIX: was `self.createServerFoldercreateServerFolder(...)`,
                # a garbled method name that raised AttributeError on first use.
                self.createServerFolder( self.userFilesFolder )
            except Exception:
                return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
        # File upload doesn't have to return XML, so intercept here
        return self.uploadFile(resourceType, currentFolder)
# Running from command line (plain old CGI)
if __name__ == '__main__':
    try:
        # Create a Connector Instance
        conn = FCKeditorQuickUpload()
        data = conn.doResponse()
        # Emit queued headers, a blank separator line, then the body.
        for header in conn.headers:
            if not header is None:
                print '%s: %s' % header
        print
        print data
    except:
        # Last-resort handler: report the traceback as plain text.
        print "Content-Type: text/plain"
        print
        import cgi
        cgi.print_exception()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Base Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import cgi, os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
import config as Config
class FCKeditorConnectorBase( object ):
    """The base connector class. Subclass it to extend functionality
    (see the Zope example)."""

    def __init__(self, environ=None):
        """Parse the request fields and initialise headers/environment."""
        self.request = FCKeditorRequest(environ)  # Parse request
        self.headers = []                         # Clean Headers
        # Fall back to the process environment for plain CGI.
        self.environ = environ if environ else os.environ

    # local functions
    def setHeader(self, key, value):
        """Queue a (key, value) response header for later emission."""
        self.headers.append((key, value))
        return
class FCKeditorRequest(object):
    """A wrapper around the request object (plain CGI or WSGI).

    On a POST request, cgi parses only the POST body by default, so the
    GET QUERY_STRING is parsed separately into ``get_request``.
    """

    def __init__(self, environ):
        # FIX: always initialise get_request; the original left it undefined
        # when REQUEST_METHOD/QUERY_STRING were missing from the environment,
        # which raised AttributeError later in has_key()/get().
        self.get_request = {}
        if environ:  # WSGI
            self.request = cgi.FieldStorage(fp=environ['wsgi.input'],
                                            environ=environ,
                                            keep_blank_values=1)
            self.environ = environ
        else:  # plain old cgi
            self.environ = os.environ
            self.request = cgi.FieldStorage()
        if 'REQUEST_METHOD' in self.environ and 'QUERY_STRING' in self.environ:
            if self.environ['REQUEST_METHOD'].upper() == 'POST':
                # we are in a POST, but a GET query_string exists;
                # cgi parses POST data by default, so parse the GET
                # QUERY_STRING too
                self.get_request = cgi.FieldStorage(fp=None,
                                                    environ={
                                                        'REQUEST_METHOD': 'GET',
                                                        'QUERY_STRING': self.environ['QUERY_STRING'],
                                                    },
                                                    )

    def has_key(self, key):
        """True when *key* is present in either the POST or the GET data."""
        return (key in self.request) or (key in self.get_request)

    def get(self, key, default=None):
        """Return the field value; for file uploads return the field object
        itself (so .filename/.file stay accessible)."""
        if key in self.request:
            field = self.request[key]
        elif key in self.get_request:
            field = self.get_request[key]
        else:
            return default
        if hasattr(field, "filename") and field.filename:  # file upload, do not convert
            return field
        return field.value
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector/QuickUpload for Python (WSGI wrapper).
See config.py for configuration settings
"""
from connector import FCKeditorConnector
from upload import FCKeditorQuickUpload
import cgitb
from cStringIO import StringIO
# Running from a WSGI-capable server (recommended)
def App(environ, start_response):
    """WSGI entry point: dispatch to the matching connector and run it."""
    script = environ['SCRIPT_NAME']
    if script.endswith("connector.py"):
        handler = FCKeditorConnector(environ)
    elif script.endswith("upload.py"):
        handler = FCKeditorQuickUpload(environ)
    else:
        # Unknown script name: answer 200 with a small diagnostic body.
        start_response("200 Ok", [('Content-Type', 'text/html')])
        yield "Unknown page requested: "
        yield script
        return
    try:
        # Run the connector, then start the WSGI response with its headers.
        body = handler.doResponse()
        start_response("200 Ok", handler.headers)
        yield body
    except:
        # Render the traceback as HTML via cgitb and send it as a 500.
        start_response("500 Internal Server Error", [("Content-type", "text/html")])
        buf = StringIO()
        cgitb.Hook(file=buf).handle()
        yield buf.getvalue()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Utility functions for the File Manager Connector for Python
"""
import string, re
import os
import config as Config
# Generic manipulation functions
def removeExtension(fileName):
    """Return fileName without its last extension.

    Raises ValueError (from rindex) when fileName contains no dot,
    matching the original behavior.
    """
    return fileName[:fileName.rindex(".")]
def getExtension(fileName):
    """Return the text after fileName's last dot.

    Raises ValueError (from rindex) when fileName contains no dot,
    matching the original behavior.
    """
    return fileName[fileName.rindex(".") + 1:]
def removeFromStart(string, char):
    """Strip every leading occurrence of char from string (used with '/')."""
    return string.lstrip(char)
def removeFromEnd(string, char):
    """Strip every trailing occurrence of char from string (used with '/')."""
    return string.rstrip(char)
# Path functions
def combinePaths(basePath, folder):
    """Join two path fragments with exactly one '/' between them.

    Inlines the removeFromEnd/removeFromStart helpers: they are plain
    rstrip/lstrip calls with a single-character argument.
    """
    return basePath.rstrip('/') + '/' + folder.lstrip('/')
def getFileName(filename):
    """Return the last path component, accepting '/' or '\\' separators."""
    # Normalizing backslashes first and taking the final '/' component is
    # equivalent to splitting on both separators in turn.
    return filename.replace("\\", "/").split("/")[-1]
def sanitizeFolderName( newFolderName ):
    """Clean up a requested folder name to avoid filesystem problems.

    Replaces . \\ / | : ? * " < > and ASCII control characters with '_',
    as the comment in the original code intended.  The previous pattern
    used a negated class ([^...]) with \\u escapes, so it replaced the
    ordinary characters instead of the dangerous ones.
    """
    return re.sub(r'[.\\/|:?*"<>\x00-\x1f\x7f-\x9f]', '_', newFolderName)
def sanitizeFileName( newFileName ):
    """Clean up an uploaded file name to avoid path and extension tricks.

    The previous regexes kept PHP-style '/' pattern delimiters, so they
    matched literal slashes instead of the intended patterns and never
    sanitized anything.
    """
    if ( Config.ForceSingleExtension ):
        # Replace every dot but the last with '_' (only one extension may
        # remain -- security issue).
        newFileName = re.sub(r'\.(?![^.]*$)', '_', newFileName)
    newFileName = newFileName.replace('\\', '/')  # windows -> unix separators
    newFileName = os.path.basename(newFileName)   # strip any directory part
    # Replace \ / | : ? * " < > and ASCII control characters with '_'.
    return re.sub(r'[\\/|:?*"<>\x00-\x1f\x7f-\x9f]', '_', newFileName)
def getCurrentFolder(currentFolder):
    """Normalize and validate the client-supplied folder path.

    Returns the path with a leading and trailing '/', collapses duplicate
    slashes, and returns None for traversal attempts ('..') or backslashes.
    ('!=' replaces the Python-2-only '<>' operator; behavior is unchanged.)
    """
    if not currentFolder:
        currentFolder = '/'
    # The folder path must begin and end with a slash.
    if currentFolder[-1] != "/":
        currentFolder += "/"
    if currentFolder[0] != "/":
        currentFolder = "/" + currentFolder
    # Ensure the folder path has no double-slashes.
    while '//' in currentFolder:
        currentFolder = currentFolder.replace('//', '/')
    # Reject invalid/parent paths outright.
    if '..' in currentFolder or '\\' in currentFolder:
        return None
    return currentFolder
def mapServerPath(environ, url):
    """Emulate ASP's Server.mapPath: map a URL path to a physical directory.

    Not fully correct: when the script lives under a virtual directory or
    symlink, the underlying getRootPath() detects the problem and aborts.
    """
    return combinePaths(getRootPath(environ), url)
def mapServerFolder(resourceTypePath, folderPath):
    """Join a resource-type root with a folder path (single '/' between).

    Inlines combinePaths, which is itself rstrip + '/' + lstrip.
    """
    return resourceTypePath.rstrip('/') + '/' + folderPath.lstrip('/')
def getRootPath(environ):
    """Return the server's document-root physical path.

    WARNING: this may not be thread safe and does not work with
    VirtualServer/mod_python -- prefer Config.UserFilesAbsolutePath.
    """
    if 'DOCUMENT_ROOT' in environ:
        return environ['DOCUMENT_ROOT']
    # No DOCUMENT_ROOT (e.g. IIS): deduce the root from SCRIPT_FILENAME.
    realPath = os.path.realpath('./')
    selfPath = environ['SCRIPT_FILENAME']
    selfPath = selfPath[:selfPath.rfind('/')]
    selfPath = selfPath.replace('/', os.path.sep)
    position = realPath.find(selfPath)
    # This can only check that the script isn't run from a virtual dir,
    # but it avoids the problems that arise if it isn't checked.
    # (A stray debug statement 'raise realPath' previously sat here and
    # made the check below unreachable while raising a non-exception.)
    if position < 0 or position != len(realPath) - len(selfPath) or realPath[:position] == '':
        raise Exception('Sorry, can\'t map "UserFilesPath" to a physical path. You must set the "UserFilesAbsolutePath" value in "editor/filemanager/connectors/py/config.py".')
    return realPath[:position]
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorConnector( FCKeditorConnectorBase,
                          GetFoldersCommandMixin,
                          GetFoldersAndFilesCommandMixin,
                          CreateFolderCommandMixin,
                          UploadFileCommandMixin,
                          BaseHttpMixin, BaseXmlMixin, BaseHtmlMixin ):
    """The standard connector class: validates and dispatches one command."""

    def doResponse(self):
        """Process the request, set headers, and return the response body.

        Returns an XML document for the browse commands, the HTML snippet
        produced by uploadFile() for FileUpload, or None for malformed
        requests (missing required fields).
        """
        s = ""
        # Refuse to run unless explicitly enabled in config.py (security).
        if not Config.Enabled:
            return self.sendError(1, "This connector is disabled. Please check the connector configurations in \"editor/filemanager/connectors/py/config.py\" and try again.")
        # All three fields are required; silently ignore malformed requests.
        for key in ("Command", "Type", "CurrentFolder"):
            if not self.request.has_key(key):
                return
        # Get command, resource type and current folder.
        command = self.request.get("Command")
        resourceType = self.request.get("Type")
        currentFolder = getCurrentFolder(self.request.get("CurrentFolder"))
        # Check for invalid paths (traversal, backslashes).
        if currentFolder is None:
            return self.sendError(102, "")
        # Whitelist the command and resource type.
        if not command in Config.ConfigAllowedCommands:
            return self.sendError(1, 'The %s command isn\'t allowed' % command)
        if not resourceType in Config.ConfigAllowedTypes:
            return self.sendError(1, 'Invalid type specified')
        # Resolve the configured storage paths for this command/type.
        if command == "QuickUpload":
            self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
            self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
        else:
            self.userFilesFolder = Config.FileTypesAbsolutePath[resourceType]
            self.webUserFilesFolder = Config.FileTypesPath[resourceType]
        if not self.userFilesFolder:  # no absolute path given (dangerous...)
            self.userFilesFolder = mapServerPath(self.environ,
                                                 self.webUserFilesFolder)
        # Ensure that the directory exists.
        if not os.path.exists(self.userFilesFolder):
            try:
                # Fixed: this used to call the misspelled
                # 'createServerFoldercreateServerFolder', which raised
                # AttributeError whenever the directory was missing.
                self.createServerFolder(self.userFilesFolder)
            except:
                return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
        # File upload doesn't have to return XML, so intercept here.
        if (command == "FileUpload"):
            return self.uploadFile(resourceType, currentFolder)
        # Create the public URL of the current folder.
        url = combinePaths(self.webUserFilesFolder, currentFolder)
        # Begin XML.
        s += self.createXmlHeader(command, resourceType, currentFolder, url)
        # Dispatch the XML-producing commands.
        selector = {"GetFolders": self.getFolders,
                    "GetFoldersAndFiles": self.getFoldersAndFiles,
                    "CreateFolder": self.createFolder,
                    }
        s += selector[command](resourceType, currentFolder)
        s += self.createXmlFooter()
        return s
# Running from command line (plain old CGI)
if __name__ == '__main__':
    try:
        # Build the whole response BEFORE emitting output, so a failure
        # can still switch to the plain-text error output below.
        conn = FCKeditorConnector()
        data = conn.doResponse()
        # Emit collected HTTP headers, the blank separator line (CGI
        # protocol), then the response body.
        for header in conn.headers:
            print '%s: %s' % header
        print
        print data
    except:
        # On any error, dump a text/plain traceback (CGI debugging aid).
        print "Content-Type: text/plain"
        print
        import cgi
        cgi.print_exception()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python and Zope.
This code was not tested at all.
It just was ported from pre 2.5 release, so for further reference see
\editor\filemanager\browser\default\connectors\py\connector.py in previous
releases.
"""
from fckutil import *
from connector import *
import config as Config
class FCKeditorConnectorZope(FCKeditorConnector):
    """Zope version of FCKeditorConnector.

    NOTE(review): ported from the pre-2.5 release and, per the module
    docstring, never tested -- review before production use.
    """
    # Allow access (Zope)
    __allow_access_to_unprotected_subobjects__ = 1

    def __init__(self, context=None):
        """Constructor: store the Zope context and wrap its REQUEST."""
        FCKeditorConnector.__init__(self, environ=None)  # call superclass constructor
        # Instance attributes.
        self.context = context
        self.request = FCKeditorRequest(context)
        # Fixed: these caches were never initialized, so the getZope*Context
        # accessors below raised AttributeError on first use.
        self.zopeRootContext = None
        self.zopeUploadContext = None

    def getZopeRootContext(self):
        """Lazily resolve and cache the Zope physical root."""
        if self.zopeRootContext is None:
            self.zopeRootContext = self.context.getPhysicalRoot()
        return self.zopeRootContext

    def getZopeUploadContext(self):
        """Lazily walk from the root to the configured upload folder."""
        if self.zopeUploadContext is None:
            folderNames = self.userFilesFolder.split("/")
            c = self.getZopeRootContext()
            for folderName in folderNames:
                if folderName != "":
                    c = c[folderName]
            self.zopeUploadContext = c
        return self.zopeUploadContext

    def setHeader(self, key, value):
        """Send headers through the Zope response object."""
        self.context.REQUEST.RESPONSE.setHeader(key, value)

    def getFolders(self, resourceType, currentFolder):
        """Return the <Folders> XML fragment for the current Zope folder."""
        s = ""
        s += """<Folders>"""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        for (name, o) in zopeFolder.objectItems(["Folder"]):
            s += """<Folder name="%s" />""" % (
                convertToXmlAttribute(name)
            )
        s += """</Folders>"""
        return s

    def getZopeFoldersAndFiles(self, resourceType, currentFolder):
        """Return both folder and file XML fragments."""
        # NOTE(review): self.getZopeFolders is not defined on this class;
        # this probably should call self.getFolders -- confirm before use.
        folders = self.getZopeFolders(resourceType, currentFolder)
        files = self.getZopeFiles(resourceType, currentFolder)
        s = folders + files
        return s

    def getZopeFiles(self, resourceType, currentFolder):
        """Return the <Files> XML fragment for the current Zope folder."""
        s = ""
        s += """<Files>"""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        for (name, o) in zopeFolder.objectItems(["File", "Image"]):
            s += """<File name="%s" size="%s" />""" % (
                convertToXmlAttribute(name),
                ((o.get_size() / 1024) + 1)  # size in KB, rounded up
            )
        s += """</Files>"""
        return s

    def findZopeFolder(self, resourceType, folderName):
        """Return the Zope folder object for resourceType/folderName,
        creating the resource-type folder if it does not exist yet."""
        zopeFolder = self.getZopeUploadContext()
        # Fixed: removeFromStart/removeFromEnd are module-level helpers from
        # fckutil, not methods, so they must not be called through self.
        folderName = removeFromStart(folderName, "/")
        folderName = removeFromEnd(folderName, "/")
        if resourceType != "":
            try:
                zopeFolder = zopeFolder[resourceType]
            except:
                zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType)
                zopeFolder = zopeFolder[resourceType]
        if folderName != "":
            for name in folderName.split("/"):
                zopeFolder = zopeFolder[name]
        return zopeFolder

    def createFolder(self, resourceType, currentFolder):
        """Create a new folder inside the current one (CreateFolder)."""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        errorNo = 0
        errorMsg = ""
        if self.request.has_key("NewFolderName"):
            newFolder = self.request.get("NewFolderName", None)
            zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder)
        else:
            errorNo = 102
        return self.sendErrorNode(errorNo, errorMsg)

    def uploadFile(self, resourceType, currentFolder, count=None):
        """Store an uploaded file, renaming it name.N.ext on id collisions."""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        file = self.request.get("NewFile", None)
        # Fixed: these helpers also live in fckutil, not on self.
        fileName = getFileName(file.filename)
        fileNameOnly = removeExtension(fileName)
        fileExtension = getExtension(fileName).lower()
        if count:
            nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension)
        else:
            nid = fileName
        title = nid
        try:
            zopeFolder.manage_addProduct['OFSP'].manage_addFile(
                id=nid,
                title=title,
                file=file.read()
            )
        except:
            if count:
                count += 1
            else:
                count = 1
            # Fixed: the retry previously called the nonexistent
            # 'zopeFileUpload' method; recurse into uploadFile instead.
            return self.uploadFile(resourceType, currentFolder, count)
        return self.sendUploadResults(0)
class FCKeditorRequest(object):
    """Thin adapter exposing Zope's REQUEST through the connector's API."""

    def __init__(self, context=None):
        # Keep a direct reference to the Zope request object.
        self.request = context.REQUEST

    def has_key(self, key):
        """Delegate existence checks to the underlying request."""
        return self.request.has_key(key)

    def get(self, key, default=None):
        """Delegate value lookup, with an optional default."""
        return self.request.get(key, default)
"""
Running from zope, you will need to modify this connector.
If you have uploaded the FCKeditor into Zope (like me), you need to
move this connector out of Zope, and replace the "connector" with an
alias as below. The key to it is to pass the Zope context in, as
we then have a link to the Zope context.
## Script (Python) "connector.py"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=*args, **kws
##title=ALIAS
##
import Products.zope as connector
return connector.FCKeditorConnectorZope(context=context).doResponse()
"""
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
import os
try: # Windows needs stdio set for binary mode for file upload to work.
import msvcrt
msvcrt.setmode (0, os.O_BINARY) # stdin = 0
msvcrt.setmode (1, os.O_BINARY) # stdout = 1
except ImportError:
pass
from fckutil import *
from fckoutput import *
import config as Config
class GetFoldersCommandMixin(object):
    def getFolders(self, resourceType, currentFolder):
        """Handle the GetFolders command: list subdirectories as XML.

        Returns the <Folders> fragment for the mapped server directory,
        in os.listdir() order.
        """
        # Map the virtual path to our local server.
        serverPath = mapServerFolder(self.userFilesFolder, currentFolder)
        entries = []
        for entry in os.listdir(serverPath):
            if os.path.isdir(mapServerFolder(serverPath, entry)):
                entries.append("""<Folder name="%s" />""" % convertToXmlAttribute(entry))
        return """<Folders>""" + "".join(entries) + """</Folders>"""
class GetFoldersAndFilesCommandMixin(object):
    def getFoldersAndFiles(self, resourceType, currentFolder):
        """Handle GetFoldersAndFiles: list subfolders and files as XML.

        Returns the concatenated <Folders> and <Files> fragments for the
        mapped server directory, in os.listdir() order.
        """
        # Map the virtual path to our local server.
        serverPath = mapServerFolder(self.userFilesFolder, currentFolder)
        # Open the folders / files nodes.
        folders = """<Folders>"""
        files = """<Files>"""
        for someObject in os.listdir(serverPath):
            someObjectPath = mapServerFolder(serverPath, someObject)
            if os.path.isdir(someObjectPath):
                folders += """<Folder name="%s" />""" % (
                    convertToXmlAttribute(someObject)
                )
            elif os.path.isfile(someObjectPath):
                # Reuse the stat result: 'size' was previously computed and
                # then ignored in favor of a second os.path.getsize() call.
                size = os.path.getsize(someObjectPath)
                files += """<File name="%s" size="%s" />""" % (
                    convertToXmlAttribute(someObject),
                    size
                )
        # Close the folders / files nodes.
        folders += """</Folders>"""
        files += """</Files>"""
        return folders + files
class CreateFolderCommandMixin (object):
def createFolder(self, resourceType, currentFolder):
"""
Purpose: command to create a new folder
"""
errorNo = 0; errorMsg ='';
if self.request.has_key("NewFolderName"):
newFolder = self.request.get("NewFolderName", None)
newFolder = sanitizeFolderName (newFolder)
try:
newFolderPath = mapServerFolder(self.userFilesFolder, combinePaths(currentFolder, newFolder))
self.createServerFolder(newFolderPath)
except Exception, e:
errorMsg = str(e).decode('iso-8859-1').encode('utf-8') # warning with encodigns!!!
if hasattr(e,'errno'):
if e.errno==17: #file already exists
errorNo=0
elif e.errno==13: # permission denied
errorNo = 103
elif e.errno==36 or e.errno==2 or e.errno==22: # filename too long / no such file / invalid name
errorNo = 102
else:
errorNo = 110
else:
errorNo = 102
return self.sendErrorNode ( errorNo, errorMsg )
def createServerFolder(self, folderPath):
"Purpose: physically creates a folder on the server"
# No need to check if the parent exists, just create all hierachy
try:
permissions = Config.ChmodOnFolderCreate
if not permissions:
os.makedirs(folderPath)
except AttributeError: #ChmodOnFolderCreate undefined
permissions = 0755
if permissions:
oldumask = os.umask(0)
os.makedirs(folderPath,mode=0755)
os.umask( oldumask )
class UploadFileCommandMixin (object):
    def uploadFile(self, resourceType, currentFolder):
        """
        Purpose: command to upload files to server (same as FileUpload)
        """
        errorNo = 0
        if self.request.has_key("NewFile"):
            # newFile has all the contents we need
            newFile = self.request.get("NewFile", "")
            # Get the file name and sanitize it before any path use.
            newFileName = newFile.filename
            newFileName = sanitizeFileName( newFileName )
            newFileNameOnly = removeExtension(newFileName)
            newFileExtension = getExtension(newFileName).lower()
            allowedExtensions = Config.AllowedExtensions[resourceType]
            deniedExtensions = Config.DeniedExtensions[resourceType]
            # An allow-list wins over a deny-list: the deny-list is only
            # consulted when no allow-list is configured for this type.
            if (allowedExtensions):
                # Check for allowed
                isAllowed = False
                if (newFileExtension in allowedExtensions):
                    isAllowed = True
            elif (deniedExtensions):
                # Check for denied
                isAllowed = True
                if (newFileExtension in deniedExtensions):
                    isAllowed = False
            else:
                # No extension limitations
                isAllowed = True
            if (isAllowed):
                # Upload to operating system
                # Map the virtual path to the local server path
                currentFolderPath = mapServerFolder(self.userFilesFolder, currentFolder)
                i = 0
                while (True):
                    newFilePath = os.path.join (currentFolderPath,newFileName)
                    if os.path.exists(newFilePath):
                        # Name collision: append (NNNN) and retry; error 201
                        # tells the client the file was renamed.
                        i += 1
                        newFileName = "%s(%04d).%s" % (
                            newFileNameOnly, i, newFileExtension
                        )
                        errorNo= 201 # file renamed
                    else:
                        # Read file contents and write to the desired path (similar to php's move_uploaded_file)
                        # Copies in 100000-byte chunks to bound memory use.
                        fout = file(newFilePath, 'wb')
                        while (True):
                            chunk = newFile.file.read(100000)
                            if not chunk: break
                            fout.write (chunk)
                        fout.close()
                        if os.path.exists ( newFilePath ):
                            doChmod = False
                            try:
                                doChmod = Config.ChmodOnUpload
                                permissions = Config.ChmodOnUpload
                            except AttributeError: #ChmodOnUpload undefined
                                doChmod = True
                                permissions = 0755
                            if ( doChmod ):
                                # Temporarily clear the umask so the exact
                                # configured permissions are applied.
                                oldumask = os.umask(0)
                                os.chmod( newFilePath, permissions )
                                os.umask( oldumask )
                        newFileUrl = self.webUserFilesFolder + currentFolder + newFileName
                        return self.sendUploadResults( errorNo , newFileUrl, newFileName )
            else:
                return self.sendUploadResults( errorNo = 203, customMsg = "Extension not allowed" )
        else:
            return self.sendUploadResults( errorNo = 202, customMsg = "No File" )
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
from time import gmtime, strftime
import string
def escape(text, replace=None):
    """Convert the special characters '&', '<', '>' and '"'.

    RFC 1866 specifies that these characters be represented in HTML as
    &amp; &lt; &gt; and &quot; respectively.  The previous version replaced
    each character with itself (the entity strings had been lost), making
    the function a no-op; it also defaulted to string.replace, which no
    longer exists in Python 3.

    replace -- optional 3-argument replace function; defaults to
               str.replace on the text itself.
    """
    if replace is None:
        replace = lambda s, old, new: s.replace(old, new)
    text = replace(text, '&', '&amp;')  # must be done first
    text = replace(text, '<', '&lt;')
    text = replace(text, '>', '&gt;')
    text = replace(text, '"', '&quot;')
    return text
def convertToXmlAttribute(value):
    """Return value escaped for embedding in an XML attribute (None -> '')."""
    if value is None:
        # escape('') is always '', so short-circuit the None case.
        return ""
    return escape(value)
class BaseHttpMixin(object):
    def setHttpHeaders(self, content_type='text/xml'):
        """Emit the standard anti-caching headers plus the Content-Type.

        An Expires date in the past, a Last-Modified of "now" and the
        HTTP/1.1 and HTTP/1.0 cache-control directives together prevent
        the browser from caching connector responses.
        """
        header_pairs = (
            ('Expires', 'Mon, 26 Jul 1997 05:00:00 GMT'),
            ('Last-Modified', strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())),
            ('Cache-Control', 'no-store, no-cache, must-revalidate'),
            ('Cache-Control', 'post-check=0, pre-check=0'),
            ('Pragma', 'no-cache'),
            ('Content-Type', content_type + '; charset=utf-8'),
        )
        for name, value in header_pairs:
            self.setHeader(name, value)
        return
class BaseXmlMixin(object):
    def createXmlHeader(self, command, resourceType, currentFolder, url):
        """Send the HTTP headers and return the XML prolog + opening nodes."""
        self.setHttpHeaders()
        parts = [
            """<?xml version="1.0" encoding="utf-8" ?>""",
            # Main connector node.
            """<Connector command="%s" resourceType="%s">""" % (command, resourceType),
            # Current folder node.
            """<CurrentFolder path="%s" url="%s" />""" % (
                convertToXmlAttribute(currentFolder),
                convertToXmlAttribute(url),
            ),
        ]
        return "".join(parts)

    def createXmlFooter(self):
        """Return the closing </Connector> tag."""
        return """</Connector>"""

    def sendError(self, number, text):
        """Return a complete standalone XML error document."""
        self.setHttpHeaders()
        return ("""<?xml version="1.0" encoding="utf-8" ?>""" +
                """<Connector>""" +
                self.sendErrorNode(number, text) +
                """</Connector>""")

    def sendErrorNode(self, number, text):
        """Return a single <Error> node with the code and escaped message."""
        return """<Error number="%s" text="%s" />""" % (number, convertToXmlAttribute(text))
class BaseHtmlMixin(object):
    def sendUploadResults( self, errorNo = 0, fileUrl = '', fileName = '', customMsg = '' ):
        """Build the HTML+JS snippet that reports upload results to the opener."""
        self.setHttpHeaders("text/html")
        "This is the function that sends the results of the uploading process"
        "Minified version of the document.domain automatic fix script (#1919)."
        "The original script can be found at _dev/domain_fix_template.js"
        # The inline script first relaxes document.domain (so cross-subdomain
        # frames can talk), then notifies the parent frame via OnUploadCompleted.
        # NOTE(review): only '"' is escaped in the interpolated values; a
        # trailing backslash or '</script>' in a value could still break
        # the generated markup -- confirm inputs are pre-sanitized.
        return """<script type="text/javascript">
(function(){var d=document.domain;while (true){try{var A=window.parent.document.domain;break;}catch(e) {};d=d.replace(/.*?(?:\.|$)/,'');if (d.length==0) break;try{document.domain=d;}catch (e){break;}}})();
window.parent.OnUploadCompleted(%(errorNumber)s,"%(fileUrl)s","%(fileName)s","%(customMsg)s");
</script>""" % {
        'errorNumber': errorNo,
        'fileUrl': fileUrl.replace ('"', '\\"'),
        'fileName': fileName.replace ( '"', '\\"' ) ,
        'customMsg': customMsg.replace ( '"', '\\"' ),
        }
| Python |
#!/usr/bin/env python
"""
* FCKeditor - The text editor for Internet - http://www.fckeditor.net
* Copyright (C) 2003-2008 Frederico Caldeira Knabben
*
* == BEGIN LICENSE ==
*
* Licensed under the terms of any of the following licenses at your
* choice:
*
* - GNU General Public License Version 2 or later (the "GPL")
* http://www.gnu.org/licenses/gpl.html
*
* - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
* http://www.gnu.org/licenses/lgpl.html
*
* - Mozilla Public License Version 1.1 or later (the "MPL")
* http://www.mozilla.org/MPL/MPL-1.1.html
*
* == END LICENSE ==
*
* Configuration file for the File Manager Connector for Python
"""
# INSTALLATION NOTE: You must set up your server environment accordingly to run
# python scripts. This connector requires Python 2.4 or greater.
#
# Supported operation modes:
# * WSGI (recommended): You'll need apache + mod_python + modpython_gateway
# or any web server capable of the WSGI python standard
# * Plain Old CGI: Any server capable of running standard python scripts
# (although mod_python is recommended for performance)
# This was the previous connector version operation mode
#
# If you're using Apache web server, replace the htaccess.txt to to .htaccess,
# and set the proper options and paths.
# For WSGI and mod_python, you may need to download modpython_gateway from:
# http://projects.amor.org/misc/svn/modpython_gateway.py and copy it in this
# directory.
# SECURITY: You must explicitly enable this "connector". (Set it to "True").
# WARNING: don't just set "Enabled = True"; you must be sure that only
# authenticated users can access this file or use some kind of session checking.
Enabled = False

# Path to user files relative to the document root.
UserFilesPath = '/userfiles/'

# Fill the following value if you prefer to specify the absolute path for the
# user files directory. Useful if you are using a virtual directory, symbolic
# link or alias. Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'UserFilesPath' must point to the same directory.
# WARNING: GetRootPath may not work in virtual or mod_python configurations, and
# may not be thread safe. Use this configuration parameter instead.
UserFilesAbsolutePath = ''

# Due to security issues with Apache modules, it is recommended to leave the
# following setting enabled (it collapses multiple file extensions into one).
ForceSingleExtension = True

# What the user can do with this connector.
ConfigAllowedCommands = [ 'QuickUpload', 'FileUpload', 'GetFolders', 'GetFoldersAndFiles', 'CreateFolder' ]

# Allowed resource types.
ConfigAllowedTypes = ['File', 'Image', 'Flash', 'Media']

# After a file is uploaded, sometimes it is required to change its permissions
# so that it is possible to access it at a later time.
# If possible, it is recommended to set more restrictive permissions, like 0755.
# Set to 0 to disable this feature.
# Note: not needed on Windows-based servers.
ChmodOnUpload = 0755

# See comments above.
# Used when creating folders that do not exist.
ChmodOnFolderCreate = 0755

# Do not touch these 3 lines, see "Configuration settings for each Resource Type".
AllowedExtensions = {}; DeniedExtensions = {};
FileTypesPath = {}; FileTypesAbsolutePath = {};
QuickUploadPath = {}; QuickUploadAbsolutePath = {};
# Configuration settings for each Resource Type
#
# - AllowedExtensions: the possible extensions that can be allowed.
# If it is empty then any file type can be uploaded.
# - DeniedExtensions: The extensions that won't be allowed.
# If it is empty then no restrictions are done here.
#
# For a file to be uploaded it has to fulfill both the AllowedExtensions
# and DeniedExtensions (that's it: not being denied) conditions.
#
# - FileTypesPath: the virtual folder relative to the document root where
# these resources will be located.
# Attention: It must start and end with a slash: '/'
#
# - FileTypesAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'FileTypesPath' must point to the same directory.
# Attention: It must end with a slash: '/'
#
#
# - QuickUploadPath: the virtual folder relative to the document root where
# these resources will be uploaded using the Upload tab in the resources
# dialogs.
# Attention: It must start and end with a slash: '/'
#
# - QuickUploadAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'QuickUploadPath' must point to the same directory.
# Attention: It must end with a slash: '/'
# 'File' resource type: general documents (broad allow-list, nothing denied).
AllowedExtensions['File'] = ['7z','aiff','asf','avi','bmp','csv','doc','fla','flv','gif','gz','gzip','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','ods','odt','pdf','png','ppt','pxd','qt','ram','rar','rm','rmi','rmvb','rtf','sdc','sitd','swf','sxc','sxw','tar','tgz','tif','tiff','txt','vsd','wav','wma','wmv','xls','xml','zip']
DeniedExtensions['File'] = []
FileTypesPath['File'] = UserFilesPath + 'file/'
# The 'cond and a or b' idiom stands in for a conditional expression to keep
# Python 2.4 compatibility; it is safe here because the 'and' arm is a
# non-empty string whenever UserFilesAbsolutePath is non-empty.
FileTypesAbsolutePath['File'] = (not UserFilesAbsolutePath == '') and (UserFilesAbsolutePath + 'file/') or ''
QuickUploadPath['File'] = FileTypesPath['File']
QuickUploadAbsolutePath['File'] = FileTypesAbsolutePath['File']

# 'Image' resource type: bitmap images only.
AllowedExtensions['Image'] = ['bmp','gif','jpeg','jpg','png']
DeniedExtensions['Image'] = []
FileTypesPath['Image'] = UserFilesPath + 'image/'
FileTypesAbsolutePath['Image'] = (not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'image/' or ''
QuickUploadPath['Image'] = FileTypesPath['Image']
QuickUploadAbsolutePath['Image']= FileTypesAbsolutePath['Image']

# 'Flash' resource type.
AllowedExtensions['Flash'] = ['swf','flv']
DeniedExtensions['Flash'] = []
FileTypesPath['Flash'] = UserFilesPath + 'flash/'
FileTypesAbsolutePath['Flash'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'flash/' or ''
QuickUploadPath['Flash'] = FileTypesPath['Flash']
QuickUploadAbsolutePath['Flash']= FileTypesAbsolutePath['Flash']

# 'Media' resource type: audio/video (plus a few image formats).
AllowedExtensions['Media'] = ['aiff','asf','avi','bmp','fla', 'flv','gif','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','png','qt','ram','rm','rmi','rmvb','swf','tif','tiff','wav','wma','wmv']
DeniedExtensions['Media'] = []
FileTypesPath['Media'] = UserFilesPath + 'media/'
FileTypesAbsolutePath['Media'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'media/' or ''
QuickUploadPath['Media'] = FileTypesPath['Media']
QuickUploadAbsolutePath['Media']= FileTypesAbsolutePath['Media']
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python and Zope.
This code was not tested at all.
It just was ported from pre 2.5 release, so for further reference see
\editor\filemanager\browser\default\connectors\py\connector.py in previous
releases.
"""
from fckutil import *
from connector import *
import config as Config
class FCKeditorConnectorZope(FCKeditorConnector):
    """
    Zope version of FCKeditorConnector.

    Folders and files live as Zope objects (Folder/File/Image) instead of
    on the filesystem, so every command is re-implemented in terms of the
    Zope object database.  NOTE: the original header says this code was
    never tested.
    """
    # Allow access (Zope security machinery).
    __allow_access_to_unprotected_subobjects__ = 1

    def __init__(self, context=None):
        """
        Constructor.

        context -- the Zope context object; its REQUEST is wrapped so the
        generic connector code can read request parameters through it.
        """
        FCKeditorConnector.__init__(self, environ=None)  # call superclass constructor
        # Instance attributes
        self.context = context
        self.request = FCKeditorRequest(context)
        # Caches for the lazily-resolved Zope contexts.  They were
        # previously never initialised, so the getters below raised
        # AttributeError on first use (unless a superclass set them --
        # setting None here only forces a re-lookup, which is safe).
        self.zopeRootContext = None
        self.zopeUploadContext = None

    def getZopeRootContext(self):
        "Return (and cache) the Zope application root."
        if self.zopeRootContext is None:
            self.zopeRootContext = self.context.getPhysicalRoot()
        return self.zopeRootContext

    def getZopeUploadContext(self):
        "Return (and cache) the Zope folder holding the user files."
        if self.zopeUploadContext is None:
            folderNames = self.userFilesFolder.split("/")
            c = self.getZopeRootContext()
            for folderName in folderNames:
                if folderName != "":
                    c = c[folderName]
            self.zopeUploadContext = c
        return self.zopeUploadContext

    def setHeader(self, key, value):
        "Set a response header directly on the Zope RESPONSE object."
        self.context.REQUEST.RESPONSE.setHeader(key, value)

    def getFolders(self, resourceType, currentFolder):
        "Return the <Folders> XML fragment for currentFolder."
        s = ""
        s += """<Folders>"""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        for (name, o) in zopeFolder.objectItems(["Folder"]):
            s += """<Folder name="%s" />""" % (
                convertToXmlAttribute(name)
            )
        # Close the folders node
        s += """</Folders>"""
        return s

    def getZopeFoldersAndFiles(self, resourceType, currentFolder):
        "Return the <Folders> and <Files> XML fragments concatenated."
        # Bug fix: the folder listing lives in getFolders(); the previous
        # code called a non-existent self.getZopeFolders().
        folders = self.getFolders(resourceType, currentFolder)
        files = self.getZopeFiles(resourceType, currentFolder)
        s = folders + files
        return s

    def getZopeFiles(self, resourceType, currentFolder):
        "Return the <Files> XML fragment for currentFolder."
        s = ""
        s += """<Files>"""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        for (name, o) in zopeFolder.objectItems(["File", "Image"]):
            s += """<File name="%s" size="%s" />""" % (
                convertToXmlAttribute(name),
                ((o.get_size() / 1024) + 1)  # size reported in KB, rounded up
            )
        # Close the files node
        s += """</Files>"""
        return s

    def findZopeFolder(self, resourceType, folderName):
        """
        Return the Zope folder object for resourceType/folderName,
        creating the resource-type folder on demand.
        """
        zopeFolder = self.getZopeUploadContext()
        folderName = self.removeFromStart(folderName, "/")
        folderName = self.removeFromEnd(folderName, "/")
        if resourceType != "":
            try:
                zopeFolder = zopeFolder[resourceType]
            except:
                # Missing type folder: create it, then descend into it.
                zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType)
                zopeFolder = zopeFolder[resourceType]
        if folderName != "":
            folderNames = folderName.split("/")
            for folderName in folderNames:
                zopeFolder = zopeFolder[folderName]
        return zopeFolder

    def createFolder(self, resourceType, currentFolder):
        "Create the folder named by the NewFolderName request parameter."
        # Find out where we are
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        errorNo = 0
        errorMsg = ""
        if self.request.has_key("NewFolderName"):
            newFolder = self.request.get("NewFolderName", None)
            zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder)
        else:
            errorNo = 102
        return self.sendErrorNode(errorNo, errorMsg)

    def uploadFile(self, resourceType, currentFolder, count=None):
        """
        Store the uploaded NewFile in the target Zope folder.

        count -- retry counter used to derive an alternative id
        ("name.N.ext") when the original id is already taken.
        """
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        file = self.request.get("NewFile", None)
        fileName = self.getFileName(file.filename)
        fileNameOnly = self.removeExtension(fileName)
        fileExtension = self.getExtension(fileName).lower()
        if count:
            nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension)
        else:
            nid = fileName
        title = nid
        try:
            zopeFolder.manage_addProduct['OFSP'].manage_addFile(
                id=nid,
                title=title,
                file=file.read()
            )
        except:
            if count:
                count += 1
            else:
                count = 1
            # Bug fix: retry through uploadFile itself; the previous code
            # called the non-existent self.zopeFileUpload().
            return self.uploadFile(resourceType, currentFolder, count)
        return self.sendUploadResults(0)
class FCKeditorRequest(object):
    """Dict-like wrapper around the Zope REQUEST object."""

    def __init__(self, context=None):
        # Keep a direct reference to the request carried by the context.
        self.request = context.REQUEST

    def has_key(self, key):
        """Return whether the request carries the given parameter."""
        return self.request.has_key(key)

    def get(self, key, default=None):
        """Return the request parameter, or *default* when absent."""
        return self.request.get(key, default)
"""
Running from zope, you will need to modify this connector.
If you have uploaded the FCKeditor into Zope (like me), you need to
move this connector out of Zope, and replace the "connector" with an
alias as below. The key to it is to pass the Zope context in, as
we then have a link to the Zope context.
## Script (Python) "connector.py"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=*args, **kws
##title=ALIAS
##
import Products.zope as connector
return connector.FCKeditorConnectorZope(context=context).doResponse()
"""
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Base Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import cgi, os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
import config as Config
class FCKeditorConnectorBase( object ):
    """The base connector class. Subclass it to extend functionality (see Zope example)."""

    def __init__(self, environ=None):
        """Parse the request and initialise headers/environment."""
        self.request = FCKeditorRequest(environ)  # parse request
        self.headers = []  # clean headers
        # Fall back to the process environment when no WSGI environ is given.
        self.environ = environ or os.environ

    # local functions

    def setHeader(self, key, value):
        """Queue an HTTP response header as a (key, value) pair."""
        self.headers.append((key, value))
        return
class FCKeditorRequest(object):
    """A wrapper around the request object.

    Parses POST fields via cgi.FieldStorage and, for POST requests that
    also carry a query string, parses the GET parameters separately so
    both are reachable through has_key()/get().
    """
    def __init__(self, environ):
        # Default: no separately-parsed GET data.  This guarantees the
        # attribute always exists; previously it was only assigned when
        # REQUEST_METHOD and QUERY_STRING were both present, so the first
        # has_key()/get() call could raise AttributeError otherwise.
        self.get_request = {}
        if environ:  # WSGI
            self.request = cgi.FieldStorage(fp=environ['wsgi.input'],
                                            environ=environ,
                                            keep_blank_values=1)
            self.environ = environ
        else:  # plain old cgi
            self.environ = os.environ
            self.request = cgi.FieldStorage()
        if 'REQUEST_METHOD' in self.environ and 'QUERY_STRING' in self.environ:
            if self.environ['REQUEST_METHOD'].upper() == 'POST':
                # We are in a POST, but a GET query string exists.
                # cgi parses only the POST data by default, so parse the
                # GET QUERY_STRING too.
                self.get_request = cgi.FieldStorage(fp=None,
                                                    environ={
                                                    'REQUEST_METHOD': 'GET',
                                                    'QUERY_STRING': self.environ['QUERY_STRING'],
                                                    },
                                                    )

    def has_key(self, key):
        """Return whether *key* is present in the POST or GET data."""
        return self.request.has_key(key) or self.get_request.has_key(key)

    def get(self, key, default=None):
        """Return the value for *key* (POST first, then GET).

        File-upload fields are returned as-is; plain fields are converted
        to their .value.
        """
        if key in self.request.keys():
            field = self.request[key]
        elif key in self.get_request.keys():
            field = self.get_request[key]
        else:
            return default
        if hasattr(field, "filename") and field.filename:  # file upload, do not convert return value
            return field
        else:
            return field.value
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
import os
# On Windows the standard streams default to text mode, which corrupts
# uploaded binary data; switch stdin/stdout to binary mode.  msvcrt does
# not exist on other platforms, so the ImportError is deliberately
# swallowed there.
try: # Windows needs stdio set for binary mode for file upload to work.
    import msvcrt
    msvcrt.setmode (0, os.O_BINARY) # stdin = 0
    msvcrt.setmode (1, os.O_BINARY) # stdout = 1
except ImportError:
    pass
from fckutil import *
from fckoutput import *
import config as Config
class GetFoldersCommandMixin (object):
    def getFolders(self, resourceType, currentFolder):
        """
        Purpose: command to receive a list of folders.

        Builds the <Folders> XML fragment listing every subdirectory of
        the mapped server folder.
        """
        # Map the virtual path to our local server.
        serverPath = mapServerFolder(self.userFilesFolder, currentFolder)
        fragments = []
        for entry in os.listdir(serverPath):
            entryPath = mapServerFolder(serverPath, entry)
            if os.path.isdir(entryPath):
                fragments.append('<Folder name="%s" />' % convertToXmlAttribute(entry))
        # Wrap the collected entries in the enclosing node.
        return '<Folders>' + ''.join(fragments) + '</Folders>'
class GetFoldersAndFilesCommandMixin (object):
    def getFoldersAndFiles(self, resourceType, currentFolder):
        """
        Purpose: command to receive a list of folders and files.

        Returns the <Folders> and <Files> XML fragments for the given
        virtual folder, concatenated.
        """
        # Map the virtual path to our local server
        serverPath = mapServerFolder(self.userFilesFolder, currentFolder)
        # Open the folders / files nodes
        folders = """<Folders>"""
        files = """<Files>"""
        for someObject in os.listdir(serverPath):
            someObjectPath = mapServerFolder(serverPath, someObject)
            if os.path.isdir(someObjectPath):
                folders += """<Folder name="%s" />""" % (
                    convertToXmlAttribute(someObject)
                )
            elif os.path.isfile(someObjectPath):
                # Stat the file once and reuse the result; previously the
                # size was computed into a local and then getsize() was
                # called a second time for the format string.
                size = os.path.getsize(someObjectPath)
                files += """<File name="%s" size="%s" />""" % (
                    convertToXmlAttribute(someObject),
                    size
                )
        # Close the folders / files nodes
        folders += """</Folders>"""
        files += """</Files>"""
        return folders + files
class CreateFolderCommandMixin (object):
def createFolder(self, resourceType, currentFolder):
"""
Purpose: command to create a new folder
"""
errorNo = 0; errorMsg ='';
if self.request.has_key("NewFolderName"):
newFolder = self.request.get("NewFolderName", None)
newFolder = sanitizeFolderName (newFolder)
try:
newFolderPath = mapServerFolder(self.userFilesFolder, combinePaths(currentFolder, newFolder))
self.createServerFolder(newFolderPath)
except Exception, e:
errorMsg = str(e).decode('iso-8859-1').encode('utf-8') # warning with encodigns!!!
if hasattr(e,'errno'):
if e.errno==17: #file already exists
errorNo=0
elif e.errno==13: # permission denied
errorNo = 103
elif e.errno==36 or e.errno==2 or e.errno==22: # filename too long / no such file / invalid name
errorNo = 102
else:
errorNo = 110
else:
errorNo = 102
return self.sendErrorNode ( errorNo, errorMsg )
def createServerFolder(self, folderPath):
"Purpose: physically creates a folder on the server"
# No need to check if the parent exists, just create all hierachy
try:
permissions = Config.ChmodOnFolderCreate
if not permissions:
os.makedirs(folderPath)
except AttributeError: #ChmodOnFolderCreate undefined
permissions = 0755
if permissions:
oldumask = os.umask(0)
os.makedirs(folderPath,mode=0755)
os.umask( oldumask )
class UploadFileCommandMixin (object):
    def uploadFile(self, resourceType, currentFolder):
        """
        Purpose: command to upload files to server (same as FileUpload)

        Validates the extension against Config.AllowedExtensions /
        Config.DeniedExtensions for the resource type, writes the file
        under the mapped server folder (renaming it "name(0001).ext",
        "name(0002).ext", ... when the name is already taken) and replies
        through sendUploadResults().
        """
        errorNo = 0
        if self.request.has_key("NewFile"):
            # newFile has all the contents we need
            newFile = self.request.get("NewFile", "")
            # Get the file name
            newFileName = newFile.filename
            newFileName = sanitizeFileName( newFileName )
            newFileNameOnly = removeExtension(newFileName)
            newFileExtension = getExtension(newFileName).lower()
            allowedExtensions = Config.AllowedExtensions[resourceType]
            deniedExtensions = Config.DeniedExtensions[resourceType]
            # When an allowed-list exists it takes precedence and the
            # denied-list is ignored; with neither list everything passes.
            if (allowedExtensions):
                # Check for allowed
                isAllowed = False
                if (newFileExtension in allowedExtensions):
                    isAllowed = True
            elif (deniedExtensions):
                # Check for denied
                isAllowed = True
                if (newFileExtension in deniedExtensions):
                    isAllowed = False
            else:
                # No extension limitations
                isAllowed = True
            if (isAllowed):
                # Upload to operating system
                # Map the virtual path to the local server path
                currentFolderPath = mapServerFolder(self.userFilesFolder, currentFolder)
                i = 0
                while (True):
                    newFilePath = os.path.join (currentFolderPath,newFileName)
                    if os.path.exists(newFilePath):
                        # Name collision: derive a numbered alternative and
                        # loop again; 201 tells the client it was renamed.
                        i += 1
                        newFileName = "%s(%04d).%s" % (
                            newFileNameOnly, i, newFileExtension
                            )
                        errorNo= 201 # file renamed
                    else:
                        # Read file contents and write to the desired path (similar to php's move_uploaded_file)
                        fout = file(newFilePath, 'wb')
                        while (True):
                            chunk = newFile.file.read(100000)
                            if not chunk: break
                            fout.write (chunk)
                        fout.close()
                        if os.path.exists ( newFilePath ):
                            doChmod = False
                            try:
                                doChmod = Config.ChmodOnUpload
                                permissions = Config.ChmodOnUpload
                            except AttributeError: #ChmodOnUpload undefined
                                doChmod = True
                                permissions = 0755
                            if ( doChmod ):
                                # Clear the umask so the configured mode is
                                # applied verbatim, then restore it.
                                oldumask = os.umask(0)
                                os.chmod( newFilePath, permissions )
                                os.umask( oldumask )
                        newFileUrl = self.webUserFilesFolder + currentFolder + newFileName
                        return self.sendUploadResults( errorNo , newFileUrl, newFileName )
            else:
                return self.sendUploadResults( errorNo = 203, customMsg = "Extension not allowed" )
        else:
            return self.sendUploadResults( errorNo = 202, customMsg = "No File" )
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector/QuickUpload for Python (WSGI wrapper).
See config.py for configuration settings
"""
from connector import FCKeditorConnector
from upload import FCKeditorQuickUpload
import cgitb
from cStringIO import StringIO
# Running from WSGI capable server (recomended)
def App(environ, start_response):
    "WSGI entry point. Run the connector"
    script = environ['SCRIPT_NAME']
    # Route to the right handler based on which script was requested.
    if script.endswith("connector.py"):
        conn = FCKeditorConnector(environ)
    elif script.endswith("upload.py"):
        conn = FCKeditorQuickUpload(environ)
    else:
        # Anything else gets a small diagnostic page.
        start_response("200 Ok", [('Content-Type', 'text/html')])
        yield "Unknown page requested: "
        yield environ['SCRIPT_NAME']
        return
    try:
        # Run the connector and stream its response out.
        data = conn.doResponse()
        start_response("200 Ok", conn.headers)
        yield data
    except:
        # Render the traceback as an HTML error page via cgitb.
        start_response("500 Internal Server Error", [("Content-type", "text/html")])
        buf = StringIO()
        cgitb.Hook(file=buf).handle()
        yield buf.getvalue()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorConnector( FCKeditorConnectorBase,
                          GetFoldersCommandMixin,
                          GetFoldersAndFilesCommandMixin,
                          CreateFolderCommandMixin,
                          UploadFileCommandMixin,
                          BaseHttpMixin, BaseXmlMixin, BaseHtmlMixin ):
    "The Standard connector class."

    def doResponse(self):
        """
        Main function. Process the request, set headers and return a
        string as response.

        Validates the Command/Type/CurrentFolder parameters, resolves the
        target folder on disk (creating it on demand) and dispatches to
        the command mixins.
        """
        s = ""
        # Check if this connector is disabled
        if not(Config.Enabled):
            return self.sendError(1, "This connector is disabled. Please check the connector configurations in \"editor/filemanager/connectors/py/config.py\" and try again.")
        # Make sure we have valid inputs
        for key in ("Command","Type","CurrentFolder"):
            if not self.request.has_key (key):
                return
        # Get command, resource type and current folder
        command = self.request.get("Command")
        resourceType = self.request.get("Type")
        currentFolder = getCurrentFolder(self.request.get("CurrentFolder"))
        # Check for invalid paths
        if currentFolder is None:
            return self.sendError(102, "")
        # Check if it is an allowed command
        if ( not command in Config.ConfigAllowedCommands ):
            return self.sendError( 1, 'The %s command isn\'t allowed' % command )
        if ( not resourceType in Config.ConfigAllowedTypes ):
            return self.sendError( 1, 'Invalid type specified' )
        # Setup paths
        if command == "QuickUpload":
            self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
            self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
        else:
            self.userFilesFolder = Config.FileTypesAbsolutePath[resourceType]
            self.webUserFilesFolder = Config.FileTypesPath[resourceType]
        if not self.userFilesFolder: # no absolute path given (dangerous...)
            self.userFilesFolder = mapServerPath(self.environ,
                                                 self.webUserFilesFolder)
        # Ensure that the directory exists.
        if not os.path.exists(self.userFilesFolder):
            try:
                # Bug fix: the method is createServerFolder(); the previous
                # call to "createServerFoldercreateServerFolder" raised
                # AttributeError (swallowed by the except below), so the
                # directory was never created.
                self.createServerFolder( self.userFilesFolder )
            except:
                return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
        # File upload doesn't have to return XML, so intercept here
        if (command == "FileUpload"):
            return self.uploadFile(resourceType, currentFolder)
        # Create Url
        url = combinePaths( self.webUserFilesFolder, currentFolder )
        # Begin XML
        s += self.createXmlHeader(command, resourceType, currentFolder, url)
        # Execute the command
        selector = {"GetFolders": self.getFolders,
                    "GetFoldersAndFiles": self.getFoldersAndFiles,
                    "CreateFolder": self.createFolder,
                    }
        s += selector[command](resourceType, currentFolder)
        s += self.createXmlFooter()
        return s
# Running from command line (plain old CGI)
if __name__ == '__main__':
    try:
        # Create a Connector Instance and emit a complete CGI response:
        # the collected headers, a blank separator line, then the body.
        conn = FCKeditorConnector()
        data = conn.doResponse()
        for header in conn.headers:
            print '%s: %s' % header
        print
        print data
    except:
        # On any failure still emit a valid CGI response carrying the
        # traceback as plain text.
        print "Content-Type: text/plain"
        print
        import cgi
        cgi.print_exception()
| Python |
#!/usr/bin/env python
"""
* FCKeditor - The text editor for Internet - http://www.fckeditor.net
* Copyright (C) 2003-2008 Frederico Caldeira Knabben
*
* == BEGIN LICENSE ==
*
* Licensed under the terms of any of the following licenses at your
* choice:
*
* - GNU General Public License Version 2 or later (the "GPL")
* http://www.gnu.org/licenses/gpl.html
*
* - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
* http://www.gnu.org/licenses/lgpl.html
*
* - Mozilla Public License Version 1.1 or later (the "MPL")
* http://www.mozilla.org/MPL/MPL-1.1.html
*
* == END LICENSE ==
*
* Configuration file for the File Manager Connector for Python
"""
# INSTALLATION NOTE: You must set up your server environment accordingly to run
# python scripts. This connector requires Python 2.4 or greater.
#
# Supported operation modes:
# * WSGI (recommended): You'll need apache + mod_python + modpython_gateway
# or any web server capable of the WSGI python standard
# * Plain Old CGI: Any server capable of running standard python scripts
# (although mod_python is recommended for performance)
# This was the previous connector version operation mode
#
# If you're using Apache web server, replace the htaccess.txt to to .htaccess,
# and set the proper options and paths.
# For WSGI and mod_python, you may need to download modpython_gateway from:
# http://projects.amor.org/misc/svn/modpython_gateway.py and copy it in this
# directory.
# SECURITY: You must explicitly enable this "connector". (Set it to "True").
# WARNING: don't just set "ConfigIsEnabled = True", you must be sure that only
# authenticated users can access this file or use some kind of session checking.
# SECURITY: the connector stays inert until this is flipped to True (see
# the warning above -- make sure only authenticated users can reach it).
Enabled = False
# Path to user files relative to the document root.
UserFilesPath = '/userfiles/'
# Fill the following value if you prefer to specify the absolute path for the
# user files directory. Useful if you are using a virtual directory, symbolic
# link or alias. Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'UserFilesPath' must point to the same directory.
# WARNING: GetRootPath may not work in virtual or mod_python configurations, and
# may not be thread safe. Use this configuration parameter instead.
UserFilesAbsolutePath = ''
# Due to security issues with Apache modules, it is recommended to leave the
# following setting enabled.
ForceSingleExtension = True
# What the user can do with this connector
ConfigAllowedCommands = [ 'QuickUpload', 'FileUpload', 'GetFolders', 'GetFoldersAndFiles', 'CreateFolder' ]
# Allowed Resource Types
ConfigAllowedTypes = ['File', 'Image', 'Flash', 'Media']
# After file is uploaded, sometimes it is required to change its permissions
# so that it was possible to access it at the later time.
# If possible, it is recommended to set more restrictive permissions, like 0755.
# Set to 0 to disable this feature.
# Note: not needed on Windows-based servers.
# (0755 is a Python 2 octal literal: rwxr-xr-x.)
ChmodOnUpload = 0755
# See comments above.
# Used when creating folders that does not exist.
ChmodOnFolderCreate = 0755
# Do not touch these 3 lines, see "Configuration settings for each Resource Type"
AllowedExtensions = {}; DeniedExtensions = {};
FileTypesPath = {}; FileTypesAbsolutePath = {};
QuickUploadPath = {}; QuickUploadAbsolutePath = {};
# Configuration settings for each Resource Type
#
# - AllowedExtensions: the possible extensions that can be allowed.
# If it is empty then any file type can be uploaded.
# - DeniedExtensions: The extensions that won't be allowed.
# If it is empty then no restrictions are done here.
#
# For a file to be uploaded it has to fulfill both the AllowedExtensions
# and DeniedExtensions (that's it: not being denied) conditions.
#
# - FileTypesPath: the virtual folder relative to the document root where
# these resources will be located.
# Attention: It must start and end with a slash: '/'
#
# - FileTypesAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'FileTypesPath' must point to the same directory.
# Attention: It must end with a slash: '/'
#
#
# - QuickUploadPath: the virtual folder relative to the document root where
# these resources will be uploaded using the Upload tab in the resources
# dialogs.
# Attention: It must start and end with a slash: '/'
#
# - QuickUploadAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'QuickUploadPath' must point to the same directory.
# Attention: It must end with a slash: '/'
# File resource type: extensions and upload locations.
AllowedExtensions['File'] = ['7z','aiff','asf','avi','bmp','csv','doc','fla','flv','gif','gz','gzip','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','ods','odt','pdf','png','ppt','pxd','qt','ram','rar','rm','rmi','rmvb','rtf','sdc','sitd','swf','sxc','sxw','tar','tgz','tif','tiff','txt','vsd','wav','wma','wmv','xls','xml','zip']
DeniedExtensions['File'] = []
FileTypesPath['File'] = UserFilesPath + 'file/'
if UserFilesAbsolutePath == '':
    FileTypesAbsolutePath['File'] = ''
else:
    FileTypesAbsolutePath['File'] = UserFilesAbsolutePath + 'file/'
QuickUploadPath['File'] = FileTypesPath['File']
QuickUploadAbsolutePath['File'] = FileTypesAbsolutePath['File']

# Image resource type.
AllowedExtensions['Image'] = ['bmp','gif','jpeg','jpg','png']
DeniedExtensions['Image'] = []
FileTypesPath['Image'] = UserFilesPath + 'image/'
if UserFilesAbsolutePath == '':
    FileTypesAbsolutePath['Image'] = ''
else:
    FileTypesAbsolutePath['Image'] = UserFilesAbsolutePath + 'image/'
QuickUploadPath['Image'] = FileTypesPath['Image']
QuickUploadAbsolutePath['Image'] = FileTypesAbsolutePath['Image']

# Flash resource type.
AllowedExtensions['Flash'] = ['swf','flv']
DeniedExtensions['Flash'] = []
FileTypesPath['Flash'] = UserFilesPath + 'flash/'
if UserFilesAbsolutePath == '':
    FileTypesAbsolutePath['Flash'] = ''
else:
    FileTypesAbsolutePath['Flash'] = UserFilesAbsolutePath + 'flash/'
QuickUploadPath['Flash'] = FileTypesPath['Flash']
QuickUploadAbsolutePath['Flash'] = FileTypesAbsolutePath['Flash']

# Media resource type.
AllowedExtensions['Media'] = ['aiff','asf','avi','bmp','fla', 'flv','gif','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','png','qt','ram','rm','rmi','rmvb','swf','tif','tiff','wav','wma','wmv']
DeniedExtensions['Media'] = []
FileTypesPath['Media'] = UserFilesPath + 'media/'
if UserFilesAbsolutePath == '':
    FileTypesAbsolutePath['Media'] = ''
else:
    FileTypesAbsolutePath['Media'] = UserFilesAbsolutePath + 'media/'
QuickUploadPath['Media'] = FileTypesPath['Media']
QuickUploadAbsolutePath['Media'] = FileTypesAbsolutePath['Media']
| Python |
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the integration file for Python.
"""
import cgi
import os
import re
import string
def escape(text, replace=None):
    """Converts the special characters '<', '>', '&', '"' and "'".

    RFC 1866 specifies that these characters be represented in HTML as
    &lt; &gt; &amp; &quot; and &#39; respectively.

    text    -- the string to escape.
    replace -- optional replacement function with the signature
               replace(s, old, new); defaults to str.replace semantics
               (the original passed string.replace, which is the same
               operation and Python-2-only).

    Note: the '&' substitution must run first, otherwise the ampersands
    introduced by the other entities would be double-escaped.
    """
    if replace is None:
        replace = lambda s, old, new: s.replace(old, new)
    # Bug fix: the entity strings had been lost (each call replaced a
    # character with itself, making escape() a no-op); restore them.
    text = replace(text, '&', '&amp;')  # must be done 1st
    text = replace(text, '<', '&lt;')
    text = replace(text, '>', '&gt;')
    text = replace(text, '"', '&quot;')
    text = replace(text, "'", '&#39;')
    return text
# The FCKeditor class
class FCKeditor(object):
    """Server-side helper that renders an FCKeditor instance as HTML.

    Configure the public attributes (BasePath, Width, Height, ToolbarSet,
    Value, Config) and call Create() to obtain the markup.
    """
    def __init__(self, instanceName):
        # instanceName is reused as the id/name of the hidden <input> and
        # as the prefix of the editor iframe id.
        self.InstanceName = instanceName
        self.BasePath = '/fckeditor/'
        self.Width = '100%'
        self.Height = '200'
        self.ToolbarSet = 'Default'
        self.Value = '';
        self.Config = {}
    def Create(self):
        """Return the HTML for this editor instance."""
        return self.CreateHtml()
    def CreateHtml(self):
        """Build the editor markup: hidden fields plus an iframe for
        capable browsers, otherwise a plain <textarea> fallback."""
        HtmlValue = escape(self.Value)
        Html = ""
        if (self.IsCompatible()):
            File = "fckeditor.html"
            Link = "%seditor/%s?InstanceName=%s" % (
                self.BasePath,
                File,
                self.InstanceName
            )
            if (self.ToolbarSet is not None):
                # NOTE(review): upstream FCKeditor emits '&amp;' here; this
                # bare '&' looks like it was lost in an HTML-unescaping
                # pass -- confirm against the original distribution.
                Link += "&Toolbar=%s" % self.ToolbarSet
            # Render the linked hidden field
            Html += "<input type=\"hidden\" id=\"%s\" name=\"%s\" value=\"%s\" style=\"display:none\" />" % (
                self.InstanceName,
                self.InstanceName,
                HtmlValue
            )
            # Render the configurations hidden field
            Html += "<input type=\"hidden\" id=\"%s___Config\" value=\"%s\" style=\"display:none\" />" % (
                self.InstanceName,
                self.GetConfigFieldString()
            )
            # Render the editor iframe
            # NOTE(review): the "\__Frame" id suffix looks garbled --
            # upstream uses "___Frame"; confirm before relying on it.
            Html += "<iframe id=\"%s\__Frame\" src=\"%s\" width=\"%s\" height=\"%s\" frameborder=\"0\" scrolling=\"no\"></iframe>" % (
                self.InstanceName,
                Link,
                self.Width,
                self.Height
            )
        else:
            # Fallback: plain numbers get a px suffix; percentages are
            # kept as-is.
            if (self.Width.find("%%") < 0):
                WidthCSS = "%spx" % self.Width
            else:
                WidthCSS = self.Width
            if (self.Height.find("%%") < 0):
                HeightCSS = "%spx" % self.Height
            else:
                HeightCSS = self.Height
            Html += "<textarea name=\"%s\" rows=\"4\" cols=\"40\" style=\"width: %s; height: %s;\" wrap=\"virtual\">%s</textarea>" % (
                self.InstanceName,
                WidthCSS,
                HeightCSS,
                HtmlValue
            )
        return Html
    def IsCompatible(self):
        """Best-effort user-agent sniffing: True when the browser is known
        to support the rich editor (IE >= 5.5 on non-mac/non-Opera,
        Gecko >= 20030210, Opera >= 9.5, AppleWebKit >= 522)."""
        if (os.environ.has_key("HTTP_USER_AGENT")):
            sAgent = os.environ.get("HTTP_USER_AGENT", "")
        else:
            sAgent = ""
        if (sAgent.find("MSIE") >= 0) and (sAgent.find("mac") < 0) and (sAgent.find("Opera") < 0):
            i = sAgent.find("MSIE")
            iVersion = float(sAgent[i+5:i+5+3])
            if (iVersion >= 5.5):
                return True
            return False
        elif (sAgent.find("Gecko/") >= 0):
            i = sAgent.find("Gecko/")
            iVersion = int(sAgent[i+6:i+6+8])
            if (iVersion >= 20030210):
                return True
            return False
        elif (sAgent.find("Opera/") >= 0):
            i = sAgent.find("Opera/")
            iVersion = float(sAgent[i+6:i+6+4])
            if (iVersion >= 9.5):
                return True
            return False
        elif (sAgent.find("AppleWebKit/") >= 0):
            p = re.compile('AppleWebKit\/(\d+)', re.IGNORECASE)
            m = p.search(sAgent)
            # NOTE(review): m.group(1) is a str compared against the int
            # 522; in Python 2 a str always compares greater than an int,
            # so this branch effectively always returns True -- the intent
            # was presumably int(m.group(1)) >= 522.
            if (m.group(1) >= 522):
                return True
            return False
        else:
            return False
    def GetConfigFieldString(self):
        """Serialise self.Config as an URL-style key=value string;
        'true'/'false' values are passed through unescaped."""
        sParams = ""
        bFirst = True
        for sKey in self.Config.keys():
            sValue = self.Config[sKey]
            if (not bFirst):
                # NOTE(review): upstream separates pairs with '&amp;';
                # confirm this bare '&' against the original source.
                sParams += "&"
            else:
                bFirst = False
            if (sValue):
                k = escape(sKey)
                v = escape(sValue)
                if (sValue == "true"):
                    sParams += "%s=true" % k
                elif (sValue == "false"):
                    sParams += "%s=false" % k
                else:
                    sParams += "%s=%s" % (k, v)
        return sParams
| Python |
#!/usr/bin/python
import datetime
import libxml2
import sys
# Input document; override with the first command-line argument.
name = "test-1200K.xml"
if len(sys.argv) > 1:
    name = sys.argv[1]
else:
    print "no input file use 'test-1200K.xml'"
# Determine the file size by seeking to the end of the raw file.
doc = open(name)
doc.seek(0, 2)
size = doc.tell()
# NOTE(review): the open handle is rebound below without being closed;
# the OS reclaims it when the script exits.
t1=datetime.datetime.now()
doc = libxml2.parseFile(name)
t2=datetime.datetime.now()
# Evaluate an XPath selecting every element in the document.
ctxt = doc.xpathNewContext()
res = ctxt.xpathEval("//*")
t3=datetime.datetime.now()
nb_node = len(res)
t4=datetime.datetime.now()
# Release the XPath context, then the parsed document.
ctxt.xpathFreeContext()
t5=datetime.datetime.now()
doc.freeDoc()
t6=datetime.datetime.now()
# Wall-clock duration of each phase.
dload = t2 - t1
dxpath = t3 - t2
dfree = t5 - t4
dclose = t6 - t5
# Convert the timedeltas to fractional seconds.
dt_load = dload.seconds + dload.microseconds / 1000000.0
dt_xpath = dxpath.seconds + dxpath.microseconds / 1000000.0
dt_free = dfree.seconds + dfree.microseconds / 1000000.0
dt_close = dclose.seconds + dclose.microseconds / 1000000.0
print "stats for '%s':" % name
print "\tload_doc (%d bytes): \t %f sec" % (size, dt_load)
print "\txpath processed %d nodes:\t %f sec" % (nb_node, dt_xpath)
print "\trelease result:\t\t\t %f sec" % dt_free
print "\tclose_doc:\t\t\t %f sec" % dt_close
| Python |
#!/usr/bin/env python
################################################################################
##
## Photivo
##
## Copyright (C) 2013 Sergey Salnikov <salsergey@gmail.com>
##
## This file is part of Photivo.
##
## Photivo is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License version 3
## as published by the Free Software Foundation.
##
## Photivo is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Photivo. If not, see <http://www.gnu.org/licenses/>.
##
################################################################################
#
# This script generates CMakeLists.txt file. The script extracts all
# sources, headers and UIs from the photivoProject/photivoProject.pro
# file and adds them to CMakeLists.txt.in.
#
################################################################################
import sys
import os.path
import re
# Function to find if the source should be added.
def test_source(filename):
if filename.endswith('cpp') and not re.match('.*qtlockedfile.*', filename):
return True
else:
return False
# Function to find if the header file should be MOCed.
def test_header(filename):
file = open(filename)
for line in file:
if re.match('.*Q_OBJECT.*', line):
return True
return False
# Function that extracts the path to a file.
# The returned value means if the file list continues.
def match_to_path(files, line, test_function=None):
    """Extract one file path from a qmake list line and append it to files.

    Returns True when the list continues on the next line (i.e. the line
    ends with a backslash), False otherwise.  Comment lines are skipped,
    a leading '../' prefix is stripped, and test_function (when given)
    decides whether the path is kept.
    """
    continues = line.endswith('\\')
    if not line.startswith('#'):
        path = line
        # Drop the trailing continuation backslash, then whitespace.
        if path.endswith('\\'):
            path = path[:-1]
        path = path.strip()
        # Strip the relative prefix up to the first '../'.
        if '../' in path:
            path = path.split('../')[1]
        if test_function is None or test_function(path):
            files.append(path)
    return continues
# set the working directory to that containing this script
os.chdir(os.path.dirname(sys.argv[0]))
# Both the template and the qmake project file must exist before we start.
if not os.path.exists('CMakeLists.txt.in'):
    print 'File CMakeLists.txt.in doesn\'t exist.'
    exit(1)
if not os.path.exists('photivoProject/photivoProject.pro'):
    print 'File photivoProject/photivoProject.pro doesn\'t exist.'
    exit(1)
cmake_in = open('CMakeLists.txt.in', 'r')
qmake_pro = open('photivoProject/photivoProject.pro', 'r')
cmake_out = open('CMakeLists.txt', 'w')
# Accumulated file lists extracted from the .pro file.
sources = []
headers = []
uis = []
# State flags: inside a win32 section / inside one of the three lists.
skip = False
copy_src = False
copy_hdr = False
copy_ui = False
# Pass 1: scan the qmake project file and collect sources, headers, forms.
for line in qmake_pro:
    line = line.strip()
    # these lines correspond to win32 only and we skip them
    if re.match('win32', line):
        skip = True
    # the end of the win32 section
    if re.match('}', line):
        skip = False
    if skip:
        continue
    # sources section found
    if re.match('SOURCES', line):
        copy_src = True
    if copy_src:
        # match_to_path returns False once the list stops continuing.
        copy_src = match_to_path(sources, line, test_source)
        continue
    # headers section found
    if re.match('HEADERS', line):
        copy_hdr = True
    if copy_hdr:
        copy_hdr = match_to_path(headers, line, test_header)
        continue
    # forms section found
    if re.match('FORMS', line):
        copy_ui = True
    if copy_ui:
        copy_ui = match_to_path(uis, line)
        continue
# Pass 2: copy the template, appending each collected list right after
# the matching set(...) marker line.
for line in cmake_in:
    cmake_out.write(line)
    # sources section found
    if re.match('^set\( photivo_SRCS', line):
        cmake_out.write('    ' + '\n    '.join(sources))
    # headers section found
    if re.match('^set\( photivo_MOC_HDRS', line):
        cmake_out.write('    ' + '\n    '.join(headers))
    # forms section found
    if re.match('^set\( photivo_UI_HDRS', line):
        cmake_out.write('    ' + '\n    '.join(uis))
cmake_in.close()
qmake_pro.close()
cmake_out.close()
| Python |
#!/usr/bin/env python
#
# scons-time - run SCons timings and collect statistics
#
# A script for running a configuration through SCons with a standard
# set of invocations to collect timing and memory statistics and to
# capture the results in a consistent set of output files for display
# and analysis.
#
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import division
from __future__ import nested_scopes
__revision__ = "src/script/scons-time.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import getopt
import glob
import os
import re
import shutil
import sys
import tempfile
import time
# Provide a sorted() fallback when running on a pre-2.4 Python.
try:
    sorted
except NameError:
    # Pre-2.4 Python has no sorted() function.
    #
    # The pre-2.4 Python list.sort() method does not support
    # list.sort(key=) nor list.sort(reverse=) keyword arguments, so
    # we must implement the functionality of those keyword arguments
    # by hand instead of passing them to list.sort().
    def sorted(iterable, cmp=None, key=None, reverse=False):
        # Decorate-sort-undecorate when a key function is supplied.
        if key is not None:
            result = [(key(x), x) for x in iterable]
        else:
            result = iterable[:]
        if cmp is None:
            # Pre-2.3 Python does not support list.sort(None).
            result.sort()
        else:
            result.sort(cmp)
        if key is not None:
            # Undecorate: keep only the original items.
            result = [t1 for t0,t1 in result]
        if reverse:
            result.reverse()
        return result
if os.environ.get('SCONS_HORRIBLE_REGRESSION_TEST_HACK') is not None:
    # We can't apply the 'callable' fixer until the floor is 2.6, but the
    # '-3' option to Python 2.6 and 2.7 generates almost ten thousand
    # warnings. This hack allows us to run regression tests with the '-3'
    # option by replacing the callable() built-in function with a hack
    # that performs the same function but doesn't generate the warning.
    # Note that this hack is ONLY intended to be used for regression
    # testing, and should NEVER be used for real runs.
    from types import ClassType
    def callable(obj):
        # Same answer as the builtin: callables have __call__, or are
        # (old- or new-style) classes.
        if hasattr(obj, '__call__'): return True
        if isinstance(obj, (ClassType, type)): return True
        return False
def make_temp_file(**kw):
    """Return the (real) path of a fresh temporary file name.

    Keyword arguments are passed through to tempfile.mktemp().  Very old
    Pythons whose mktemp() rejects keyword arguments are handled by
    temporarily installing the prefix as tempfile.template.
    """
    try:
        path = tempfile.mktemp(**kw)
        try:
            path = os.path.realpath(path)
        except AttributeError:
            # Python 2.1 has no os.path.realpath() method.
            pass
    except TypeError:
        old_template = tempfile.template
        try:
            tempfile.template = kw.pop('prefix')
            path = tempfile.mktemp(**kw)
        finally:
            tempfile.template = old_template
    return path
def HACK_for_exec(cmd, *args):
    '''
    For some reason, Python won't allow an exec() within a function
    that also declares an internal function (including lambda functions).
    This function is a hack that calls exec() in a function with no
    internal functions.
    '''
    # Dispatch on how many namespace dicts were supplied:
    # none -> current namespace, one -> globals, two -> globals, locals.
    if not args: exec(cmd)
    elif len(args) == 1: exec cmd in args[0]
    else: exec cmd in args[0], args[1]
class Plotter(object):
    """Base class with the axis-increment arithmetic shared by plotters."""
    def increment_size(self, largest):
        """
        Return the size of each horizontal increment line for a specified
        maximum value.  This returns a value that will provide somewhere
        between 5 and 9 horizontal lines on the graph, on some set of
        boundaries that are multiples of 10/100/1000/etc.
        """
        step = largest // 5
        if not step:
            return largest
        scale = 1
        while step >= 10:
            step //= 10
            scale *= 10
        return step * scale
    def max_graph_value(self, largest):
        """Round largest up to the next whole increment boundary."""
        # Round up to next integer first.
        top = int(largest) + 1
        step = self.increment_size(top)
        return ((top + step - 1) // step) * step
class Line(object):
    # One plotted data series: its points, gnuplot line type, and labels.
    def __init__(self, points, type, title, label, comment, fmt="%s %s"):
        self.points = points    # list of (x, y) pairs; y may be None
        self.type = type        # gnuplot line type number
        self.title = title      # legend title (None for no legend entry)
        self.label = label      # optional on-graph label text
        self.comment = comment  # comment emitted above the data points
        self.fmt = fmt          # format used to print each point
    def print_label(self, inx, x, y):
        # Emit a gnuplot 'set label' directive for this line, if labeled.
        if self.label:
            print 'set label %s "%s" at %s,%s right' % (inx, self.label, x, y)
    def plot_string(self):
        # Return this line's clause for the gnuplot 'plot' command.
        if self.title:
            title_string = 'title "%s"' % self.title
        else:
            title_string = 'notitle'
        return "'-' %s with lines lt %s" % (title_string, self.type)
    def print_points(self, fmt=None):
        # Emit the inline data points, terminated by gnuplot's 'e' marker.
        if fmt is None:
            fmt = self.fmt
        if self.comment:
            print '# %s' % self.comment
        for x, y in self.points:
            # If y is None, it usually represents some kind of break
            # in the line's index number. We might want to represent
            # this some way rather than just drawing the line straight
            # between the two points on either side.
            if not y is None:
                print fmt % (x, y)
        print 'e'
    def get_x_values(self):
        # All x coordinates of this line's points.
        return [ p[0] for p in self.points ]
    def get_y_values(self):
        # All y coordinates of this line's points.
        return [ p[1] for p in self.points ]
class Gnuplotter(Plotter):
    # Collects Line objects and writes a complete gnuplot script to stdout.
    def __init__(self, title, key_location):
        self.lines = []
        self.title = title
        self.key_location = key_location
    def line(self, points, type, title=None, label=None, comment=None, fmt='%s %s'):
        # Add a data series; empty point lists are silently ignored.
        if points:
            line = Line(points, type, title, label, comment, fmt)
            self.lines.append(line)
    def plot_string(self, line):
        return line.plot_string()
    def vertical_bar(self, x, type, label, comment):
        # Add a full-height vertical bar at x, if x is inside the x range.
        if self.get_min_x() <= x and x <= self.get_max_x():
            points = [(x, 0), (x, self.max_graph_value(self.get_max_y()))]
            self.line(points, type, label, comment)
    def get_all_x_values(self):
        # Flatten every line's x values, dropping None break markers.
        result = []
        for line in self.lines:
            result.extend(line.get_x_values())
        return [r for r in result if not r is None]
    def get_all_y_values(self):
        # Flatten every line's y values, dropping None break markers.
        result = []
        for line in self.lines:
            result.extend(line.get_y_values())
        return [r for r in result if not r is None]
    # The four extrema below are computed lazily and cached as instance
    # attributes; the AttributeError path runs only on the first call.
    # An empty value list (ValueError from min()/max()) defaults to 0.
    def get_min_x(self):
        try:
            return self.min_x
        except AttributeError:
            try:
                self.min_x = min(self.get_all_x_values())
            except ValueError:
                self.min_x = 0
            return self.min_x
    def get_max_x(self):
        try:
            return self.max_x
        except AttributeError:
            try:
                self.max_x = max(self.get_all_x_values())
            except ValueError:
                self.max_x = 0
            return self.max_x
    def get_min_y(self):
        try:
            return self.min_y
        except AttributeError:
            try:
                self.min_y = min(self.get_all_y_values())
            except ValueError:
                self.min_y = 0
            return self.min_y
    def get_max_y(self):
        try:
            return self.max_y
        except AttributeError:
            try:
                self.max_y = max(self.get_all_y_values())
            except ValueError:
                self.max_y = 0
            return self.max_y
    def draw(self):
        # Emit the whole gnuplot script: header, labels, plot command,
        # then each line's inline data.  No output when there is no data.
        if not self.lines:
            return
        if self.title:
            print 'set title "%s"' % self.title
        print 'set key %s' % self.key_location
        min_y = self.get_min_y()
        max_y = self.max_graph_value(self.get_max_y())
        # Stagger up to five label positions down from mid-graph.
        incr = (max_y - min_y) / 10.0
        start = min_y + (max_y / 2.0) + (2.0 * incr)
        position = [ start - (i * incr) for i in range(5) ]
        inx = 1
        for line in self.lines:
            line.print_label(inx, line.points[0][0]-1,
                             position[(inx-1) % len(position)])
            inx += 1
        plot_strings = [ self.plot_string(l) for l in self.lines ]
        print 'plot ' + ', \\\n     '.join(plot_strings)
        for line in self.lines:
            line.print_points()
def untar(fname):
    """Unpack the tar archive fname into the current directory."""
    import tarfile
    archive = tarfile.open(name=fname, mode='r')
    for member in archive:
        archive.extract(member)
    archive.close()
def unzip(fname):
    """Unpack the zip archive fname into the current directory."""
    import zipfile
    zf = zipfile.ZipFile(fname, 'r')
    try:
        for name in zf.namelist():
            dir = os.path.dirname(name)
            # Create the member's directory only when it is missing,
            # instead of swallowing every makedirs() error bare.
            if dir and not os.path.isdir(dir):
                os.makedirs(dir)
            # Binary mode keeps the archive contents byte-for-byte intact,
            # and the handle is closed instead of leaked.
            f = open(name, 'wb')
            try:
                f.write(zf.read(name))
            finally:
                f.close()
    finally:
        zf.close()
def read_tree(dir):
    """Read every regular file under dir (warms the OS file cache)."""
    for dirpath, dirnames, filenames in os.walk(dir):
        for fn in filenames:
            fn = os.path.join(dirpath, fn)
            if os.path.isfile(fn):
                # Close each file explicitly instead of leaking the handle.
                f = open(fn, 'rb')
                try:
                    f.read()
                finally:
                    f.close()
def redirect_to_file(command, log):
    """Return command with stdout and stderr redirected into log."""
    return command + ' > ' + log + ' 2>&1'
def tee_to_file(command, log):
    """Return command with combined output piped through tee into log."""
    return command + ' 2>&1 | tee ' + log
class SConsTimer(object):
"""
Usage: scons-time SUBCOMMAND [ARGUMENTS]
Type "scons-time help SUBCOMMAND" for help on a specific subcommand.
Available subcommands:
func Extract test-run data for a function
help Provides help
mem Extract --debug=memory data from test runs
obj Extract --debug=count data from test runs
time Extract --debug=time data from test runs
run Runs a test configuration
"""
    name = 'scons-time'
    name_spaces = ' '*len(name)
    # makedict() executes at class-definition time, where it is still a
    # plain function, so it can build the settings dictionary below.
    def makedict(**kw):
        return kw
    # Defaults copied onto every instance by __init__(); config files and
    # command-line options then overwrite individual entries.
    default_settings = makedict(
        aegis = 'aegis',
        aegis_project = None,
        chdir = None,
        config_file = None,
        initial_commands = [],
        key_location = 'bottom left',
        orig_cwd = os.getcwd(),
        outdir = None,
        prefix = '',
        python = '"%s"' % sys.executable,
        redirect = redirect_to_file,
        scons = None,
        scons_flags = '--debug=count --debug=memory --debug=time --debug=memoizer',
        scons_lib_dir = None,
        scons_wrapper = None,
        startup_targets = '--help',
        subdir = None,
        subversion_url = None,
        svn = 'svn',
        svn_co_flag = '-q',
        tar = 'tar',
        targets = '',
        targets0 = None,
        targets1 = None,
        targets2 = None,
        title = None,
        unzip = 'unzip',
        verbose = False,
        vertical_bars = [],
        # Archive extension -> (python unpack function, shell command).
        unpack_map = {
            '.tar.gz' : (untar, '%(tar)s xzf %%s'),
            '.tgz' : (untar, '%(tar)s xzf %%s'),
            '.tar' : (untar, '%(tar)s xf %%s'),
            '.zip' : (unzip, '%(unzip)s %%s'),
        },
    )
    # Titles for the three timed invocations, indexed by run position.
    run_titles = [
        'Startup',
        'Full build',
        'Up-to-date build',
    ]
    # Command templates for the three timed invocations (expanded with
    # the instance dictionary via the % operator).
    run_commands = [
        '%(python)s %(scons_wrapper)s %(scons_flags)s --profile=%(prof0)s %(targets0)s',
        '%(python)s %(scons_wrapper)s %(scons_flags)s --profile=%(prof1)s %(targets1)s',
        '%(python)s %(scons_wrapper)s %(scons_flags)s --profile=%(prof2)s %(targets2)s',
    ]
    # Names of the four --debug=memory checkpoints, in log order.
    stages = [
        'pre-read',
        'post-read',
        'pre-build',
        'post-build',
    ]
    # Log-line prefix emitted by SCons for each memory checkpoint.
    stage_strings = {
        'pre-read' : 'Memory before reading SConscript files:',
        'post-read' : 'Memory after reading SConscript files:',
        'pre-build' : 'Memory before building targets:',
        'post-build' : 'Memory after building targets:',
    }
    memory_string_all = 'Memory '
    default_stage = stages[-1]
    # Log-line prefixes emitted by --debug=time output.
    time_strings = {
        'total' : 'Total build time',
        'SConscripts' : 'Total SConscript file execution time',
        'SCons' : 'Total SCons execution time',
        'commands' : 'Total command execution time',
    }
    time_string_all = 'Total .* time'
    #
def __init__(self):
self.__dict__.update(self.default_settings)
# Functions for displaying and executing commands.
    def subst(self, x, dictionary):
        # Interpolate dictionary values into x with the % operator;
        # non-strings are returned unchanged.
        try:
            return x % dictionary
        except TypeError:
            # x isn't a string (it's probably a Python function),
            # so just return it.
            return x
    def subst_variables(self, command, dictionary):
        """
        Substitutes (via the format operator) the values in the specified
        dictionary into the specified command.
        The command can be an (action, string) tuple.  In all cases, we
        perform substitution on strings and don't worry if something isn't
        a string.  (It's probably a Python function to be executed.)
        """
        try:
            # EAFP type sniff: only strings support concatenation with ''.
            command + ''
        except TypeError:
            # (action, display-string, extra-args...) tuple form.
            action = command[0]
            string = command[1]
            args = command[2:]
        else:
            # Bare string: it is both the action and its display form.
            action = command
            string = action
            args = (())
        action = self.subst(action, dictionary)
        string = self.subst(string, dictionary)
        return (action, string, args)
    def _do_not_display(self, msg, *args):
        # No-op stand-in for display() when --quiet is in effect.
        pass
def display(self, msg, *args):
"""
Displays the specified message.
Each message is prepended with a standard prefix of our name
plus the time.
"""
if callable(msg):
msg = msg(*args)
else:
msg = msg % args
if msg is None:
return
fmt = '%s[%s]: %s\n'
sys.stdout.write(fmt % (self.name, time.strftime('%H:%M:%S'), msg))
    def _do_not_execute(self, action, *args):
        # No-op stand-in for execute() when --no-exec is in effect.
        pass
def execute(self, action, *args):
"""
Executes the specified action.
The action is called if it's a callable Python function, and
otherwise passed to os.system().
"""
if callable(action):
action(*args)
else:
os.system(action % args)
    def run_command_list(self, commands, dict):
        """
        Executes a list of commands, substituting values from the
        specified dictionary.
        """
        commands = [ self.subst_variables(c, dict) for c in commands ]
        for action, string, args in commands:
            self.display(string, *args)
            sys.stdout.flush()
            status = self.execute(action, *args)
            # NOTE(review): execute() currently returns None in both of
            # its branches, so this exit path never fires — confirm
            # whether os.system()'s status was meant to be returned.
            if status:
                sys.exit(status)
def log_display(self, command, log):
command = self.subst(command, self.__dict__)
if log:
command = self.redirect(command, log)
return command
    def log_execute(self, command, log):
        # Run command through a shell, capture its combined output, echo it
        # when verbose, and always write it to the log file.
        command = self.subst(command, self.__dict__)
        output = os.popen(command).read()
        if self.verbose:
            sys.stdout.write(output)
        # NOTE(review): the log handle is not closed explicitly; CPython's
        # refcounting reclaims it, but an explicit close would be safer.
        open(log, 'wb').write(output)
#
def archive_splitext(self, path):
"""
Splits an archive name into a filename base and extension.
This is like os.path.splitext() (which it calls) except that it
also looks for '.tar.gz' and treats it as an atomic extensions.
"""
if path.endswith('.tar.gz'):
return path[:-7], path[-7:]
else:
return os.path.splitext(path)
def args_to_files(self, args, tail=None):
"""
Takes a list of arguments, expands any glob patterns, and
returns the last "tail" files from the list.
"""
files = []
for a in args:
files.extend(sorted(glob.glob(a)))
if tail:
files = files[-tail:]
return files
    def ascii_table(self, files, columns,
                    line_function, file_function=lambda x: x,
                    *args, **kw):
        # Print a fixed-width table: one header row from `columns`, then
        # one row per file whose values come from line_function(file, ...);
        # file_function maps the file name into the trailing label column.
        header_fmt = ' '.join(['%12s'] * len(columns))
        line_fmt = header_fmt + '  %s'
        print header_fmt % columns
        for file in files:
            t = line_function(file, *args, **kw)
            if t is None:
                t = []
            # Pad short rows so the tuple matches the format's width.
            diff = len(columns) - len(t)
            if diff > 0:
                t += [''] * diff
            t.append(file_function(file))
            print line_fmt % tuple(t)
def collect_results(self, files, function, *args, **kw):
results = {}
for file in files:
base = os.path.splitext(file)[0]
run, index = base.split('-')[-2:]
run = int(run)
index = int(index)
value = function(file, *args, **kw)
try:
r = results[index]
except KeyError:
r = []
results[index] = r
r.append((run, value))
return results
def doc_to_help(self, obj):
"""
Translates an object's __doc__ string into help text.
This strips a consistent number of spaces from each line in the
help text, essentially "outdenting" the text to the left-most
column.
"""
doc = obj.__doc__
if doc is None:
return ''
return self.outdent(doc)
def find_next_run_number(self, dir, prefix):
"""
Returns the next run number in a directory for the specified prefix.
Examines the contents the specified directory for files with the
specified prefix, extracts the run numbers from each file name,
and returns the next run number after the largest it finds.
"""
x = re.compile(re.escape(prefix) + '-([0-9]+).*')
matches = [x.match(e) for e in os.listdir(dir)]
matches = [_f for _f in matches if _f]
if not matches:
return 0
run_numbers = [int(m.group(1)) for m in matches]
return int(max(run_numbers)) + 1
    def gnuplot_results(self, results, fmt='%s %.3f'):
        """
        Prints out a set of results in Gnuplot format.
        """
        gp = Gnuplotter(self.title, self.key_location)
        for i in sorted(results.keys()):
            try:
                t = self.run_titles[i]
            except IndexError:
                # More result indices than known run titles.
                t = '??? %s ???' % i
            results[i].sort()
            gp.line(results[i], i+1, t, None, t, fmt=fmt)
        for bar_tuple in self.vertical_bars:
            try:
                x, type, label, comment = bar_tuple
            except ValueError:
                # Three-element form: reuse the label as the comment.
                x, type, label = bar_tuple
                comment = label
            gp.vertical_bar(x, type, label, comment)
        gp.draw()
def logfile_name(self, invocation):
"""
Returns the absolute path of a log file for the specificed
invocation number.
"""
name = self.prefix_run + '-%d.log' % invocation
return os.path.join(self.outdir, name)
def outdent(self, s):
"""
Strip as many spaces from each line as are found at the beginning
of the first line in the list.
"""
lines = s.split('\n')
if lines[0] == '':
lines = lines[1:]
spaces = re.match(' *', lines[0]).group(0)
def strip_initial_spaces(l, s=spaces):
if l.startswith(spaces):
l = l[len(spaces):]
return l
return '\n'.join([ strip_initial_spaces(l) for l in lines ]) + '\n'
def profile_name(self, invocation):
"""
Returns the absolute path of a profile file for the specified
invocation number.
"""
name = self.prefix_run + '-%d.prof' % invocation
return os.path.join(self.outdir, name)
    def set_env(self, key, value):
        # Helper available to config files: set an environment variable.
        os.environ[key] = value
#
    def get_debug_times(self, file, time_string=None):
        """
        Fetch times from the --debug=time strings in the specified file.
        """
        if time_string is None:
            search_string = self.time_string_all
        else:
            search_string = time_string
        contents = open(file).read()
        if not contents:
            sys.stderr.write('file %s has no contents!\n' % repr(file))
            return None
        # Keep only the last four matches: one per timing category.
        result = re.findall(r'%s: ([\d\.]*)' % search_string, contents)[-4:]
        result = [ float(r) for r in result ]
        if not time_string is None:
            # A specific string was requested, so return a single float.
            try:
                result = result[0]
            except IndexError:
                sys.stderr.write('file %s has no results!\n' % repr(file))
                return None
        return result
    def get_function_profile(self, file, function):
        """
        Returns the file, line number, function name, and cumulative time.
        """
        try:
            import pstats
        except ImportError, e:
            sys.stderr.write('%s: func: %s\n' % (self.name, e))
            sys.stderr.write('%s  This version of Python is missing the profiler.\n' % self.name_spaces)
            sys.stderr.write('%s  Cannot use the "func" subcommand.\n' % self.name_spaces)
            sys.exit(1)
        statistics = pstats.Stats(file).stats
        # Profile keys are (filename, lineno, funcname) tuples.
        matches = [ e for e in statistics.items() if e[0][2] == function ]
        r = matches[0]
        # The stats value tuple holds (ncalls, calls, tottime, cumtime, ...).
        return r[0][0], r[0][1], r[0][2], r[1][3]
def get_function_time(self, file, function):
"""
Returns just the cumulative time for the specified function.
"""
return self.get_function_profile(file, function)[3]
def get_memory(self, file, memory_string=None):
"""
Returns a list of integers of the amount of memory used. The
default behavior is to return all the stages.
"""
if memory_string is None:
search_string = self.memory_string_all
else:
search_string = memory_string
lines = open(file).readlines()
lines = [ l for l in lines if l.startswith(search_string) ][-4:]
result = [ int(l.split()[-1]) for l in lines[-4:] ]
if len(result) == 1:
result = result[0]
return result
def get_object_counts(self, file, object_name, index=None):
"""
Returns the counts of the specified object_name.
"""
object_string = ' ' + object_name + '\n'
lines = open(file).readlines()
line = [ l for l in lines if l.endswith(object_string) ][0]
result = [ int(field) for field in line.split()[:4] ]
if index is not None:
result = result[index]
return result
#
command_alias = {}
    def execute_subcommand(self, argv):
        """
        Executes the do_*() function for the specified subcommand (argv[0]).
        """
        if not argv:
            return
        cmdName = self.command_alias.get(argv[0], argv[0])
        try:
            func = getattr(self, 'do_' + cmdName)
        except AttributeError:
            # No do_<cmd> method: report the unknown subcommand and exit.
            return self.default(argv)
        try:
            return func(argv)
        except TypeError, e:
            # Most likely bad arguments to the subcommand; show the
            # traceback and point the user at the subcommand's help.
            sys.stderr.write("%s %s: %s\n" % (self.name, cmdName, e))
            import traceback
            traceback.print_exc(file=sys.stderr)
            sys.stderr.write("Try '%s help %s'\n" % (self.name, cmdName))
def default(self, argv):
"""
The default behavior for an unknown subcommand. Prints an
error message and exits.
"""
sys.stderr.write('%s: Unknown subcommand "%s".\n' % (self.name, argv[0]))
sys.stderr.write('Type "%s help" for usage.\n' % self.name)
sys.exit(1)
#
    def do_help(self, argv):
        """
        """
        # With arguments: show help for each named subcommand, preferring
        # a help_<cmd>() method over the do_<cmd>() docstring.
        if argv[1:]:
            for arg in argv[1:]:
                try:
                    func = getattr(self, 'do_' + arg)
                except AttributeError:
                    sys.stderr.write('%s: No help for "%s"\n' % (self.name, arg))
                else:
                    try:
                        help = getattr(self, 'help_' + arg)
                    except AttributeError:
                        sys.stdout.write(self.doc_to_help(func))
                        sys.stdout.flush()
                    else:
                        help()
        else:
            # No arguments: print the class-level usage text.
            doc = self.doc_to_help(self.__class__)
            if doc:
                sys.stdout.write(doc)
                sys.stdout.flush()
            return None
#
    def help_func(self):
        # Usage text is emitted verbatim (after outdenting) to stdout.
        help = """\
        Usage: scons-time func [OPTIONS] FILE [...]
          -C DIR, --chdir=DIR           Change to DIR before looking for files
          -f FILE, --file=FILE          Read configuration from specified FILE
          --fmt=FORMAT, --format=FORMAT Print data in specified FORMAT
          --func=NAME, --function=NAME  Report time for function NAME
          -h, --help                    Print this help and exit
          -p STRING, --prefix=STRING    Use STRING as log file/profile prefix
          -t NUMBER, --tail=NUMBER      Only report the last NUMBER files
          --title=TITLE                 Specify the output plot TITLE
        """
        sys.stdout.write(self.outdent(help))
        sys.stdout.flush()
    def do_func(self, argv):
        """
        """
        format = 'ascii'
        function_name = '_main'
        tail = None
        short_opts = '?C:f:hp:t:'
        long_opts = [
            'chdir=',
            'file=',
            'fmt=',
            'format=',
            'func=',
            'function=',
            'help',
            'prefix=',
            'tail=',
            'title=',
        ]
        opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
        for o, a in opts:
            if o in ('-C', '--chdir'):
                self.chdir = a
            elif o in ('-f', '--file'):
                self.config_file = a
            elif o in ('--fmt', '--format'):
                format = a
            elif o in ('--func', '--function'):
                function_name = a
            elif o in ('-?', '-h', '--help'):
                self.do_help(['help', 'func'])
                sys.exit(0)
            elif o in ('--max',):
                # NOTE(review): '--max' is not in long_opts, so getopt can
                # never produce it and this branch is unreachable.
                max_time = int(a)
            elif o in ('-p', '--prefix'):
                self.prefix = a
            elif o in ('-t', '--tail'):
                tail = int(a)
            elif o in ('--title',):
                self.title = a
        # Config files execute directly in the instance dictionary.
        if self.config_file:
            exec open(self.config_file, 'rU').read() in self.__dict__
        if self.chdir:
            os.chdir(self.chdir)
        if not args:
            # No explicit files: glob for profile files with our prefix.
            pattern = '%s*.prof' % self.prefix
            args = self.args_to_files([pattern], tail)
            if not args:
                if self.chdir:
                    directory = self.chdir
                else:
                    directory = os.getcwd()
                sys.stderr.write('%s: func: No arguments specified.\n' % self.name)
                sys.stderr.write('%s  No %s*.prof files found in "%s".\n' % (self.name_spaces, self.prefix, directory))
                sys.stderr.write('%s  Type "%s help func" for help.\n' % (self.name_spaces, self.name))
                sys.exit(1)
        else:
            args = self.args_to_files(args, tail)
        cwd_ = os.getcwd() + os.sep
        if format == 'ascii':
            for file in args:
                try:
                    f, line, func, time = \
                            self.get_function_profile(file, function_name)
                except ValueError, e:
                    sys.stderr.write("%s: func: %s: %s\n" %
                                     (self.name, file, e))
                else:
                    # Show paths relative to the current directory.
                    if f.startswith(cwd_):
                        f = f[len(cwd_):]
                    print "%.3f %s:%d(%s)" % (time, f, line, func)
        elif format == 'gnuplot':
            results = self.collect_results(args, self.get_function_time,
                                           function_name)
            self.gnuplot_results(results)
        else:
            sys.stderr.write('%s: func: Unknown format "%s".\n' % (self.name, format))
            sys.exit(1)
#
    def help_mem(self):
        # Usage text is emitted verbatim (after outdenting) to stdout.
        help = """\
        Usage: scons-time mem [OPTIONS] FILE [...]
          -C DIR, --chdir=DIR           Change to DIR before looking for files
          -f FILE, --file=FILE          Read configuration from specified FILE
          --fmt=FORMAT, --format=FORMAT Print data in specified FORMAT
          -h, --help                    Print this help and exit
          -p STRING, --prefix=STRING    Use STRING as log file/profile prefix
          --stage=STAGE                 Plot memory at the specified stage:
                                          pre-read, post-read, pre-build,
                                          post-build (default: post-build)
          -t NUMBER, --tail=NUMBER      Only report the last NUMBER files
          --title=TITLE                 Specify the output plot TITLE
        """
        sys.stdout.write(self.outdent(help))
        sys.stdout.flush()
    def do_mem(self, argv):
        # Report --debug=memory data from test-run log files, as an ascii
        # table or as gnuplot input.
        format = 'ascii'
        logfile_path = lambda x: x
        stage = self.default_stage
        tail = None
        short_opts = '?C:f:hp:t:'
        long_opts = [
            'chdir=',
            'file=',
            'fmt=',
            'format=',
            'help',
            'prefix=',
            'stage=',
            'tail=',
            'title=',
        ]
        opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
        for o, a in opts:
            if o in ('-C', '--chdir'):
                self.chdir = a
            elif o in ('-f', '--file'):
                self.config_file = a
            elif o in ('--fmt', '--format'):
                format = a
            elif o in ('-?', '-h', '--help'):
                self.do_help(['help', 'mem'])
                sys.exit(0)
            elif o in ('-p', '--prefix'):
                self.prefix = a
            elif o in ('--stage',):
                if not a in self.stages:
                    sys.stderr.write('%s: mem: Unrecognized stage "%s".\n' % (self.name, a))
                    sys.exit(1)
                stage = a
            elif o in ('-t', '--tail'):
                tail = int(a)
            elif o in ('--title',):
                self.title = a
        # Config files execute in the instance dictionary (via the hack
        # because this function declares lambdas).
        if self.config_file:
            HACK_for_exec(open(self.config_file, 'rU').read(), self.__dict__)
        if self.chdir:
            os.chdir(self.chdir)
            logfile_path = lambda x: os.path.join(self.chdir, x)
        if not args:
            # No explicit files: glob for log files with our prefix.
            pattern = '%s*.log' % self.prefix
            args = self.args_to_files([pattern], tail)
            if not args:
                if self.chdir:
                    directory = self.chdir
                else:
                    directory = os.getcwd()
                sys.stderr.write('%s: mem: No arguments specified.\n' % self.name)
                sys.stderr.write('%s  No %s*.log files found in "%s".\n' % (self.name_spaces, self.prefix, directory))
                sys.stderr.write('%s  Type "%s help mem" for help.\n' % (self.name_spaces, self.name))
                sys.exit(1)
        else:
            args = self.args_to_files(args, tail)
        cwd_ = os.getcwd() + os.sep
        if format == 'ascii':
            self.ascii_table(args, tuple(self.stages), self.get_memory, logfile_path)
        elif format == 'gnuplot':
            results = self.collect_results(args, self.get_memory,
                                           self.stage_strings[stage])
            self.gnuplot_results(results)
        else:
            sys.stderr.write('%s: mem: Unknown format "%s".\n' % (self.name, format))
            sys.exit(1)
        return 0
#
    def help_obj(self):
        # Usage text is emitted verbatim (after outdenting) to stdout.
        help = """\
        Usage: scons-time obj [OPTIONS] OBJECT FILE [...]
          -C DIR, --chdir=DIR           Change to DIR before looking for files
          -f FILE, --file=FILE          Read configuration from specified FILE
          --fmt=FORMAT, --format=FORMAT Print data in specified FORMAT
          -h, --help                    Print this help and exit
          -p STRING, --prefix=STRING    Use STRING as log file/profile prefix
          --stage=STAGE                 Plot memory at the specified stage:
                                          pre-read, post-read, pre-build,
                                          post-build (default: post-build)
          -t NUMBER, --tail=NUMBER      Only report the last NUMBER files
          --title=TITLE                 Specify the output plot TITLE
        """
        sys.stdout.write(self.outdent(help))
        sys.stdout.flush()
    def do_obj(self, argv):
        # Report --debug=count object counts from test-run log files, as
        # an ascii table or as gnuplot input.
        format = 'ascii'
        logfile_path = lambda x: x
        stage = self.default_stage
        tail = None
        short_opts = '?C:f:hp:t:'
        long_opts = [
            'chdir=',
            'file=',
            'fmt=',
            'format=',
            'help',
            'prefix=',
            'stage=',
            'tail=',
            'title=',
        ]
        opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
        for o, a in opts:
            if o in ('-C', '--chdir'):
                self.chdir = a
            elif o in ('-f', '--file'):
                self.config_file = a
            elif o in ('--fmt', '--format'):
                format = a
            elif o in ('-?', '-h', '--help'):
                self.do_help(['help', 'obj'])
                sys.exit(0)
            elif o in ('-p', '--prefix'):
                self.prefix = a
            elif o in ('--stage',):
                if not a in self.stages:
                    sys.stderr.write('%s: obj: Unrecognized stage "%s".\n' % (self.name, a))
                    sys.stderr.write('%s  Type "%s help obj" for help.\n' % (self.name_spaces, self.name))
                    sys.exit(1)
                stage = a
            elif o in ('-t', '--tail'):
                tail = int(a)
            elif o in ('--title',):
                self.title = a
        # The first non-option argument is the object class name to count.
        if not args:
            sys.stderr.write('%s: obj: Must specify an object name.\n' % self.name)
            sys.stderr.write('%s  Type "%s help obj" for help.\n' % (self.name_spaces, self.name))
            sys.exit(1)
        object_name = args.pop(0)
        # Config files execute in the instance dictionary (via the hack
        # because this function declares lambdas).
        if self.config_file:
            HACK_for_exec(open(self.config_file, 'rU').read(), self.__dict__)
        if self.chdir:
            os.chdir(self.chdir)
            logfile_path = lambda x: os.path.join(self.chdir, x)
        if not args:
            # No explicit files: glob for log files with our prefix.
            pattern = '%s*.log' % self.prefix
            args = self.args_to_files([pattern], tail)
            if not args:
                if self.chdir:
                    directory = self.chdir
                else:
                    directory = os.getcwd()
                sys.stderr.write('%s: obj: No arguments specified.\n' % self.name)
                sys.stderr.write('%s  No %s*.log files found in "%s".\n' % (self.name_spaces, self.prefix, directory))
                sys.stderr.write('%s  Type "%s help obj" for help.\n' % (self.name_spaces, self.name))
                sys.exit(1)
        else:
            args = self.args_to_files(args, tail)
        cwd_ = os.getcwd() + os.sep
        if format == 'ascii':
            self.ascii_table(args, tuple(self.stages), self.get_object_counts, logfile_path, object_name)
        elif format == 'gnuplot':
            # Translate the stage name into its column index.
            stage_index = 0
            for s in self.stages:
                if stage == s:
                    break
                stage_index = stage_index + 1
            results = self.collect_results(args, self.get_object_counts,
                                           object_name, stage_index)
            self.gnuplot_results(results)
        else:
            sys.stderr.write('%s: obj: Unknown format "%s".\n' % (self.name, format))
            sys.exit(1)
        return 0
#
    def help_run(self):
        # Usage text is emitted verbatim (after outdenting) to stdout.
        help = """\
        Usage: scons-time run [OPTIONS] [FILE ...]
          --aegis=PROJECT               Use SCons from the Aegis PROJECT
          --chdir=DIR                   Name of unpacked directory for chdir
          -f FILE, --file=FILE          Read configuration from specified FILE
          -h, --help                    Print this help and exit
          -n, --no-exec                 No execute, just print command lines
          --number=NUMBER               Put output in files for run NUMBER
          --outdir=OUTDIR               Put output files in OUTDIR
          -p STRING, --prefix=STRING    Use STRING as log file/profile prefix
          --python=PYTHON               Time using the specified PYTHON
          -q, --quiet                   Don't print command lines
          --scons=SCONS                 Time using the specified SCONS
          --svn=URL, --subversion=URL   Use SCons from Subversion URL
          -v, --verbose                 Display output of commands
        """
        sys.stdout.write(self.outdent(help))
        sys.stdout.flush()
    def do_run(self, argv):
        """
        """
        run_number_list = [None]
        short_opts = '?f:hnp:qs:v'
        long_opts = [
            'aegis=',
            'file=',
            'help',
            'no-exec',
            'number=',
            'outdir=',
            'prefix=',
            'python=',
            'quiet',
            'scons=',
            'svn=',
            'subdir=',
            'subversion=',
            'verbose',
        ]
        opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
        for o, a in opts:
            if o in ('--aegis',):
                self.aegis_project = a
            elif o in ('-f', '--file'):
                self.config_file = a
            elif o in ('-?', '-h', '--help'):
                self.do_help(['help', 'run'])
                sys.exit(0)
            elif o in ('-n', '--no-exec'):
                # Swap in the no-op implementations for a dry run.
                self.execute = self._do_not_execute
            elif o in ('--number',):
                run_number_list = self.split_run_numbers(a)
            elif o in ('--outdir',):
                self.outdir = a
            elif o in ('-p', '--prefix'):
                self.prefix = a
            elif o in ('--python',):
                self.python = a
            elif o in ('-q', '--quiet'):
                self.display = self._do_not_display
            elif o in ('-s', '--subdir'):
                self.subdir = a
            elif o in ('--scons',):
                self.scons = a
            elif o in ('--svn', '--subversion'):
                self.subversion_url = a
            elif o in ('-v', '--verbose'):
                self.redirect = tee_to_file
                self.verbose = True
                self.svn_co_flag = ''
        if not args and not self.config_file:
            sys.stderr.write('%s: run: No arguments or -f config file specified.\n' % self.name)
            sys.stderr.write('%s  Type "%s help run" for help.\n' % (self.name_spaces, self.name))
            sys.exit(1)
        # Config files execute directly in the instance dictionary.
        if self.config_file:
            exec open(self.config_file, 'rU').read() in self.__dict__
        if args:
            self.archive_list = args
        # Derive the unpack subdirectory and output prefix from the
        # first archive's base name, unless explicitly configured.
        archive_file_name = os.path.split(self.archive_list[0])[1]
        if not self.subdir:
            self.subdir = self.archive_splitext(archive_file_name)[0]
        if not self.prefix:
            self.prefix = self.archive_splitext(archive_file_name)[0]
        prepare = None
        if self.subversion_url:
            prepare = self.prep_subversion_run
        elif self.aegis_project:
            prepare = self.prep_aegis_run
        for run_number in run_number_list:
            self.individual_run(run_number, self.archive_list, prepare)
def split_run_numbers(self, s):
result = []
for n in s.split(','):
try:
x, y = n.split('-')
except ValueError:
result.append(int(n))
else:
result.extend(list(range(int(x), int(y)+1)))
return result
def scons_path(self, dir):
    """Return the path of the scons.py script beneath checkout *dir*."""
    parts = (dir, 'src', 'script', 'scons.py')
    return os.path.join(*parts)
def scons_lib_dir_path(self, dir):
    """Return the path of the SCons engine library beneath checkout *dir*."""
    parts = (dir, 'src', 'engine')
    return os.path.join(*parts)
def prep_aegis_run(self, commands, removals):
    """Queue the commands that check an Aegis project out into a temp dir.

    Appends the checkout steps to *commands* and registers the temporary
    directory for later cleanup in *removals*.  Also points self.scons /
    self.scons_lib_dir at the checked-out tree.
    """
    tmpdir = make_temp_file(prefix=self.name + '-aegis-')
    self.aegis_tmpdir = tmpdir
    removals.append((shutil.rmtree, 'rm -rf %%s', tmpdir))
    # The parent project name is the project name with its extension stripped.
    self.aegis_parent_project = os.path.splitext(self.aegis_project)[0]
    self.scons = self.scons_path(tmpdir)
    self.scons_lib_dir = self.scons_lib_dir_path(tmpdir)
    commands.append('mkdir %(aegis_tmpdir)s')
    commands.append((lambda: os.chdir(self.aegis_tmpdir), 'cd %(aegis_tmpdir)s'))
    commands.append('%(aegis)s -cp -ind -p %(aegis_parent_project)s .')
    commands.append('%(aegis)s -cp -ind -p %(aegis_project)s -delta %(run_number)s .')
def prep_subversion_run(self, commands, removals):
    """Queue the commands that check SCons out of Subversion into a temp dir.

    Appends the checkout steps to *commands* and registers the temporary
    directory for later cleanup in *removals*.  Also points self.scons /
    self.scons_lib_dir at the checked-out tree.
    """
    tmpdir = make_temp_file(prefix=self.name + '-svn-')
    self.svn_tmpdir = tmpdir
    removals.append((shutil.rmtree, 'rm -rf %%s', tmpdir))
    self.scons = self.scons_path(tmpdir)
    self.scons_lib_dir = self.scons_lib_dir_path(tmpdir)
    commands.append('mkdir %(svn_tmpdir)s')
    commands.append('%(svn)s co %(svn_co_flag)s -r %(run_number)s %(subversion_url)s %(svn_tmpdir)s')
def individual_run(self, run_number, archive_list, prepare=None):
    """
    Performs an individual run of the default SCons invocations.

    run_number    -- run number to use; falsy means "pick the next free one"
    archive_list  -- archives/directories containing the project to build
    prepare       -- optional callable(commands, removals) that prepends
                     checkout steps (e.g. prep_subversion_run)
    """
    commands = []
    removals = []
    if prepare:
        prepare(commands, removals)
    # Save scons-related attributes so this method can restore them at the
    # end; prepare() and the defaulting below may overwrite them.
    save_scons = self.scons
    save_scons_wrapper = self.scons_wrapper
    save_scons_lib_dir = self.scons_lib_dir
    # Default any unset configuration relative to the original cwd.
    if self.outdir is None:
        self.outdir = self.orig_cwd
    elif not os.path.isabs(self.outdir):
        self.outdir = os.path.join(self.orig_cwd, self.outdir)
    if self.scons is None:
        self.scons = self.scons_path(self.orig_cwd)
    if self.scons_lib_dir is None:
        self.scons_lib_dir = self.scons_lib_dir_path(self.orig_cwd)
    if self.scons_wrapper is None:
        self.scons_wrapper = self.scons
    if not run_number:
        run_number = self.find_next_run_number(self.outdir, self.prefix)
    self.run_number = str(run_number)
    # Log/profile files are named PREFIX-NNN (zero-padded run number).
    self.prefix_run = self.prefix + '-%03d' % run_number
    if self.targets0 is None:
        self.targets0 = self.startup_targets
    if self.targets1 is None:
        self.targets1 = self.targets
    if self.targets2 is None:
        self.targets2 = self.targets
    # Everything happens inside a fresh temporary directory.
    self.tmpdir = make_temp_file(prefix = self.name + '-')
    commands.extend([
        'mkdir %(tmpdir)s',
        (os.chdir, 'cd %%s', self.tmpdir),
    ])
    # Copy or unpack each archive into the temp dir.  Directories are
    # copied wholesale; files go through unpack_map keyed by extension,
    # falling back to a plain copy for unknown suffixes.
    for archive in archive_list:
        if not os.path.isabs(archive):
            archive = os.path.join(self.orig_cwd, archive)
        if os.path.isdir(archive):
            dest = os.path.split(archive)[1]
            commands.append((shutil.copytree, 'cp -r %%s %%s', archive, dest))
        else:
            suffix = self.archive_splitext(archive)[1]
            unpack_command = self.unpack_map.get(suffix)
            if not unpack_command:
                dest = os.path.split(archive)[1]
                commands.append((shutil.copyfile, 'cp %%s %%s', archive, dest))
            else:
                commands.append(unpack_command + (archive,))
    commands.extend([
        (os.chdir, 'cd %%s', self.subdir),
    ])
    commands.extend(self.initial_commands)
    commands.extend([
        # Warm the OS file cache so timings are not skewed by cold reads.
        (lambda: read_tree('.'),
         'find * -type f | xargs cat > /dev/null'),
        (self.set_env, 'export %%s=%%s',
         'SCONS_LIB_DIR', self.scons_lib_dir),
        '%(python)s %(scons_wrapper)s --version',
    ])
    # One profiled, logged invocation per configured run command.
    index = 0
    for run_command in self.run_commands:
        setattr(self, 'prof%d' % index, self.profile_name(index))
        c = (
            self.log_execute,
            self.log_display,
            run_command,
            self.logfile_name(index),
        )
        commands.append(c)
        index = index + 1
    commands.extend([
        (os.chdir, 'cd %%s', self.orig_cwd),
    ])
    # Setting $PRESERVE keeps the temp dirs around for post-mortem.
    if not os.environ.get('PRESERVE'):
        commands.extend(removals)
        commands.append((shutil.rmtree, 'rm -rf %%s', self.tmpdir))
    # The command tuples above use %(name)s templates expanded from our
    # attribute dict.
    self.run_command_list(commands, self.__dict__)
    self.scons = save_scons
    self.scons_lib_dir = save_scons_lib_dir
    self.scons_wrapper = save_scons_wrapper
#
def help_time(self):
    """Print the usage/help text for the "time" subcommand to stdout."""
    # Renamed from 'help' to avoid shadowing the builtin of the same name.
    help_text = """\
Usage: scons-time time [OPTIONS] FILE [...]
-C DIR, --chdir=DIR Change to DIR before looking for files
-f FILE, --file=FILE Read configuration from specified FILE
--fmt=FORMAT, --format=FORMAT Print data in specified FORMAT
-h, --help Print this help and exit
-p STRING, --prefix=STRING Use STRING as log file/profile prefix
-t NUMBER, --tail=NUMBER Only report the last NUMBER files
--which=TIMER Plot timings for TIMER: total,
SConscripts, SCons, commands.
"""
    sys.stdout.write(self.outdent(help_text))
    sys.stdout.flush()
def do_time(self, argv):
    """Handle the "time" subcommand: report timings extracted from log files.

    Parses the subcommand options from *argv*, collects the matching
    log files (explicit arguments or PREFIX*.log in the current/--chdir
    directory) and prints the requested timer values either as an ascii
    table or as gnuplot input.  Exits the process on usage errors.
    """
    format = 'ascii'
    # Identity by default; becomes a --chdir-relative join below.
    logfile_path = lambda x: x
    tail = None
    which = 'total'
    short_opts = '?C:f:hp:t:'
    long_opts = [
        'chdir=',
        'file=',
        'fmt=',
        'format=',
        'help',
        'prefix=',
        'tail=',
        'title=',
        'which=',
    ]
    opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
    for o, a in opts:
        if o in ('-C', '--chdir'):
            self.chdir = a
        elif o in ('-f', '--file'):
            self.config_file = a
        elif o in ('--fmt', '--format'):
            format = a
        elif o in ('-?', '-h', '--help'):
            self.do_help(['help', 'time'])
            sys.exit(0)
        elif o in ('-p', '--prefix'):
            self.prefix = a
        elif o in ('-t', '--tail'):
            tail = int(a)
        elif o in ('--title',):
            self.title = a
        elif o in ('--which',):
            # Validate the timer name against the known timers.
            if a not in self.time_strings.keys():
                sys.stderr.write('%s: time: Unrecognized timer "%s".\n' % (self.name, a))
                sys.stderr.write('%s Type "%s help time" for help.\n' % (self.name_spaces, self.name))
                sys.exit(1)
            which = a
    if self.config_file:
        # The config file is executed as Python and may assign any
        # attribute on self.
        HACK_for_exec(open(self.config_file, 'rU').read(), self.__dict__)
    if self.chdir:
        os.chdir(self.chdir)
        logfile_path = lambda x: os.path.join(self.chdir, x)
    if not args:
        # No explicit files: glob for PREFIX*.log, honoring --tail.
        pattern = '%s*.log' % self.prefix
        args = self.args_to_files([pattern], tail)
        if not args:
            if self.chdir:
                directory = self.chdir
            else:
                directory = os.getcwd()
            sys.stderr.write('%s: time: No arguments specified.\n' % self.name)
            sys.stderr.write('%s No %s*.log files found in "%s".\n' % (self.name_spaces, self.prefix, directory))
            sys.stderr.write('%s Type "%s help time" for help.\n' % (self.name_spaces, self.name))
            sys.exit(1)
    else:
        args = self.args_to_files(args, tail)
    # NOTE(review): an unused local ('cwd_ = os.getcwd() + os.sep') was
    # computed here in the original and has been removed.
    if format == 'ascii':
        columns = ("Total", "SConscripts", "SCons", "commands")
        self.ascii_table(args, columns, self.get_debug_times, logfile_path)
    elif format == 'gnuplot':
        results = self.collect_results(args, self.get_debug_times,
                                       self.time_strings[which])
        self.gnuplot_results(results, fmt='%s %.6f')
    else:
        sys.stderr.write('%s: time: Unknown format "%s".\n' % (self.name, format))
        sys.exit(1)
if __name__ == '__main__':
    # Top-level entry point.  Only -h/--help and -V/--version are handled
    # here; anything else is treated as a subcommand and dispatched to
    # SConsTimer.execute_subcommand().
    opts, args = getopt.getopt(sys.argv[1:], 'h?V', ['help', 'version'])
    ST = SConsTimer()
    for o, a in opts:
        if o in ('-?', '-h', '--help'):
            ST.do_help(['help'])
            sys.exit(0)
        elif o in ('-V', '--version'):
            sys.stdout.write('scons-time version\n')
            sys.exit(0)
    if not args:
        # No subcommand given: print a pointer to the help and fail.
        sys.stderr.write('Type "%s help" for usage.\n' % ST.name)
        sys.exit(1)
    ST.execute_subcommand(args)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
################################################################################
##
## Photivo
##
## Copyright (C) 2013 Jos De Laender <jos@de-laender.be>
##
## This file is part of Photivo.
##
## Photivo is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License version 3
## as published by the Free Software Foundation.
##
## Photivo is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Photivo. If not, see <http://www.gnu.org/licenses/>.
##
################################################################################
import os
import sys
import platform
import fnmatch
import glob
import atexit
################################################################################
# Minimum requirements;
# Version strings compared against pkg-config / tool output during the
# configure phase further below.
ptMinGCCVersion = '4.6.0'
ptMinQtVersion = '4.7.0'
ptMinGMVersion = '1.3.12'
ptMinGMWandVersion = ptMinGMVersion
ptMinExiv2Version = '0.19'
ptMinLcms2Version = '2.1'
ptMinGlib2Version = '2.18'
ptMinLensfunVersion = '0.2.5'
ptMinFftw3Version = '3.2.2'
ptMinLqr1Version = '0.4.1'
ptMinGimp20Version = '2.6.10' # only when gimp plugin
# Custom libjpeg checks. Has no pkg-config equivalent.
ptMinLibJpegVersion = 62
ptMaxLibJpegVersion = 80 # Until notice of problem we allow also up to jpeg 8.0
################################################################################
# Supported values for PT_TARGET/HOST_PLATFORM and PT_TARGET_ARCHITECTURE.
ptPlatforms = ['darwin','posix','win32']
ptArchitectures = ['x86','x86_64']
################################################################################
# Clean exit and exit logging.
# ptLastCalledAtExit is defined earlier in this file; registered first so
# it runs last (atexit handlers run in reverse registration order).
atexit.register(ptLastCalledAtExit)
################################################################################
# Announce ourselves as the build program.
print ''
print ptBoldYellow + \
      'This is the scons build program for Photivo.\n' + \
      'Copyright (C) 2013 Jos De Laender <jos@de-laender.be>' + \
      ptNoAttrs;
print ''
# Help, options and variables boiler plate.
HelpText = '''
Usage : scons [-Q] [--ptVerbose] [--ptVerboseConfig] [--ptBuildConfFile=FILE] [install]
-Q : Quiet about reading/building progress messages.
(default : not quiet)
--ptVerbose : Verbose output of progress during compile.
(default : not verbose)
--ptVerboseConfig : Verbose output of progress during config.
(default : not verbose)
--ptBuildConfFile : File that describes the build parameters.
(default = DefaultBuild.py)
install : Install in directory (defined by PT_INSTALL_PATH)
'''
Help(HelpText)
# Photivo-specific command line options (SCons AddOption).
AddOption('--ptBuildConfFile',
          dest = 'ptBuildConfFile',
          type = 'string',
          nargs = 1,
          action = 'store',
          metavar = 'FILE',
          default = 'BuildConfs/DefaultBuild.py')
AddOption('--ptVerbose',
          dest = 'ptVerbose',
          action = 'store_true',
          default = False)
AddOption('--ptVerboseConfig',
          dest = 'ptVerboseConfig',
          action = 'store_true',
          default = False)
ptBuildConfFile = GetOption('ptBuildConfFile')
ptVerbose = GetOption('ptVerbose')
ptVerboseConfig = GetOption('ptVerboseConfig')
print ptBoldCyan + \
      'Reading build configuration from \'' + ptBuildConfFile + '\'' + \
      ptNoAttrs
# Use of simple file input (without 'Variables()' and command line input)
# enables a simpler and more correct guessing of values in more
# complex cases of local qt, gcc, etc ..
if not os.path.exists(ptBuildConfFile):
    print ptBoldRed + \
          'No such ptBuildConfFile : ' , ptBuildConfFile , \
          ptNoAttrs
    print ptNoAttrs + HelpText
    Exit(1)
# The complete set of option names a build configuration file may define;
# anything else is rejected later with a fatal error.
ptValidOptions = ['CC',
                  'CXX',
                  'PT_BUILD_CONF_NAME',
                  'PT_CROSS',
                  'PT_HOST_PLATFORM',
                  'PT_INSTALL_MODE',
                  'PT_INSTALL_PATH',
                  'PT_LOGFILE_NAME',
                  'PT_OMP',
                  'PT_RELEASE',
                  'PT_TARGET_ARCHITECTURE',
                  'PT_TARGET_PLATFORM',
                  'PT_TOOLS_DIR',
                  'PT_WITH_CONSOLE',
                  'PT_WITH_FILEMGR',
                  'PT_WITH_GIMPPLUGIN',
                  'PT_WITH_SYSTEMCIMG',
                  'PKG_CONFIG_PATH',
                  'QT4DIR']
# Defaults.
ptBuildValues = {'PT_BUILD_CONF_NAME' : 'Build',
                 'PT_CROSS' : '',
                 'PT_INSTALL_MODE' : 'Original',
                 'PT_INSTALL_PATH' : '/opt/Photivo',
                 'PT_OMP' : True,
                 'PT_RELEASE' : True,
                 'PT_WITH_FILEMGR' : False,
                 'PT_WITH_GIMPPLUGIN' : False,
                 'PT_WITH_SYSTEMCIMG' : False,
                 'PT_WITH_CONSOLE' : False}
# Read them from file
# NOTE(review): this executes the configuration file as arbitrary Python
# with no sandboxing -- the build conf file must come from a trusted source.
exec open(ptBuildConfFile, 'rU').read() in {}, ptBuildValues
#for key,value in ptBuildValues.items():
# print key + ' => ' + str(value)
# A default environment to start from.
ptDefaultEnv = Environment(CC = 'gcc', CXX = 'g++')
# For later reference. The unaltered one.
ptDefaultEnv['PT_DEFAULT_PATH'] = ptDefaultEnv['ENV']['PATH']
# In case of mingw we pull in lots of the environment.
# Forget the "isolated" environment in this case, but that's a minor issue
# I guess in this context. Unless someone would start to crosscompile under
# mingw, but I don't believe we are going it to make as insane as that ..
if sys.platform in ['win32'] :
    print ptBoldBlue + \
          'I seem to be running on a windows platform. ' + \
          'Please note I assume to work under MSYS ' + \
          'set up as in the wiki. Anything else will ' + \
          'currently fail.' + \
          ptNoAttrs
    ptOsEnv = dict(os.environ)
    # Path from MSYS
    ptDefaultEnv['ENV']['PATH'] = \
        ptDefaultEnv['ENV']['PATH'] + \
        os.pathsep + \
        ptOsEnv['PATH'].replace("/","\\")
    # Additional flags from MSYS (see wiki)
    ptDefaultEnv.MergeFlags(ptDefaultEnv.ParseFlags(ptOsEnv['LDFLAGS']))
    ptDefaultEnv.MergeFlags(ptDefaultEnv.ParseFlags(ptOsEnv['CFLAGS']))
    ptDefaultEnv.MergeFlags(ptDefaultEnv.ParseFlags(ptOsEnv['CXXFLAGS']))
    # Additional PKG_CONFIG_PATH from MSYS (see wiki)
    if 'PKG_CONFIG_PATH' in ptOsEnv :
        ptOsPkgConfigPath = ptOsEnv['PKG_CONFIG_PATH'].replace("/","\\")
        if not 'PKG_CONFIG_PATH' in ptDefaultEnv['ENV']:
            ptDefaultEnv['ENV']['PKG_CONFIG_PATH'] = ptOsPkgConfigPath
        else:
            ptDefaultEnv['ENV']['PKG_CONFIG_PATH'] = \
                ptDefaultEnv['ENV']['PKG_CONFIG_PATH'] + os.pathsep + ptOsPkgConfigPath
    # Local MSYS compatible workaround for long link line.
    # ptTempFileMunge is defined earlier in this file.
    ptDefaultEnv['TEMPFILE'] = ptTempFileMunge
    ptDefaultEnv['LINKCOM'] = '${TEMPFILE("%s")}' % (ptDefaultEnv['LINKCOM'])
#print ptDefaultEnv.Dump()
# Do we have CC and CXX ?
if (ptDefaultEnv['CC'] == None) :
print ptBoldRed + 'CC not defined' + ptNoAttrs
print ptBoldRed + 'Giving up' + ptNoAttrs
Exit(1)
if (ptDefaultEnv['CXX'] == None) :
print ptBoldRed + 'CXX not defined' + ptNoAttrs
print ptBoldRed + 'Giving up' + ptNoAttrs
Exit(1)
# Throw everything that we recognize in the environment, overwriting.
for ptBuildKey,ptBuildValue in ptBuildValues.items():
if ptBuildKey in ptValidOptions:
ptDefaultEnv[ptBuildKey] = ptBuildValues[ptBuildKey]
else:
print ptBoldRed + \
'No such option : ' + ptBuildKey + \
' while reading ' + ptBuildConfFile + \
ptNoAttrs
print ptNoAttrs + HelpText
Exit(1)
# QT4DIR (name compatible with qt4 tool) via qmake if not yet in environment.
if not 'QT4DIR' in ptDefaultEnv:
    ptEnv = Environment(ENV = os.environ)
    qmake = ptEnv.WhereIs('qmake') or ptEnv.WhereIs('qmake-qt4')
    if qmake:
        # Qt root is two levels up from the qmake binary (QTDIR/bin/qmake).
        ptDefaultEnv['QT4DIR'] = os.path.dirname(os.path.dirname(qmake))
    else :
        print ptBoldRed + \
              'No QT4DIR found.' , \
              ptNoAttrs
        Exit(1)
# Check QT4DIR (user can have given wrong one)
if not os.path.isdir(ptDefaultEnv['QT4DIR']):
    print ptBoldRed + \
          'QT4DIR (' + ptDefaultEnv['QT4DIR'] + ') does not exist.' , \
          ptNoAttrs
    Exit(1)
# PT_TOOLS_DIR detection. If not yet in environment.
# Defaults to the directory the C compiler lives in.
if not 'PT_TOOLS_DIR' in ptDefaultEnv:
    cc = ptDefaultEnv.WhereIs(ptDefaultEnv['CC'])
    if cc:
        ptDefaultEnv['PT_TOOLS_DIR'] = os.path.dirname(cc)
    else :
        print ptBoldRed + \
              'No PT_TOOLS_DIR found.' , \
              ptNoAttrs
        Exit(1)
# Check PT_TOOLS_DIR (user can have given wrong one)
if not os.path.isdir(ptDefaultEnv['PT_TOOLS_DIR']):
    print ptBoldRed + \
          'PT_TOOLS_DIR (' + ptDefaultEnv['PT_TOOLS_DIR'] + \
          ') does not exist.' , \
          ptNoAttrs
    Exit(1)
# PT_LOGFILE_NAME : default derived from the build configuration name.
if not 'PT_LOGFILE_NAME' in ptDefaultEnv:
    ptDefaultEnv['PT_LOGFILE_NAME'] = ptDefaultEnv['PT_BUILD_CONF_NAME'] + '.log'
# Check PT_INSTALL_PATH (skipped on win32).
if not sys.platform in ['win32'] :
    if not os.path.isdir(ptDefaultEnv['PT_INSTALL_PATH']):
        print ptBoldRed + \
              'PT_INSTALL_PATH (' + ptDefaultEnv['PT_INSTALL_PATH'] + \
              ') does not exist.' , \
              ptNoAttrs
        Exit(1)
# Target and host platform. Normally PLATFORM.
if not 'PT_TARGET_PLATFORM' in ptDefaultEnv:
    ptDefaultEnv['PT_TARGET_PLATFORM'] = ptDefaultEnv['PLATFORM']
if not ptDefaultEnv['PT_TARGET_PLATFORM'] in ptPlatforms :
    print ptBoldRed + \
          'PT_TARGET_PLATFORM (' + ptDefaultEnv['PT_TARGET_PLATFORM'] + \
          ') should be in ' + str(ptPlatforms) + '.' + \
          ptNoAttrs
    Exit(1)
if not 'PT_HOST_PLATFORM' in ptDefaultEnv:
    ptDefaultEnv['PT_HOST_PLATFORM'] = ptDefaultEnv['PLATFORM']
if not ptDefaultEnv['PT_HOST_PLATFORM'] in ptPlatforms :
    print ptBoldRed + \
          'PT_HOST_PLATFORM (' + ptDefaultEnv['PT_HOST_PLATFORM'] + \
          ') should be in ' + str(ptPlatforms) + '.' + \
          ptNoAttrs
    Exit(1)
# Target and host architecture.  Guessed from the Python interpreter's
# architecture when not configured explicitly.
if not 'PT_TARGET_ARCHITECTURE' in ptDefaultEnv:
    ptArch = platform.architecture()[0]
    if ptArch == '32bit' :
        ptDefaultEnv['PT_TARGET_ARCHITECTURE'] = 'x86'
    if ptArch == '64bit' :
        ptDefaultEnv['PT_TARGET_ARCHITECTURE'] = 'x86_64'
if not ptDefaultEnv['PT_TARGET_ARCHITECTURE'] in ptArchitectures :
    print ptBoldRed + \
          'PT_TARGET_ARCHITECTURE (' + ptDefaultEnv['PT_TARGET_ARCHITECTURE'] + \
          ') should be in ' + str(ptArchitectures) + '.' + \
          ptNoAttrs
    Exit(1)
################################################################################
# Opening of LogFile
if not ptDefaultEnv['PT_LOGFILE_NAME']:
    ptDefaultEnv['PT_LOGFILE_NAME'] = ptDefaultEnv['PT_BUILD_CONF_NAME'] + '.log'
ptLogFile = open(ptDefaultEnv['PT_LOGFILE_NAME'],'w',1) # Line buffered
ptDefaultEnv['PT_LOGFILE'] = ptLogFile
# I hope to duplicate compile errors (via stderr) into the log this way.
# TODO Find some win32 equivalent.
if not sys.platform in ['win32'] :
    sys.stderr = os.popen('tee stderr.log','w')
atexit.register(ptAtExit,ptLogFile)
################################################################################
# Some extra derived environment.
# Spawn with stdout/stderr echoing from the child.
ptDefaultEnv['SPAWN'] = ptEchoSpawn
# Needed for above.
ptDefaultEnv['PT_VERBOSE'] = ptVerbose
ptDefaultEnv['PT_VERBOSECONFIG'] = ptVerboseConfig
# Extend the CC/CXX names for cross. XXX JDLA More might be needed. TODO
# PT_CROSS is a prefix like 'i686-w64-mingw32-' (empty for native builds).
ptDefaultEnv['CC'] = ptDefaultEnv['PT_CROSS'] + ptDefaultEnv['CC']
ptDefaultEnv['CXX'] = ptDefaultEnv['PT_CROSS'] + ptDefaultEnv['CXX']
# Extend PATH with the found PT_TOOLS_DIR
ptDefaultEnv['ENV']['PATH'] = \
    ptDefaultEnv['PT_TOOLS_DIR'] + os.pathsep + ptDefaultEnv['ENV']['PATH']
# Add or extend PKG_CONFIG_PATH
# Assuming that it is only needed if QT4DIR is 'non standard'
ptQtBin = os.path.join(str(ptDefaultEnv['QT4DIR']),'bin')
if not ptQtBin in ptDefaultEnv['PT_DEFAULT_PATH']:
    ptPkgConfigPath = \
        os.path.join(os.path.join(str(ptDefaultEnv['QT4DIR']),'lib'),'pkgconfig')
    if not 'PKG_CONFIG_PATH' in ptDefaultEnv['ENV']:
        ptDefaultEnv['ENV']['PKG_CONFIG_PATH'] = ptPkgConfigPath
    else :
        ptDefaultEnv['ENV']['PKG_CONFIG_PATH'] = \
            ptDefaultEnv['ENV']['PKG_CONFIG_PATH'] + os.pathsep + ptPkgConfigPath
################################################################################
# Options summary so far.  Printed to the console only in verbose-config
# mode, but always written to the log file.
ptDoPrint = False
if ptVerboseConfig:
    ptDoPrint = True
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'Configuration file : ' + ptBuildConfFile)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'CC : ' + str(ptDefaultEnv['CC']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'CXX : ' + str(ptDefaultEnv['CXX']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_BUILD_CONF_NAME : ' + str(ptDefaultEnv['PT_BUILD_CONF_NAME']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_CROSS : ' + str(ptDefaultEnv['PT_CROSS']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_HOST_PLATFORM : ' + str(ptDefaultEnv['PT_HOST_PLATFORM']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_INSTALL_PATH : ' + str(ptDefaultEnv['PT_INSTALL_PATH']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_LOGFILE_NAME : ' + str(ptDefaultEnv['PT_LOGFILE_NAME']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_OMP : ' + str(ptDefaultEnv['PT_OMP']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_RELEASE : ' + str(ptDefaultEnv['PT_RELEASE']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_TARGET_ARCHITECTURE : ' + str(ptDefaultEnv['PT_TARGET_ARCHITECTURE']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_TARGET_PLATFORM : ' + str(ptDefaultEnv['PT_TARGET_PLATFORM']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_TOOLS_DIR : ' + str(ptDefaultEnv['PT_TOOLS_DIR']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_WITH_CONSOLE : ' + str(ptDefaultEnv['PT_WITH_CONSOLE']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_WITH_FILEMGR : ' + str(ptDefaultEnv['PT_WITH_FILEMGR']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_WITH_GIMPPLUGIN : ' + str(ptDefaultEnv['PT_WITH_GIMPPLUGIN']))
# Fixed label typo: was 'PT_WITH_SYTSTEMCIMG'.
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_WITH_SYSTEMCIMG : ' + str(ptDefaultEnv['PT_WITH_SYSTEMCIMG']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'ENV[PATH] : ' + str(ptDefaultEnv['ENV']['PATH']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'ENV[PKG_CONFIG_PATH] : ' + str(ptDefaultEnv['ENV'].get('PKG_CONFIG_PATH')))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'QT4DIR : ' + str(ptDefaultEnv['QT4DIR']))
################################################################################
# Executables on win32 need the .exe suffix.
if ptDefaultEnv['PT_TARGET_PLATFORM'] == 'win32' :
    ptDefaultEnv['PROGSUFFIX'] = '.exe'
################################################################################
# Minimum compiler version check.
if not ptCheckGCCVersion(ptDefaultEnv,ptMinGCCVersion) :
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'GCC >= ' + ptMinGCCVersion + ' not found.')
    ptVersion = ptGetGCCVersion(ptDefaultEnv)
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found GCC : ' + ptVersion[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found G++ : ' + ptVersion[1])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
################################################################################
# Check for libraries pkg-config and Qt version.
# The ptCheck*/ptGet* custom test functions are defined earlier in this file.
ptConf = Configure(ptDefaultEnv,
                   custom_tests =
                   {'ptCheckPKGConfig' : ptCheckPKGConfig,
                    'ptCheckPKG' : ptCheckPKG ,
                    'ptCheckQt' : ptCheckQt ,
                    'ptCheckLibWithHeader' : ptCheckLibWithHeader,
                    'ptCheckHg' : ptCheckHg,
                    'ptCheckLibJpeg' : ptCheckLibJpeg,
                    'ptGetPKGOutput' : ptGetPKGOutput,
                    'ptGetQtOutput' : ptGetQtOutput})
# hg check (mercurial is needed to derive the application version).
if not ptConf.ptCheckHg():
    ptPrintLog(True,ptLogFile,ptBoldRed,'Mercurial (hg) not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
# Version we are building
ptAppVersion = ptGetAppVersion()
# jpeg check. Note header file might be tricky and need tweak !
if not ptConf.ptCheckLibWithHeader('jpeg','jpeglib.h','cxx'):
    ptPrintLog(True,ptLogFile,ptBoldRed,'Library jpeg (or jpeglib.h) not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'It is not unusual you have to add \n'
               '"#include <stdlib.h>" and "#include <stdio.h>" \n'
               'to your "jpeglib.h".')
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
# Additional custom test on jpeg lib version.
# TODO Check doesn't work for CROSS (can't execute it on host ..)
if ptDefaultEnv['PT_TARGET_PLATFORM'] == ptDefaultEnv['PT_HOST_PLATFORM'] :
    if not ptConf.ptCheckLibJpeg(ptMinLibJpegVersion,ptMaxLibJpegVersion):
        ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
        Exit(1)
# png check.
if not ptConf.ptCheckLibWithHeader('png','png.h','cxx'):
    ptPrintLog(True,ptLogFile,ptBoldRed,'Library png not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
# tiff check.
if not ptConf.ptCheckLibWithHeader('tiff','tiff.h','cxx'):
    ptPrintLog(True,ptLogFile,ptBoldRed,'Library tiff not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
# pkg-config check. (does 'cross' behind the scenes).
if not ptConf.ptCheckPKGConfig('0.25'):
    ptPrintLog(True,ptLogFile,ptBoldRed,'pkg-config >= 0.25 not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
# The pkg-config based checks below all follow the same pattern: fail
# fatally with the found version logged, or capture [version, flags]
# for the summary and flag-merging sections further down.
# lensfun check.
if not ptConf.ptCheckPKG('lensfun >= ' + ptMinLensfunVersion):
    ptPrintLog(True,ptLogFile, ptBoldRed,
               'lensfun >= ' + ptMinLensfunVersion + ' not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found : ' + ptConf.ptGetPKGOutput('lensfun')[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
else :
    [ptLensfunVersionString,ptLensfunFlags] = ptConf.ptGetPKGOutput('lensfun')
# fftw3 check.
if not ptConf.ptCheckPKG('fftw3 >= ' + ptMinFftw3Version):
    ptPrintLog(True,ptLogFile, ptBoldRed,
               'fftw3 >= ' + ptMinFftw3Version + ' not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found : ' + ptConf.ptGetPKGOutput('fftw3')[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
else :
    [ptFftw3VersionString,ptFftw3Flags] = ptConf.ptGetPKGOutput('fftw3')
# lqr-1 check.
if not ptConf.ptCheckPKG('lqr-1 >= ' + ptMinLqr1Version):
    ptPrintLog(True,ptLogFile, ptBoldRed,
               'lqr-1 >= ' + ptMinLqr1Version + ' not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found : ' + ptConf.ptGetPKGOutput('lqr-1')[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
else :
    [ptLqr1VersionString,ptLqr1Flags] = ptConf.ptGetPKGOutput('lqr-1')
# glib-2.0 check.
if not ptConf.ptCheckPKG('glib-2.0 >= ' + ptMinGlib2Version):
    ptPrintLog(True,ptLogFile, ptBoldRed,
               'glib-2.0 >= ' + ptMinGlib2Version + ' not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found : ' + ptConf.ptGetPKGOutput('glib-2.0')[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
else :
    [ptGlib2VersionString,ptGlib2Flags] = ptConf.ptGetPKGOutput('glib-2.0')
# exiv2 check.
if not ptConf.ptCheckPKG('exiv2 >= ' + ptMinExiv2Version):
    ptPrintLog(True,ptLogFile, ptBoldRed,
               'exiv2 >= ' + ptMinExiv2Version + ' not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found : ' + ptConf.ptGetPKGOutput('exiv2')[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
else :
    [ptExiv2VersionString,ptExiv2Flags] = ptConf.ptGetPKGOutput('exiv2')
# lcms2 check.
if not ptConf.ptCheckPKG('lcms2 >= ' + ptMinLcms2Version):
    ptPrintLog(True,ptLogFile, ptBoldRed,
               'lcms2 >= ' + ptMinLcms2Version + ' not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found : ' + ptConf.ptGetPKGOutput('lcms2')[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
else :
    [ptLcms2VersionString,ptLcms2Flags] = ptConf.ptGetPKGOutput('lcms2')
# GraphicsMagick check.
if not ptConf.ptCheckPKG('GraphicsMagick++ >= ' + ptMinGMVersion):
    ptPrintLog(True,ptLogFile, ptBoldRed,
               'Magick++ >= ' + ptMinGMVersion + ' not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found : ' + ptConf.ptGetPKGOutput('GraphicsMagick++')[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
else :
    [ptGMVersionString,ptGMFlags] = ptConf.ptGetPKGOutput('GraphicsMagick++')
# GraphicsMagickWand check.
if not ptConf.ptCheckPKG('GraphicsMagickWand >= ' + ptMinGMWandVersion):
    ptPrintLog(True,ptLogFile, ptBoldRed,
               'MagickWand >= ' + ptMinGMWandVersion + ' not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found : ' + ptConf.ptGetPKGOutput('GraphicsMagickWand')[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
else :
    [ptGMWandVersionString,ptGMWandFlags] = ptConf.ptGetPKGOutput('GraphicsMagickWand')
# QT check.
if not ptConf.ptCheckQt(ptMinQtVersion) :
    ptPrintLog(True,ptLogFile, ptBoldRed,
               'Qt >= ' + ptMinQtVersion + ' not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found : ' + ptConf.ptGetQtOutput()[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
else :
    [ptQtVersionString,ptQtFlags] = ptConf.ptGetQtOutput()
# libgimp check in case we are working with GIMPPLUGIN
if ptDefaultEnv['PT_WITH_GIMPPLUGIN']:
    if not ptConf.ptCheckPKG('gimp-2.0 >= ' + ptMinGimp20Version):
        ptPrintLog(True,ptLogFile, ptBoldRed,
                   'gimp-2.0 >= ' + ptMinGimp20Version + ' not found.')
        ptPrintLog(True,ptLogFile,ptBoldRed,
                   'Found : ' + ptConf.ptGetPKGOutput('gimp-2.0')[0])
        ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
        Exit(1)
    else :
        [ptGimp20VersionString,ptGimp20Flags] = ptConf.ptGetPKGOutput('gimp-2.0')
# Some functions check.
if ptConf.CheckFunc('getc_unlocked'):
    ptConf.env.Append(CPPDEFINES = ['-DHAVE_GETC_UNLOCKED'])
if ptConf.CheckFunc('ftello'):
    ptConf.env.Append(CPPDEFINES = ['-DHAVE_FTELLO'])
# Version defines.
ptConf.env.Append(CPPDEFINES = ['-DAPPVERSION=\'' + ptAppVersion + '\''])
# Prefix defines.
ptConf.env.Append(CPPDEFINES = \
    ['-DPREFIX=\'' + ptDefaultEnv['PT_INSTALL_PATH'] + '\''])
# System CIMG
if ptDefaultEnv['PT_WITH_SYSTEMCIMG']:
    ptConf.env.Append(CPPDEFINES = ['-DSYSTEM_CIMG'])
# FileMgr
if not ptDefaultEnv['PT_WITH_FILEMGR']:
    ptConf.env.Append(CPPDEFINES = ['-DPT_WITHOUT_FILEMGR'])
# Finalize configuration
# NOTE(review): Finish() normally returns the checked environment; its
# return value is discarded here and ptDefaultEnv keeps being used --
# appears intentional since the checks mutate ptConf.env in place.
ptConf.Finish()
# Show summary results.
# The *VersionString/*Flags variables were captured by the pkg-config and
# Qt checks above; printed to the console only in verbose-config mode.
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'lensfun version : ' + ptLensfunVersionString)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'fftw3 version : ' + ptFftw3VersionString)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'lqr-1 version : ' + ptLqr1VersionString)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'glib-2.0 version : ' + ptGlib2VersionString)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'exiv2 version : ' + ptExiv2VersionString)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'lcms2 version : ' + ptLcms2VersionString)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'GM version : ' + ptGMVersionString)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'GM Wand version : ' + ptGMWandVersionString)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'Qt version : ' + ptQtVersionString)
if ptDefaultEnv['PT_WITH_GIMPPLUGIN']:
    ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
               'Gimp20 version : ' + ptGimp20VersionString)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'lensfun flags : ' + ptLensfunFlags)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'fftw3 flags : ' + ptFftw3Flags)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'lqr-1 flags : ' + ptLqr1Flags)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'glib-2.0 flags : ' + ptGlib2Flags)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'exiv2 flags : ' + ptExiv2Flags)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'lcms2 flags : ' + ptLcms2Flags)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'GM flags : ' + ptGMFlags)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'GM Wand flags : ' + ptGMWandFlags)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'Qt flags : ' + ptQtFlags)
if ptDefaultEnv['PT_WITH_GIMPPLUGIN']:
    ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
               'Gimp20 flags : ' + ptGimp20Flags)
################################################################################
# Parse all the flags collected up to now.
# Each pkg-config flag string is parsed into SCons construction variables
# and merged into the default environment.
ptParsedLensfunFlags = ptDefaultEnv.ParseFlags(ptLensfunFlags)
ptDefaultEnv.MergeFlags(ptParsedLensfunFlags)
ptParsedFftw3Flags = ptDefaultEnv.ParseFlags(ptFftw3Flags)
ptDefaultEnv.MergeFlags(ptParsedFftw3Flags)
ptParsedLqr1Flags = ptDefaultEnv.ParseFlags(ptLqr1Flags)
ptDefaultEnv.MergeFlags(ptParsedLqr1Flags)
ptParsedGlib2Flags = ptDefaultEnv.ParseFlags(ptGlib2Flags)
ptDefaultEnv.MergeFlags(ptParsedGlib2Flags)
ptParsedExiv2Flags = ptDefaultEnv.ParseFlags(ptExiv2Flags)
ptDefaultEnv.MergeFlags(ptParsedExiv2Flags)
ptParsedLcms2Flags = ptDefaultEnv.ParseFlags(ptLcms2Flags)
ptDefaultEnv.MergeFlags(ptParsedLcms2Flags)
ptParsedGMFlags = ptDefaultEnv.ParseFlags(ptGMFlags)
ptDefaultEnv.MergeFlags(ptParsedGMFlags)
ptParsedGMWandFlags = ptDefaultEnv.ParseFlags(ptGMWandFlags)
ptDefaultEnv.MergeFlags(ptParsedGMWandFlags)
ptParsedQtFlags = ptDefaultEnv.ParseFlags(ptQtFlags)
ptDefaultEnv.MergeFlags(ptParsedQtFlags)
if ptDefaultEnv['PT_WITH_GIMPPLUGIN']:
    ptParsedGimp20Flags = ptDefaultEnv.ParseFlags(ptGimp20Flags)
    ptDefaultEnv.MergeFlags(ptParsedGimp20Flags)
################################################################################
# Command printing via a wrapper function for decorating and logging.
# After the configure checks, in order not to pollute the log.
ptDefaultEnv['PRINT_CMD_LINE_FUNC'] = ptPrintCmdLine
################################################################################
# Pure for scons printing recognition.
# These defines carry no code meaning; ptPrintCmdLine uses them to label
# compile vs. link command lines.
ptDefaultEnv.Append(CXXFLAGS = ['-DSCONS_CXX'])
ptDefaultEnv.Append(CCFLAGS = ['-DSCONS_CC'])
ptDefaultEnv.Append(LINKFLAGS = ['-DSCONS_LINK'])
################################################################################
# Common settings for compiler and linker.
ptDefaultEnv.Append(CCFLAGS = ['-ffast-math'])
ptDefaultEnv.Append(CCFLAGS = ['-Wall'])
ptDefaultEnv.Append(CCFLAGS = ['-Werror'])
ptDefaultEnv.Append(CCFLAGS = ['-Wextra'])
ptDefaultEnv.Append(CXXFLAGS = ['-std=gnu++0x'])
if ptDefaultEnv['PT_OMP']:
    ptDefaultEnv.Append(CCFLAGS = ['-fopenmp'])
    ptDefaultEnv.Append(LIBS = ['gomp','pthread'])
if ptDefaultEnv['PT_RELEASE'] == True:
    ptDefaultEnv.Append(CCFLAGS = ['-O3'])
    ptDefaultEnv.Append(CCFLAGS = ['-funroll-loops', '-ftree-vectorize'])
    ptDefaultEnv.Append(CCFLAGS = ['-DQT_NO_DEBUG'])
    # NOTE(review): "not in 'darwin'" tests substring membership of the
    # *string* 'darwin'; it works because no other platform name here is
    # a substring of it, but "not in ['darwin']" was likely intended.
    if ptDefaultEnv['PT_TARGET_PLATFORM'] not in 'darwin' :
        ptDefaultEnv.Append(LINKFLAGS = ['-Wl,-O1'])
else:
    ptDefaultEnv.Append(CCFLAGS = ['-g'])
    ptDefaultEnv.Append(CCFLAGS = ['-DQT_DEBUG'])
    if ptDefaultEnv['PT_TARGET_PLATFORM'] not in 'darwin' :
        ptDefaultEnv.Append(CCFLAGS = ['-O1'])
if ptDefaultEnv['PT_TARGET_PLATFORM'] in ['win32'] :
    ptDefaultEnv.Append(LIBS = ['ole32','wsock32','expat','gdi32','iconv'])
    # Console subsystem keeps a terminal window for debug output.
    if ptDefaultEnv['PT_WITH_CONSOLE'] == True:
        ptDefaultEnv.Append(LINKFLAGS = ['-Wl,-subsystem,console'])
    else:
        ptDefaultEnv.Append(LINKFLAGS = ['-Wl,-subsystem,windows'])
if ptDefaultEnv['PT_TARGET_ARCHITECTURE'] not in ['x86_64'] :
    # This can go wild ? XXX JDLA We set it i686 without actually knowing ?
    ptDefaultEnv.Append(CCFLAGS = ['-march=i686'])
################################################################################
# Make a qt4 env.
# XXX JDLA TODO Not fully understood why needed : in any
# case when not doing so, .qrc (rcc) fails to be recognized ...
ptQtEnv = ptDefaultEnv.Clone();
ptQtEnv.Tool('qt4')
################################################################################
# Subsidiary scripts in a variant build.
# Build artifacts go into Build/<conf name>/Build_Photivo.
SConscript(os.path.join('Sources','SConscript'),
           variant_dir = os.path.join('Build',
                                      os.path.join(ptDefaultEnv['PT_BUILD_CONF_NAME'],
                                                   'Build_Photivo')),
           exports = 'ptQtEnv')
################################################################################
################################################################################
# Install
if ptDefaultEnv['PT_INSTALL_MODE'] == 'Original' :
ptOrgList = []
ptTgtList = []
# binaries.
if ptDefaultEnv['PT_TARGET_PLATFORM'] in ['darwin','posix'] :
ptOrgList += ['photivo']
ptOrgList += ['ptClear']
ptTgtList += [ptDefaultEnv['PT_INSTALL_PATH'] + '/bin/photivo']
ptTgtList += [ptDefaultEnv['PT_INSTALL_PATH'] + '/bin/ptclear']
if ptDefaultEnv['PT_TARGET_PLATFORM'] in ['win32']:
ptOrgList += ['photivo.exe']
ptOrgList += ['ptClear.exe']
ptTgtList += [ptDefaultEnv['PT_INSTALL_PATH'] + '/bin/photivo.exe']
ptTgtList += [ptDefaultEnv['PT_INSTALL_PATH'] + '/bin/ptclear.exe']
# desktop. (twice : also in .local
ptOrgList += ['ReferenceMaterial/photivo.desktop']
ptTgtList += [ptDefaultEnv['PT_INSTALL_PATH'] + \
'/share/applications/photivo.desktop']
ptOrgList += ['ReferenceMaterial/photivo.desktop']
ptTgtList += ['~/.local/share/applications/photivo.desktop']
# icon.
ptOrgList += ['qrc/photivo-appicon.png']
ptTgtList += [ptDefaultEnv['PT_INSTALL_PATH'] + \
'/share/pixmap/photivo-appicon.png']
# Curves etc ..
for Dir in ['Curves','ChannelMixers','Presets','Profiles','Translations',
'LensfunDatabase','UISettings','Themes']:
for Root,DirNames,FileNames in os.walk(Dir):
for FileName in FileNames:
ptOrgList += [os.path.join(Root,FileName)]
ptTgtList += [ptDefaultEnv['PT_INSTALL_PATH'] + \
'/share/photivo/' + os.path.join(Root,FileName)]
if ptDefaultEnv['PT_HOST_PLATFORM'] == 'posix' and \
ptDefaultEnv['PT_TARGET_PLATFORM'] == 'posix' :
ptDefaultEnv.Alias('install',ptDefaultEnv['PT_INSTALL_PATH'])
ptDefaultEnv.InstallAs(ptTgtList,ptOrgList)
################################################################################
# import script for building .app bundle
# XXX JDLA TODO : Integrate better.
if ptDefaultEnv['PT_TARGET_PLATFORM'] in ['darwin'] :
import osx_app_bundle
################################################################################
| Python |
#! /usr/bin/env python
#
# SCons - a Software Constructor
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Release metadata stamped in by the SCons packaging process.
__revision__ = "src/script/sconsign.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
__version__ = "2.2.0"
__build__ = "issue-2856:2676:d23b7a2f45e8[MODIFIED]"
__buildsys__ = "oberbrunner-dev"
__date__ = "2012/08/05 15:38:28"
__developer__ = "garyo"
import os
import sys
##############################################################################
# BEGIN STANDARD SCons SCRIPT HEADER
#
# This is the cut-and-paste logic so that a self-contained script can
# interoperate correctly with different SCons versions and installation
# locations for the engine. If you modify anything in this section, you
# should also change other scripts that use this same header.
##############################################################################
# Strip the script directory from sys.path() so on case-insensitive
# (WIN32) systems Python doesn't think that the "scons" script is the
# "SCons" package. Replace it with our own library directories
# (version-specific first, in case they installed by hand there,
# followed by generic) so we pick up the right version of the build
# engine modules if they're in either directory.
# Locate the SCons engine: build a list of candidate library directories
# (version-specific first, then generic) and prepend them to sys.path.
script_dir = sys.path[0]
if script_dir in sys.path:
    sys.path.remove(script_dir)
libs = []
if "SCONS_LIB_DIR" in os.environ:
    libs.append(os.environ["SCONS_LIB_DIR"])
local_version = 'scons-local-' + __version__
local = 'scons-local'
if script_dir:
    local_version = os.path.join(script_dir, local_version)
    local = os.path.join(script_dir, local)
libs.append(os.path.abspath(local_version))
libs.append(os.path.abspath(local))
scons_version = 'scons-%s' % __version__
# preferred order of scons lookup paths
prefs = []
try:
    import pkg_resources
except ImportError:
    pass
else:
    # when running from an egg add the egg's directory
    try:
        d = pkg_resources.get_distribution('scons')
    except pkg_resources.DistributionNotFound:
        pass
    else:
        prefs.append(d.location)
if sys.platform == 'win32':
    # sys.prefix is (likely) C:\Python*;
    # check only C:\Python*.
    prefs.append(sys.prefix)
    prefs.append(os.path.join(sys.prefix, 'Lib', 'site-packages'))
else:
    # On other (POSIX) platforms, things are more complicated due to
    # the variety of path names and library locations. Try to be smart
    # about it.
    if script_dir == 'bin':
        # script_dir is `pwd`/bin;
        # check `pwd`/lib/scons*.
        prefs.append(os.getcwd())
    else:
        if script_dir == '.' or script_dir == '':
            script_dir = os.getcwd()
        head, tail = os.path.split(script_dir)
        if tail == "bin":
            # script_dir is /foo/bin;
            # check /foo/lib/scons*.
            prefs.append(head)
    head, tail = os.path.split(sys.prefix)
    if tail == "usr":
        # sys.prefix is /foo/usr;
        # check /foo/usr/lib/scons* first,
        # then /foo/usr/local/lib/scons*.
        prefs.append(sys.prefix)
        prefs.append(os.path.join(sys.prefix, "local"))
    elif tail == "local":
        h, t = os.path.split(head)
        if t == "usr":
            # sys.prefix is /foo/usr/local;
            # check /foo/usr/local/lib/scons* first,
            # then /foo/usr/lib/scons*.
            prefs.append(sys.prefix)
            prefs.append(head)
        else:
            # sys.prefix is /foo/local;
            # check only /foo/local/lib/scons*.
            prefs.append(sys.prefix)
    else:
        # sys.prefix is /foo (ends in neither /usr or /local);
        # check only /foo/lib/scons*.
        prefs.append(sys.prefix)
    # NOTE(review): sys.version[:3] yields "3.1" on Python 3.10+; harmless
    # here since this Python-2-era script refuses to run under Python 3.
    temp = [os.path.join(x, 'lib') for x in prefs]
    temp.extend([os.path.join(x,
                              'lib',
                              'python' + sys.version[:3],
                              'site-packages') for x in prefs])
    prefs = temp
# Add the parent directory of the current python's library to the
# preferences. On SuSE-91/AMD64, for example, this is /usr/lib64,
# not /usr/lib.
try:
    libpath = os.__file__
except AttributeError:
    pass
else:
    # Split /usr/libfoo/python*/os.py to /usr/libfoo/python*.
    libpath, tail = os.path.split(libpath)
    # Split /usr/libfoo/python* to /usr/libfoo
    libpath, tail = os.path.split(libpath)
    # Check /usr/libfoo/scons*.
    prefs.append(libpath)
# Look first for 'scons-__version__' in all of our preference libs,
# then for 'scons'.
libs.extend([os.path.join(x, scons_version) for x in prefs])
libs.extend([os.path.join(x, 'scons') for x in prefs])
sys.path = libs + sys.path
##############################################################################
# END STANDARD SCons SCRIPT HEADER
##############################################################################
import SCons.compat # so pickle will import cPickle instead
import whichdb
import time
import pickle
import imp
import SCons.SConsign
def my_whichdb(filename):
    """Classify *filename*'s database format, recognizing SCons' own
    .dblite format before deferring to the stdlib's whichdb detection."""
    # Direct hit: the caller already named a .dblite file.
    if filename.endswith(".dblite"):
        return "SCons.dblite"
    # Otherwise probe for a sibling file carrying the .dblite suffix.
    try:
        open(filename + ".dblite", "rb").close()
    except IOError:
        pass
    else:
        return "SCons.dblite"
    # Fall back to the original stdlib detector saved at monkeypatch time.
    return _orig_whichdb(filename)
# Route detection through my_whichdb so .dblite files are recognized,
# keeping the stdlib's detector around as the fallback.
_orig_whichdb = whichdb.whichdb
whichdb.whichdb = my_whichdb
def my_import(mname):
    """Import (possibly dotted) module *mname* via the imp machinery and
    return the leaf module object."""
    if '.' not in mname:
        # Simple name: resolve it on the default search path.
        fp, pathname, description = imp.find_module(mname)
    else:
        # Dotted name: import the parent package first, then look up the
        # leaf inside the parent's __path__.
        dot = mname.rfind('.')
        parent = my_import(mname[:dot])
        fp, pathname, description = imp.find_module(mname[dot+1:],
                                                    parent.__path__)
    return imp.load_module(mname, fp, pathname, description)
class Flagger(object):
    """Mapping-like flag store whose unset keys report a shared default.

    The default starts at 1 ("print every field") and drops to 0 as soon
    as any flag is set explicitly, so selecting one field implicitly
    deselects all the others.
    """
    default_value = 1

    def __setitem__(self, item, value):
        # Record the flag and flip this instance's default off.
        setattr(self, item, value)
        self.default_value = 0

    def __getitem__(self, item):
        # Unknown flags fall back to the (possibly flipped) default.
        try:
            return self.__dict__[item]
        except KeyError:
            return self.default_value
# Global CLI state, filled in by the option-processing loop below.
Do_Call = None             # handler selected by --format (db vs dir reader)
Print_Directories = []     # directories selected via -d/--dir
Print_Entries = []         # entries selected via -e/--entry
Print_Flags = Flagger()    # per-field print toggles (-a, -c, -i, -s, -t)
Verbose = 0                # -v/--verbose: label every field
Readable = 0               # -r/--readable: human-readable timestamps
def default_mapper(entry, name):
    """Stringify attribute *name* of *entry*; absent or broken attributes
    render as 'None'.

    Fix: the original built "entry.<name>" and ran it through eval() with
    a bare except, which (a) is needless dynamic evaluation of a
    constructed string and (b) swallowed KeyboardInterrupt/SystemExit.
    getattr() plus a narrower except preserves the behavior for the
    simple field names the callers pass.
    """
    try:
        val = getattr(entry, name)
    except Exception:
        # e.g. a property that raises — original also mapped this to None
        val = None
    return str(val)
def map_action(entry, name):
    """Render the stored build action as 'bactsig [bact]', or None when
    the entry carries no action information."""
    try:
        return '%s [%s]' % (entry.bactsig, entry.bact)
    except AttributeError:
        # Entry was built without action tracking.
        return None
def map_timestamp(entry, name):
    """Render entry.timestamp; a quoted ctime string when --readable was
    given, the raw number (or 'None') otherwise."""
    ts = getattr(entry, 'timestamp', None)
    if Readable and ts:
        return "'" + time.ctime(ts) + "'"
    return str(ts)
def map_bkids(entry, name):
    """Format the entry's build children (sources + depends + implicit),
    one node per line; None when the entry has none or lacks the lists."""
    try:
        kids = entry.bsources + entry.bdepends + entry.bimplicit
        sigs = entry.bsourcesigs + entry.bdependsigs + entry.bimplicitsigs
    except AttributeError:
        return None
    rendered = [nodeinfo_string(kid, sig, " ") for kid, sig in zip(kids, sigs)]
    if not rendered:
        return None
    return "\n ".join(rendered)
# Fields with specialized renderers; everything else uses default_mapper.
map_field = {
    'action' : map_action,
    'timestamp' : map_timestamp,
    'bkids' : map_bkids,
}
# CLI field names that are stored under a different attribute name.
map_name = {
    'implicit' : 'bkids',
}
def field(name, entry, verbose=Verbose):
    """Render field *name* of *entry* if its print flag is on; otherwise
    return None.

    Note: *verbose*'s default is bound at def time (Verbose == 0); the
    callers that care pass the live value explicitly.
    """
    if not Print_Flags[name]:
        return None
    # Translate the CLI name, then pick the matching renderer.
    mapper = map_field.get(map_name.get(name, name), default_mapper)
    text = mapper(entry, name)
    if verbose:
        text = name + ": " + text
    return text
def nodeinfo_raw(name, ninfo, prefix=""):
    """Dump *ninfo*'s attribute dict as 'name: {...}' with keys in a
    deterministic order (the declared field_list when present, else
    sorted), so output is stable across runs."""
    attrs = ninfo.__dict__
    try:
        keys = ninfo.field_list + ['_version_id']
    except AttributeError:
        keys = sorted(attrs.keys())
    body = ', '.join(['%s: %s' % (repr(k), repr(attrs.get(k))) for k in keys])
    # Quote names containing newlines so each record stays on one line.
    if '\n' in name:
        name = repr(name)
    return name + ': {' + body + '}'
def nodeinfo_cooked(name, ninfo, prefix=""):
    """Pretty-print *ninfo* as 'name: field field ...', honoring the
    per-field print flags and the global verbosity."""
    field_list = getattr(ninfo, 'field_list', [])
    if '\n' in name:
        name = repr(name)
    pieces = [name + ':']
    for fname in field_list:
        rendered = field(fname, ninfo, Verbose)
        if rendered:
            pieces.append(rendered)
    if Verbose:
        joiner = '\n ' + prefix
    else:
        joiner = ' '
    return joiner.join(pieces)

# Default renderer; the --raw option swaps in nodeinfo_raw instead.
nodeinfo_string = nodeinfo_cooked
def printfield(name, entry, prefix=""):
    # Print the implicit-dependency and action fields of one build-info
    # entry; field() returns None for anything whose print flag is off.
    outlist = field("implicit", entry, 0)
    if outlist:
        if Verbose:
            print " implicit:"
        print " " + outlist
    outact = field("action", entry, 0)
    if outact:
        if Verbose:
            print " action: " + outact
        else:
            print " " + outact
def printentries(entries, location):
    # Print the signature entries of one directory's map. *location* is
    # only used to label error messages.
    if Print_Entries:
        # Only the names requested via -e/--entry.
        for name in Print_Entries:
            try:
                entry = entries[name]
            except KeyError:
                sys.stderr.write("sconsign: no entry `%s' in `%s'\n" % (name, location))
            else:
                try:
                    ninfo = entry.ninfo
                except AttributeError:
                    # Old-style entry without node info: bare name only.
                    print name + ":"
                else:
                    print nodeinfo_string(name, entry.ninfo)
                printfield(name, entry.binfo)
    else:
        # No selection: print every entry, sorted for stable output.
        for name in sorted(entries.keys()):
            entry = entries[name]
            try:
                ninfo = entry.ninfo
            except AttributeError:
                print name + ":"
            else:
                print nodeinfo_string(name, entry.ninfo)
            printfield(name, entry.binfo)
class Do_SConsignDB(object):
def __init__(self, dbm_name, dbm):
self.dbm_name = dbm_name
self.dbm = dbm
def __call__(self, fname):
# The *dbm modules stick their own file suffixes on the names
# that are passed in. This is causes us to jump through some
# hoops here to be able to allow the user
try:
# Try opening the specified file name. Example:
# SPECIFIED OPENED BY self.dbm.open()
# --------- -------------------------
# .sconsign => .sconsign.dblite
# .sconsign.dblite => .sconsign.dblite.dblite
db = self.dbm.open(fname, "r")
except (IOError, OSError), e:
print_e = e
try:
# That didn't work, so try opening the base name,
# so that if the actually passed in 'sconsign.dblite'
# (for example), the dbm module will put the suffix back
# on for us and open it anyway.
db = self.dbm.open(os.path.splitext(fname)[0], "r")
except (IOError, OSError):
# That didn't work either. See if the file name
# they specified just exists (independent of the dbm
# suffix-mangling).
try:
open(fname, "r")
except (IOError, OSError), e:
# Nope, that file doesn't even exist, so report that
# fact back.
print_e = e
sys.stderr.write("sconsign: %s\n" % (print_e))
return
except KeyboardInterrupt:
raise
except pickle.UnpicklingError:
sys.stderr.write("sconsign: ignoring invalid `%s' file `%s'\n" % (self.dbm_name, fname))
return
except Exception, e:
sys.stderr.write("sconsign: ignoring invalid `%s' file `%s': %s\n" % (self.dbm_name, fname, e))
return
if Print_Directories:
for dir in Print_Directories:
try:
val = db[dir]
except KeyError:
sys.stderr.write("sconsign: no dir `%s' in `%s'\n" % (dir, args[0]))
else:
self.printentries(dir, val)
else:
for dir in sorted(db.keys()):
self.printentries(dir, db[dir])
def printentries(self, dir, val):
print '=== ' + dir + ':'
printentries(pickle.loads(val), dir)
def Do_SConsignDir(name):
try:
fp = open(name, 'rb')
except (IOError, OSError), e:
sys.stderr.write("sconsign: %s\n" % (e))
return
try:
sconsign = SCons.SConsign.Dir(fp)
except KeyboardInterrupt:
raise
except pickle.UnpicklingError:
sys.stderr.write("sconsign: ignoring invalid .sconsign file `%s'\n" % (name))
return
except Exception, e:
sys.stderr.write("sconsign: ignoring invalid .sconsign file `%s': %s\n" % (name, e))
return
printentries(sconsign.entries, args[0])
##############################################################################
import getopt
helpstr = """\
Usage: sconsign [OPTIONS] FILE [...]
Options:
-a, --act, --action Print build action information.
-c, --csig Print content signature information.
-d DIR, --dir=DIR Print only info about DIR.
-e ENTRY, --entry=ENTRY Print only info about ENTRY.
-f FORMAT, --format=FORMAT FILE is in the specified FORMAT.
-h, --help Print this message and exit.
-i, --implicit Print implicit dependency information.
-r, --readable Print timestamps in human-readable form.
--raw Print raw Python object representations.
-s, --size Print file sizes.
-t, --timestamp Print timestamp information.
-v, --verbose Verbose, describe each field.
"""
opts, args = getopt.getopt(sys.argv[1:], "acd:e:f:hirstv",
['act', 'action',
'csig', 'dir=', 'entry=',
'format=', 'help', 'implicit',
'raw', 'readable',
'size', 'timestamp', 'verbose'])
# Option-processing loop: fills in the Print_* globals and selects the
# file handler (Do_Call) when --format is given.
for o, a in opts:
    if o in ('-a', '--act', '--action'):
        Print_Flags['action'] = 1
    elif o in ('-c', '--csig'):
        Print_Flags['csig'] = 1
    elif o in ('-d', '--dir'):
        Print_Directories.append(a)
    elif o in ('-e', '--entry'):
        Print_Entries.append(a)
    elif o in ('-f', '--format'):
        # Map the format name to its backing module; 'sconsign' means the
        # raw directory-pickle format handled by Do_SConsignDir.
        Module_Map = {'dblite' : 'SCons.dblite',
                      'sconsign' : None}
        dbm_name = Module_Map.get(a, a)
        if dbm_name:
            try:
                # NOTE(review): bare except — any import failure at all is
                # reported as an illegal format.
                dbm = my_import(dbm_name)
            except:
                sys.stderr.write("sconsign: illegal file format `%s'\n" % a)
                print helpstr
                sys.exit(2)
            Do_Call = Do_SConsignDB(a, dbm)
        else:
            Do_Call = Do_SConsignDir
    elif o in ('-h', '--help'):
        print helpstr
        sys.exit(0)
    elif o in ('-i', '--implicit'):
        Print_Flags['implicit'] = 1
    elif o in ('--raw',):
        # Swap the pretty renderer for the raw dict dump.
        nodeinfo_string = nodeinfo_raw
    elif o in ('-r', '--readable'):
        Readable = 1
    elif o in ('-s', '--size'):
        Print_Flags['size'] = 1
    elif o in ('-t', '--timestamp'):
        Print_Flags['timestamp'] = 1
    elif o in ('-v', '--verbose'):
        Verbose = 1
# Process every file argument, either with the handler forced by
# --format or by sniffing each file's format via whichdb.
if Do_Call:
    for a in args:
        Do_Call(a)
else:
    for a in args:
        dbm_name = whichdb.whichdb(a)
        if dbm_name:
            Map_Module = {'SCons.dblite' : 'dblite'}
            dbm = my_import(dbm_name)
            Do_SConsignDB(Map_Module.get(dbm_name, dbm_name), dbm)(a)
        else:
            Do_SConsignDir(a)
sys.exit(0)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
################################################################################
##
## photivo
##
## Copyright (C) 2013 Jos De Laender <jos@de-laender.be>
##
## This file is part of photivo.
##
## photivo is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License version 3
## as published by the Free Software Foundation.
##
## photivo is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with photivo. If not, see <http://www.gnu.org/licenses/>.
##
################################################################################
import os.path
# Clone the shared Qt environment so local tweaks stay local.
Import('ptQtEnv')
ptLocalEnv = ptQtEnv.Clone()
ptLocalEnv.EnableQt4Modules(['QtGui','QtCore','QtNetwork'])
# Collect .ui forms up to two directories deep and run uic on them.
ptForms = Glob('*.ui')
ptForms += Glob('*/*.ui')
ptForms += Glob('*/*/*.ui')
ptLocalEnv.Uic4(ptForms)
ptLocalEnv.Append(CPPPATH = ['.'])
################################################################################
# rpath addition in case qt or tools is non default.
# XXX JDLA CHECKME
if ptLocalEnv['PT_TARGET_PLATFORM'] == 'posix' :
    ptQtBin = os.path.join(str(ptLocalEnv['QT4DIR']),'bin')
    if not ptQtBin in ptLocalEnv['PT_DEFAULT_PATH']:
        ptLocalEnv.Append(LINKFLAGS = ['-Wl,-rpath',ptLocalEnv['QT4_LIBPATH']])
    if not ptLocalEnv['PT_TOOLS_DIR'] in ptLocalEnv['PT_DEFAULT_PATH']:
        # TODO Currently roughly ok for linux. The libname needs 'variation'
        # Ask the compiler where its libstdc++ lives and rpath that dir.
        ptCommand = ptLocalEnv['CC'] + ' --print-file-name=libstdc++.so'
        ptLibStdc = os.path.normpath(os.popen(ptCommand).read().rstrip())
        ptLibStdcPath = os.path.dirname(ptLibStdc)
        ptLocalEnv.Append(LINKFLAGS = ['-Wl,-rpath',ptLibStdcPath])
################################################################################
# Summary
ptLogFile = ptLocalEnv['PT_LOGFILE']
ptVerboseConfig = ptLocalEnv['PT_VERBOSECONFIG']
# Echo the summary to the console only in verbose mode; ptPrintLog always
# writes it to the logfile.
ptDoPrint = False
if ptVerboseConfig:
    ptDoPrint = True
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,'Summary for Sources/SConscript')
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'QT4_BINPATH : ' + str(ptLocalEnv['QT4_BINPATH']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'QT4_LIBPATH : ' + str(ptLocalEnv['QT4_LIBPATH']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'QT4_MOC : ' + str(ptLocalEnv['QT4_MOC']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'QT4_UIC : ' + str(ptLocalEnv['QT4_UIC']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'QT4_RCC : ' + str(ptLocalEnv['QT4_RCC']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'CPPFLAGS : ' + str(ptLocalEnv.get('CPPFLAGS')))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'CPPPATH : ' + str(ptLocalEnv.get('CPPPATH')))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'CPPDEFINES : ' + str(ptLocalEnv.get('CPPDEFINES')))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'CCFLAGS : ' + str(ptLocalEnv.get('CCFLAGS')))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'CXXFLAGS : ' + str(ptLocalEnv.get('CXXFLAGS')))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'LINKFLAGS : ' + str(ptLocalEnv.get('LINKFLAGS')))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'LIBS : ' + str(ptLocalEnv.get('LIBS')))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'LIBPATH : ' + str(ptLocalEnv.get('LIBPATH')))
################################################################################
# Fixed, not generated sources.
# Currently this is cut/paste from photivoProject.pro.
# Alternatively it can be generated from it (if some IDE would generate).
# Or some intelligent Globbing can be done, in case not to many noise files
# are in.
# Check with Mike. Not 'standalone' but included I believe :
# ptSources += ['dcb/dcb_demosaicing.c']
# ptSources += ['perfectraw/lmmse_interpolate.c']
# ptSources += ['rawtherapee/amaze_interpolate.c']
# ptSources += ['rawtherapee/ca_correct.c']
# ptSources += ['rawtherapee/cfa_line_dn.c']
# ptSources += ['rawtherapee/green_equil.c']
# ptSources += ['vcd/ahd_interpolate_mod.c']
# ptSources += ['vcd/ahd_partial_interpolate.c']
# ptSources += ['vcd/es_median_filter.c']
# ptSources += ['vcd/median_filter_new.c']
# ptSources += ['vcd/refinement.c']
# ptSources += ['vcd/vcd_interpolate.c']
# ptSources += ['qtsingleapplication/qtlockedfile.cpp']
# ptSources += ['qtsingleapplication/qtlockedfile_unix.cpp']
# ptSources += ['qtsingleapplication/qtlockedfile_win.cpp']
# Explicit source list (kept in sync with photivoProject.pro by hand).
ptSources = []
# File manager.
ptSources += ['filemgmt/ptColumnGridThumbnailLayouter.cpp']
ptSources += ['filemgmt/ptFileMgrDM.cpp']
ptSources += ['filemgmt/ptFileMgrWindow.cpp']
ptSources += ['filemgmt/ptGraphicsSceneEmitter.cpp']
ptSources += ['filemgmt/ptGraphicsThumbGroup.cpp']
ptSources += ['filemgmt/ptGridThumbnailLayouter.cpp']
ptSources += ['filemgmt/ptImageView.cpp']
ptSources += ['filemgmt/ptPathBar.cpp']
ptSources += ['filemgmt/ptRowGridThumbnailLayouter.cpp']
ptSources += ['filemgmt/ptSingleDirModel.cpp']
ptSources += ['filemgmt/ptTagList.cpp']
ptSources += ['filemgmt/ptTagModel.cpp']
ptSources += ['filemgmt/ptThumbnailCache.cpp']
ptSources += ['filemgmt/ptThumbnailer.cpp']
# Filters.
ptSources += ['filters/ptCfgItem.cpp']
ptSources += ['filters/ptFilter_ABCurves.cpp']
ptSources += ['filters/ptFilter_ColorContrast.cpp']
ptSources += ['filters/ptFilter_DetailCurve.cpp']
ptSources += ['filters/ptFilter_GammaTool.cpp']
ptSources += ['filters/ptFilter_Highlights.cpp']
ptSources += ['filters/ptFilter_LumaDenoiseCurve.cpp']
ptSources += ['filters/ptFilter_LumaSatAdjust.cpp']
ptSources += ['filters/ptFilter_Outline.cpp']
ptSources += ['filters/ptFilter_SatCurve.cpp']
ptSources += ['filters/ptFilter_ShadowsHighlights.cpp']
ptSources += ['filters/ptFilter_SigContrast.cpp']
ptSources += ['filters/ptFilter_StdCurve.cpp']
ptSources += ['filters/ptFilter_ToneAdjust.cpp']
ptSources += ['filters/ptFilter_Wiener.cpp']
ptSources += ['filters/ptFilterBase.cpp']
ptSources += ['filters/ptFilterConfig.cpp']
ptSources += ['filters/ptFilterDM.cpp']
ptSources += ['filters/ptFilterFactory.cpp']
# Core.
ptSources += ['ptCalloc.cpp']
ptSources += ['ptChannelMixer.cpp']
ptSources += ['ptCheck.cpp']
ptSources += ['ptChoice.cpp']
ptSources += ['ptCimg.cpp']
ptSources += ['ptConfirmRequest.cpp']
ptSources += ['ptCurve.cpp']
ptSources += ['ptCurveWindow.cpp']
ptSources += ['ptDcRaw.cpp']
ptSources += ['ptError.cpp']
ptSources += ['ptFastBilateral.cpp']
ptSources += ['ptGridInteraction.cpp']
ptSources += ['ptGroupBox.cpp']
ptSources += ['ptGuiOptions.cpp']
ptSources += ['ptHistogramWindow.cpp']
ptSources += ['ptImage.cpp']
ptSources += ['ptImage_Cimg.cpp']
ptSources += ['ptImage_DRC.cpp']
ptSources += ['ptImage_EAW.cpp']
ptSources += ['ptImage_GM.cpp']
ptSources += ['ptImage_GMC.cpp']
ptSources += ['ptImage_Lensfun.cpp']
ptSources += ['ptImage_Lqr.cpp']
ptSources += ['ptImage_Pyramid.cpp']
ptSources += ['ptImage8.cpp']
ptSources += ['ptAbstractInteraction.cpp']
ptSources += ['ptImageHelper.cpp']
ptSources += ['ptInfo.cpp']
ptSources += ['ptInput.cpp']
ptSources += ['ptKernel.cpp']
ptSources += ['ptLensfun.cpp']
ptSources += ['ptLineInteraction.cpp']
ptSources += ['ptMain.cpp']
ptSources += ['ptMainWindow.cpp']
ptSources += ['ptMessageBox.cpp']
ptSources += ['ptParseCli.cpp']
ptSources += ['ptProcessor.cpp']
ptSources += ['ptReportOverlay.cpp']
ptSources += ['ptResizeFilters.cpp']
ptSources += ['ptRGBTemperature.cpp']
ptSources += ['ptRichRectInteraction.cpp']
ptSources += ['ptSettings.cpp']
ptSources += ['ptSimpleRectInteraction.cpp']
ptSources += ['ptSlider.cpp']
ptSources += ['ptTempFilterBase.cpp']
ptSources += ['ptTheme.cpp']
ptSources += ['ptToolBox.cpp']
ptSources += ['ptViewWindow.cpp']
ptSources += ['ptVisibleToolsView.cpp']
ptSources += ['ptWhiteBalances.cpp']
ptSources += ['ptWidget.cpp']
ptSources += ['ptWiener.cpp']
ptSources += ['qtsingleapplication/qtlocalpeer.cpp']
ptSources += ['qtsingleapplication/qtsingleapplication.cpp']
# Spot tools.
ptSources += ['filters/imagespot/ptFilter_SpotTuning.cpp']
ptSources += ['filters/imagespot/ptImageSpot.cpp']
ptSources += ['filters/imagespot/ptImageSpotEditor.cpp']
ptSources += ['filters/imagespot/ptImageSpotItemDelegate.cpp']
ptSources += ['filters/imagespot/ptImageSpotModel.cpp']
ptSources += ['filters/imagespot/ptTuningSpot.cpp']
#ptSources += ['filters/imagespot/ptRepairInteraction.cpp']
#ptSources += ['filters/imagespot/ptRepairSpot.cpp']
ptSources += ['filters/imagespot/ptSpotInteraction.cpp']
ptSources += ['filters/imagespot/ptSpotListWidget.cpp']
ptSources += ['ptTempFile.cpp']
ptSources += ['filters/imagespot/ptImageSpotList.cpp']
ptSources += ['filters/ptFilter_ColorIntensity.cpp']
ptSources += ['filters/ptFilter_Brightness.cpp']
ptSources += ['filters/ptFilter_ReinhardBrighten.cpp']
ptSources += ['filters/ptFilter_Normalization.cpp']
ptSources += ['filters/ptFilter_ColorEnhancement.cpp']
ptSources += ['filters/ptFilter_Levels.cpp']
ptSources += ['filters/ptFilter_LMHRecovery.cpp']
ptSources += ['filters/ptFilter_Drc.cpp']
ptSources += ['filters/ptFilter_LabTransform.cpp']
ptSources += ['filters/ptFilter_Saturation.cpp']
ptSources += ['filters/ptFilter_ColorBoost.cpp']
ptSources += ['filters/ptFilter_Tone.cpp']
# Batch mode.
ptSources += ['batch/ptJobListItem.cpp']
ptSources += ['batch/ptBatchWindow.cpp']
ptSources += ['batch/ptJobListModel.cpp']
# Windows-only helpers (taskbar integration, WinAPI wrappers).
if ptLocalEnv['PT_TARGET_PLATFORM'] in ['win32'] :
    ptSources += ['ptEcWin7.cpp']
    ptSources += ['ptWinApi.cpp']
# TODO Check me. This is now outside build dir.
# Maybe some copy action first or so ...
ptSources += [ptLocalEnv.GetLaunchDir() + '/qrc/photivo.qrc']
################################################################################
ptClear = ptLocalEnv.Program('ptClear',['ptClear.cpp'])
# NOTE(review): [ptSources] wraps the list once more, so Object() gets the
# whole list in a single call — confirm that's intentional.
ptObj = [ptLocalEnv.Object(i) for i in [ptSources]]
ptPhotivo = ptLocalEnv.Program('photivo',ptObj)
# Dependency via ptEmu.qrc and that is not scanned. XXX TODO REFINE
# TODO Depends(photivo,Glob('Icons/*'))
ptLocalEnv.Install(ptLocalEnv.GetLaunchDir(),[ptPhotivo,ptClear])
################################################################################
| Python |
#! /usr/bin/env python
#
# SCons - a Software Constructor
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Release metadata stamped in by the SCons packaging process.
__revision__ = "src/script/scons.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
__version__ = "2.2.0"
__build__ = "issue-2856:2676:d23b7a2f45e8[MODIFIED]"
__buildsys__ = "oberbrunner-dev"
__date__ = "2012/08/05 15:38:28"
__developer__ = "garyo"
import os
import sys
##############################################################################
# BEGIN STANDARD SCons SCRIPT HEADER
#
# This is the cut-and-paste logic so that a self-contained script can
# interoperate correctly with different SCons versions and installation
# locations for the engine. If you modify anything in this section, you
# should also change other scripts that use this same header.
##############################################################################
# Strip the script directory from sys.path() so on case-insensitive
# (WIN32) systems Python doesn't think that the "scons" script is the
# "SCons" package. Replace it with our own library directories
# (version-specific first, in case they installed by hand there,
# followed by generic) so we pick up the right version of the build
# engine modules if they're in either directory.
# Check to see if the python version is > 3.0 which is currently unsupported
# If so exit with error message
# Refuse to run under Python 3 (unsupported by SCons 2.x).
try:
    if sys.version_info >= (3,0,0):
        msg = "scons: *** SCons version %s does not run under Python version %s.\n\
Python 3.0 and later are not yet supported.\n"
        sys.stderr.write(msg % (__version__, sys.version.split()[0]))
        sys.exit(1)
except AttributeError:
    # Pre-1.6 Python has no sys.version_info
    # No need to check version as we then know the version is < 3.0.0 and supported
    pass
# Locate the SCons engine: build a list of candidate library directories
# (version-specific first, then generic) and prepend them to sys.path.
script_dir = sys.path[0]
if script_dir in sys.path:
    sys.path.remove(script_dir)
libs = []
if "SCONS_LIB_DIR" in os.environ:
    libs.append(os.environ["SCONS_LIB_DIR"])
local_version = 'scons-local-' + __version__
local = 'scons-local'
if script_dir:
    local_version = os.path.join(script_dir, local_version)
    local = os.path.join(script_dir, local)
libs.append(os.path.abspath(local_version))
libs.append(os.path.abspath(local))
scons_version = 'scons-%s' % __version__
# preferred order of scons lookup paths
prefs = []
try:
    import pkg_resources
except ImportError:
    pass
else:
    # when running from an egg add the egg's directory
    try:
        d = pkg_resources.get_distribution('scons')
    except pkg_resources.DistributionNotFound:
        pass
    else:
        prefs.append(d.location)
if sys.platform == 'win32':
    # sys.prefix is (likely) C:\Python*;
    # check only C:\Python*.
    prefs.append(sys.prefix)
    prefs.append(os.path.join(sys.prefix, 'Lib', 'site-packages'))
else:
    # On other (POSIX) platforms, things are more complicated due to
    # the variety of path names and library locations. Try to be smart
    # about it.
    if script_dir == 'bin':
        # script_dir is `pwd`/bin;
        # check `pwd`/lib/scons*.
        prefs.append(os.getcwd())
    else:
        if script_dir == '.' or script_dir == '':
            script_dir = os.getcwd()
        head, tail = os.path.split(script_dir)
        if tail == "bin":
            # script_dir is /foo/bin;
            # check /foo/lib/scons*.
            prefs.append(head)
    head, tail = os.path.split(sys.prefix)
    if tail == "usr":
        # sys.prefix is /foo/usr;
        # check /foo/usr/lib/scons* first,
        # then /foo/usr/local/lib/scons*.
        prefs.append(sys.prefix)
        prefs.append(os.path.join(sys.prefix, "local"))
    elif tail == "local":
        h, t = os.path.split(head)
        if t == "usr":
            # sys.prefix is /foo/usr/local;
            # check /foo/usr/local/lib/scons* first,
            # then /foo/usr/lib/scons*.
            prefs.append(sys.prefix)
            prefs.append(head)
        else:
            # sys.prefix is /foo/local;
            # check only /foo/local/lib/scons*.
            prefs.append(sys.prefix)
    else:
        # sys.prefix is /foo (ends in neither /usr or /local);
        # check only /foo/lib/scons*.
        prefs.append(sys.prefix)
    # NOTE(review): sys.version[:3] yields "3.1" on Python 3.10+; harmless
    # here since this script refuses to run under Python 3 above.
    temp = [os.path.join(x, 'lib') for x in prefs]
    temp.extend([os.path.join(x,
                              'lib',
                              'python' + sys.version[:3],
                              'site-packages') for x in prefs])
    prefs = temp
# Add the parent directory of the current python's library to the
# preferences. On SuSE-91/AMD64, for example, this is /usr/lib64,
# not /usr/lib.
try:
    libpath = os.__file__
except AttributeError:
    pass
else:
    # Split /usr/libfoo/python*/os.py to /usr/libfoo/python*.
    libpath, tail = os.path.split(libpath)
    # Split /usr/libfoo/python* to /usr/libfoo
    libpath, tail = os.path.split(libpath)
    # Check /usr/libfoo/scons*.
    prefs.append(libpath)
# Look first for 'scons-__version__' in all of our preference libs,
# then for 'scons'.
libs.extend([os.path.join(x, scons_version) for x in prefs])
libs.extend([os.path.join(x, 'scons') for x in prefs])
sys.path = libs + sys.path
##############################################################################
# END STANDARD SCons SCRIPT HEADER
##############################################################################
if __name__ == "__main__":
    # Import only after sys.path has been fixed up above so the right
    # engine version is found.
    import SCons.Script
    # this does all the work, and calls sys.exit
    # with the proper exit status when done.
    SCons.Script.main()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
"""SCons.Tool.qt4
Tool-specific initialization for Qt4.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001-7,2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Additionally in Photivo :
# Changes done in/for Photivo are largely traceable due to the
# use of pt-Prefixes.
################################################################################
##
## Photivo
##
## Copyright (C) 2013 Jos De Laender <jos@de-laender.be>
##
## This file is part of Photivo.
##
## Photivo is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License version 3
## as published by the Free Software Foundation.
##
## Photivo is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Photivo. If not, see <http://www.gnu.org/licenses/>.
##
################################################################################
import os.path
import re
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Scanner
import SCons.Tool
import SCons.Util
import sys
class ToolQt4Warning(SCons.Warnings.Warning):
    """Base class for all warnings emitted by this qt4 tool."""
    pass
class GeneratedMocFileNotIncluded(ToolQt4Warning):
    """Warns when a moc file generated from a cpp is not #include'd by it."""
    pass
class QtdirNotFound(ToolQt4Warning):
    """Warns/raised when no Qt4 installation directory can be located."""
    pass
SCons.Warnings.enableWarningClass(ToolQt4Warning)
# Compatibility shim: provide sorted() on ancient interpreters.
try:
    sorted
except NameError:
    # Pre-2.4 Python has no sorted() function.
    #
    # The pre-2.4 Python list.sort() method does not support
    # list.sort(key=) nor list.sort(reverse=) keyword arguments, so
    # we must implement the functionality of those keyword arguments
    # by hand instead of passing them to list.sort().
    def sorted(iterable, cmp=None, key=None, reverse=0):
        if key is not None:
            result = [(key(x), x) for x in iterable]
        else:
            result = iterable[:]
        if cmp is None:
            # Pre-2.3 Python does not support list.sort(None).
            result.sort()
        else:
            result.sort(cmp)
        if key is not None:
            result = [t1 for t0,t1 in result]
        if reverse:
            result.reverse()
        return result
# Matches the <file> entries of a Qt .qrc resource collection; used by
# the .qrc dependency scanner below.
qrcinclude_re = re.compile(r'<file[^>]*>([^<]*)</file>', re.M)
def transformToWinePath(path) :
    """Translate a Unix *path* to its Windows form via the 'winepath'
    tool, normalising backslashes to forward slashes."""
    translated = os.popen('winepath -w "%s"' % path).read()
    return translated.strip().replace('\\', '/')
# Header suffixes scanned for the Q_OBJECT macro.
header_extensions = [".h", ".hxx", ".hpp", ".hh"]
if SCons.Util.case_sensitive_suffixes('.h', '.H'):
    header_extensions.append('.H')
# TODO: The following two lines will work when integrated back to SCons
# TODO: Meanwhile the third line will do the work
#cplusplus = __import__('c++', globals(), locals(), [])
#cxx_suffixes = cplusplus.CXXSuffixes
# NOTE(review): '.c' being treated as a C++ suffix mirrors SCons'
# CXXSuffixes on case-insensitive platforms -- confirm before trimming.
cxx_suffixes = [".c", ".cxx", ".cpp", ".cc"]
def checkMocIncluded(target, source, env):
    """Post-moc check: warn (GeneratedMocFileNotIncluded) when the moc
    file generated from a cpp source is not #include'd by that source.
    Used as a secondary Action on the cxx moc builders below."""
    moc = target[0]
    cpp = source[0]
    # looks like cpp.includes is cleared before the build stage :-(
    # not really sure about the path transformations (moc.cwd? cpp.cwd?) :-/
    path = SCons.Defaults.CScan.path_function(env, moc.cwd)
    includes = SCons.Defaults.CScan(cpp, env, path)
    if not moc in includes:
        SCons.Warnings.warn(
            GeneratedMocFileNotIncluded,
            "Generated moc file '%s' is not included by '%s'" %
            (str(moc), str(cpp)))
def find_file(filename, paths, node_factory):
    """Return the first node for *filename* found in *paths*, else None.

    node_factory(filename, directory) constructs a candidate node; the
    first candidate whose rexists() is true wins.
    """
    for directory in paths:
        candidate = node_factory(filename, directory)
        if candidate.rexists():
            return candidate
    return None
class _Automoc:
    """
    Callable class, which works as an emitter for Programs, SharedLibraries and
    StaticLibraries.

    It inspects each source for Q_OBJECT usage (or, with strategy #1, for
    qtsolutions-style includes of moc output) and transparently adds the
    required moc builders/objects to the source list.
    """
    def __init__(self, objBuilderName):
        # Name of the object builder ('SharedObject'/'StaticObject') used to
        # compile generated moc files; resolved against env in __call__.
        self.objBuilderName = objBuilderName
        # some regular expressions:
        # Q_OBJECT detection
        self.qo_search = re.compile(r'[^A-Za-z0-9]Q_OBJECT[^A-Za-z0-9]')
        # cxx and c comment 'eater'
        self.ccomment = re.compile(r'/\*(.*?)\*/',re.S)
        self.cxxcomment = re.compile(r'//.*$',re.M)
        # we also allow Q_OBJECT in a literal string
        self.literal_qobject = re.compile(r'"[^\n]*Q_OBJECT[^\n]*"')
    def create_automoc_options(self, env):
        """
        Create a dictionary with variables related to Automocing,
        based on the current environment.
        Is executed once in the __call__ routine.
        """
        moc_options = {'auto_scan' : True,
                       'auto_scan_strategy' : 0,
                       'gobble_comments' : 0,
                       'debug' : 0,
                       'auto_cpppath' : True,
                       'cpppaths' : []}
        # Each option keeps its default when the corresponding construction
        # variable is unset or does not parse as an int.
        try:
            if int(env.subst('$QT4_AUTOSCAN')) == 0:
                moc_options['auto_scan'] = False
        except ValueError:
            pass
        try:
            moc_options['auto_scan_strategy'] = int(env.subst('$QT4_AUTOSCAN_STRATEGY'))
        except ValueError:
            pass
        try:
            moc_options['gobble_comments'] = int(env.subst('$QT4_GOBBLECOMMENTS'))
        except ValueError:
            pass
        try:
            moc_options['debug'] = int(env.subst('$QT4_DEBUG'))
        except ValueError:
            pass
        try:
            if int(env.subst('$QT4_AUTOMOC_SCANCPPPATH')) == 0:
                moc_options['auto_cpppath'] = False
        except ValueError:
            pass
        if moc_options['auto_cpppath']:
            # QT4_AUTOMOC_CPPPATH takes precedence over plain CPPPATH.
            paths = env.get('QT4_AUTOMOC_CPPPATH', [])
            if not paths:
                paths = env.get('CPPPATH', [])
            moc_options['cpppaths'].extend(paths)
        return moc_options
    def __automoc_strategy_simple(self, env, moc_options,
                                  cpp, cpp_contents, out_sources):
        """
        Default Automoc strategy (Q_OBJECT driven): detect a header file
        (alongside the current cpp/cxx) that contains a Q_OBJECT
        macro...and MOC it.
        If a Q_OBJECT macro is also found in the cpp/cxx itself,
        it gets MOCed too.
        """
        h=None
        for h_ext in header_extensions:
            # try to find the header file in the corresponding source
            # directory
            hname = self.splitext(cpp.name)[0] + h_ext
            h = find_file(hname, [cpp.get_dir()]+moc_options['cpppaths'], env.File)
            if h:
                if moc_options['debug']:
                    print "scons: qt4: Scanning '%s' (header of '%s')" % (str(h), str(cpp))
                h_contents = h.get_contents()
                if moc_options['gobble_comments']:
                    h_contents = self.ccomment.sub('', h_contents)
                    h_contents = self.cxxcomment.sub('', h_contents)
                # Neutralise Q_OBJECT occurrences inside string literals.
                h_contents = self.literal_qobject.sub('""', h_contents)
                break
        if not h and moc_options['debug']:
            print "scons: qt4: no header for '%s'." % (str(cpp))
        if h and self.qo_search.search(h_contents):
            # h file with the Q_OBJECT macro found -> add moc_cpp
            moc_cpp = env.Moc4(h)
            moc_o = self.objBuilder(moc_cpp)
            out_sources.extend(moc_o)
            if moc_options['debug']:
                print "scons: qt4: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(h), str(moc_cpp))
        if cpp and self.qo_search.search(cpp_contents):
            # cpp file with Q_OBJECT macro found -> add moc
            # (to be included in cpp)
            moc = env.Moc4(cpp)
            env.Ignore(moc, moc)
            if moc_options['debug']:
                print "scons: qt4: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(cpp), str(moc))
    def __automoc_strategy_include_driven(self, env, moc_options,
                                          cpp, cpp_contents, out_sources):
        """
        Automoc strategy #1 (include driven): searches for "include"
        statements of MOCed files in the current cpp/cxx file.
        This strategy tries to add support for the compilation
        of the qtsolutions...
        """
        if self.splitext(str(cpp))[1] in cxx_suffixes:
            added = False
            # Expected names of the moc output for the matching header
            # resp. for this cxx file itself.
            h_moc = "%s%s%s" % (env.subst('$QT4_XMOCHPREFIX'),
                                self.splitext(cpp.name)[0],
                                env.subst('$QT4_XMOCHSUFFIX'))
            cxx_moc = "%s%s%s" % (env.subst('$QT4_XMOCCXXPREFIX'),
                                  self.splitext(cpp.name)[0],
                                  env.subst('$QT4_XMOCCXXSUFFIX'))
            inc_h_moc = r'#include\s+"%s"' % h_moc
            inc_cxx_moc = r'#include\s+"%s"' % cxx_moc
            # Search for special includes in qtsolutions style
            if cpp and re.search(inc_h_moc, cpp_contents):
                # cpp file with #include directive for a MOCed header found -> add moc
                # Try to find header file
                h=None
                hname=""
                for h_ext in header_extensions:
                    # Try to find the header file in the
                    # corresponding source directory
                    hname = self.splitext(cpp.name)[0] + h_ext
                    h = find_file(hname, [cpp.get_dir()]+moc_options['cpppaths'], env.File)
                    if h:
                        if moc_options['debug']:
                            print "scons: qt4: Scanning '%s' (header of '%s')" % (str(h), str(cpp))
                        h_contents = h.get_contents()
                        if moc_options['gobble_comments']:
                            h_contents = self.ccomment.sub('', h_contents)
                            h_contents = self.cxxcomment.sub('', h_contents)
                        h_contents = self.literal_qobject.sub('""', h_contents)
                        break
                if not h and moc_options['debug']:
                    print "scons: qt4: no header for '%s'." % (str(cpp))
                if h and self.qo_search.search(h_contents):
                    # h file with the Q_OBJECT macro found -> add moc_cpp
                    moc_cpp = env.XMoc4(h)
                    env.Ignore(moc_cpp, moc_cpp)
                    added = True
                    # Removing file from list of sources, because it is not to be
                    # compiled but simply included by the cpp/cxx file.
                    for idx, s in enumerate(out_sources):
                        if hasattr(s, "sources") and len(s.sources) > 0:
                            if str(s.sources[0]) == h_moc:
                                out_sources.pop(idx)
                                break
                    if moc_options['debug']:
                        print "scons: qt4: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(h), str(h_moc))
                else:
                    if moc_options['debug']:
                        print "scons: qt4: found no Q_OBJECT macro in '%s', but a moc'ed version '%s' gets included in '%s'" % (str(h), inc_h_moc, cpp.name)
            if cpp and re.search(inc_cxx_moc, cpp_contents):
                # cpp file with #include directive for a MOCed cxx file found -> add moc
                if self.qo_search.search(cpp_contents):
                    moc = env.XMoc4(target=cxx_moc, source=cpp)
                    env.Ignore(moc, moc)
                    added = True
                    if moc_options['debug']:
                        print "scons: qt4: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(cpp), str(moc))
                else:
                    if moc_options['debug']:
                        print "scons: qt4: found no Q_OBJECT macro in '%s', although a moc'ed version '%s' of itself gets included" % (cpp.name, inc_cxx_moc)
            if not added:
                # Fallback to default Automoc strategy (Q_OBJECT driven)
                self.__automoc_strategy_simple(env, moc_options, cpp,
                                               cpp_contents, out_sources)
    def __call__(self, target, source, env):
        """
        Smart autoscan function. Gets the list of objects for the Program
        or Lib. Adds objects and builders for the special qt4 files.
        """
        moc_options = self.create_automoc_options(env)
        # some shortcuts used in the scanner
        self.splitext = SCons.Util.splitext
        self.objBuilder = getattr(env, self.objBuilderName)
        # The following is kind of hacky to get builders working properly (FIXME)
        objBuilderEnv = self.objBuilder.env
        self.objBuilder.env = env
        mocBuilderEnv = env.Moc4.env
        env.Moc4.env = env
        xMocBuilderEnv = env.XMoc4.env
        env.XMoc4.env = env
        # make a deep copy for the result; MocH objects will be appended
        out_sources = source[:]
        for obj in source:
            if not moc_options['auto_scan']:
                break
            if isinstance(obj,basestring):  # big kludge!
                print "scons: qt4: '%s' MAYBE USING AN OLD SCONS VERSION AND NOT CONVERTED TO 'File'. Discarded." % str(obj)
                continue
            if not obj.has_builder():
                # binary obj file provided
                if moc_options['debug']:
                    print "scons: qt4: '%s' seems to be a binary. Discarded." % str(obj)
                continue
            cpp = obj.sources[0]
            if not self.splitext(str(cpp))[1] in cxx_suffixes:
                if moc_options['debug']:
                    print "scons: qt4: '%s' is no cxx file. Discarded." % str(cpp)
                # c or fortran source
                continue
            try:
                cpp_contents = cpp.get_contents()
                if moc_options['gobble_comments']:
                    cpp_contents = self.ccomment.sub('', cpp_contents)
                    cpp_contents = self.cxxcomment.sub('', cpp_contents)
                cpp_contents = self.literal_qobject.sub('""', cpp_contents)
            except: continue # may be an still not generated source
            if moc_options['auto_scan_strategy'] == 0:
                # Default Automoc strategy (Q_OBJECT driven)
                self.__automoc_strategy_simple(env, moc_options,
                                               cpp, cpp_contents, out_sources)
            else:
                # Automoc strategy #1 (include driven)
                self.__automoc_strategy_include_driven(env, moc_options,
                                                       cpp, cpp_contents, out_sources)
        # restore the original env attributes (FIXME)
        self.objBuilder.env = objBuilderEnv
        env.Moc4.env = mocBuilderEnv
        env.XMoc4.env = xMocBuilderEnv
        # We return the set of source entries as sorted sequence, else
        # the order might accidentally change from one build to another
        # and trigger unwanted rebuilds. For proper sorting, a key function
        # has to be specified...FS.Entry (and Base nodes in general) do not
        # provide a __cmp__, for performance reasons.
        return (target, sorted(set(out_sources), key=lambda entry : str(entry)))
# Emitter instances hooked into PROGEMITTER/SHLIBEMITTER/LIBEMITTER in generate().
AutomocShared = _Automoc('SharedObject')
AutomocStatic = _Automoc('StaticObject')
def _detect(env):
"""Not really safe, but fast method to detect the Qt4 library"""
# TODO: check output of "moc -v" for correct version >= 4.0.0
try: return env['QT4DIR']
except KeyError: pass
try: return env['QTDIR']
except KeyError: pass
try: return os.environ['QT4DIR']
except KeyError: pass
try: return os.environ['QTDIR']
except KeyError: pass
moc = env.WhereIs('moc-qt4') or env.WhereIs('moc4') or env.WhereIs('moc')
if moc:
QT4DIR = os.path.dirname(os.path.dirname(moc))
SCons.Warnings.warn(
QtdirNotFound,
"QT4DIR variable is not defined, using moc executable as a hint (QT4DIR=%s)" % QT4DIR)
return QT4DIR
raise SCons.Errors.StopError(
QtdirNotFound,
"Could not detect Qt 4 installation")
return None
def __scanResources(node, env, path, arg):
    """Dependency scanner for .qrc files.

    Returns every file named by a <file> entry of the resource
    collection; directory entries are expanded recursively. All names
    stay relative to the .qrc file's directory.
    """
    def _walk(base, rel):
        # Collect files below base/rel recursively, keeping paths
        # relative to base.
        collected = []
        for entry in os.listdir(os.path.join(base, rel)):
            entry_rel = os.path.join(rel, entry)
            if os.path.isdir(os.path.join(base, entry_rel)):
                collected += _walk(base, entry_rel)
            else:
                collected.append(entry_rel)
        return collected
    entries = qrcinclude_re.findall(node.get_contents())
    base_dir = os.path.dirname(node.path)
    directories = [e for e in entries
                   if os.path.isdir(os.path.join(base_dir, e))]
    # Replace each directory entry by its recursive file contents.
    for d in directories:
        entries.remove(d)
        entries += _walk(base_dir, d)
    return entries
#
# Scanners
#
# Scanner registered for '.qrc' sources; delegates to __scanResources so
# every referenced resource file becomes a dependency of the rcc output.
__qrcscanner = SCons.Scanner.Scanner(name = 'qrcfile',
    function = __scanResources,
    argument = None,
    skeys = ['.qrc'])
#
# Emitters
#
def __qrc_path(head, prefix, tail, suffix):
    """Assemble the target path for a qrc-generated C++ file.

    With a directory *head* and a file *tail*, returns
    head/<prefix><tail><suffix>; with only one of them, decorates that
    part with prefix and suffix directly.
    """
    if not head:
        return "%s%s%s" % (prefix, tail, suffix)
    if tail:
        return os.path.join(head, "%s%s%s" % (prefix, tail, suffix))
    return "%s%s%s" % (prefix, head, suffix)
def __qrc_emitter(target, source, env):
    """Emitter for .qrc sources: derive the generated C++ file name from
    the .qrc file name plus $QT4_QRCCXXPREFIX/$QT4_QRCCXXSUFFIX."""
    src_base = os.path.splitext(SCons.Util.to_String(source[0]))[0]
    head, tail = None, src_base
    if src_base:
        head, tail = os.path.split(src_base)
    new_target = __qrc_path(head, env.subst('$QT4_QRCCXXPREFIX'),
                            tail, env.subst('$QT4_QRCCXXSUFFIX'))
    return new_target, source
#
# Action generators
#
def __moc_generator_from_h(source, target, env, for_signature):
    """Command generator: moc invocation for a header; CPPDEFINES are
    forwarded when $QT4_CPPDEFINES_PASSTOMOC evaluates to 1."""
    try:
        with_defines = int(env.subst('$QT4_CPPDEFINES_PASSTOMOC')) == 1
    except ValueError:
        with_defines = False
    if with_defines:
        return '$QT4_MOC $QT4_MOCDEFINES $QT4_MOCFROMHFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE'
    return '$QT4_MOC $QT4_MOCFROMHFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE'
def __moc_generator_from_cxx(source, target, env, for_signature):
    """Command generator: moc invocation for a cxx file, followed by the
    checkMocIncluded sanity action; CPPDEFINES are forwarded when
    $QT4_CPPDEFINES_PASSTOMOC evaluates to 1."""
    try:
        with_defines = int(env.subst('$QT4_CPPDEFINES_PASSTOMOC')) == 1
    except ValueError:
        with_defines = False
    if with_defines:
        cmd = '$QT4_MOC $QT4_MOCDEFINES $QT4_MOCFROMCXXFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE'
    else:
        cmd = '$QT4_MOC $QT4_MOCFROMCXXFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE'
    return [cmd, SCons.Action.Action(checkMocIncluded, None)]
def __mocx_generator_from_h(source, target, env, for_signature):
    """Command generator for the extended (qtsolutions-style) moc of a
    header; identical command line to __moc_generator_from_h."""
    try:
        with_defines = int(env.subst('$QT4_CPPDEFINES_PASSTOMOC')) == 1
    except ValueError:
        with_defines = False
    if with_defines:
        return '$QT4_MOC $QT4_MOCDEFINES $QT4_MOCFROMHFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE'
    return '$QT4_MOC $QT4_MOCFROMHFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE'
def __mocx_generator_from_cxx(source, target, env, for_signature):
    """Command generator for the extended (qtsolutions-style) moc of a
    cxx file, plus the checkMocIncluded sanity action."""
    try:
        with_defines = int(env.subst('$QT4_CPPDEFINES_PASSTOMOC')) == 1
    except ValueError:
        with_defines = False
    if with_defines:
        cmd = '$QT4_MOC $QT4_MOCDEFINES $QT4_MOCFROMCXXFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE'
    else:
        cmd = '$QT4_MOC $QT4_MOCFROMCXXFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE'
    return [cmd, SCons.Action.Action(checkMocIncluded, None)]
def __qrc_generator(source, target, env, for_signature):
    """Command generator for rcc.

    If $QT4_QRCFLAGS already carries a '-name' option, use it verbatim;
    otherwise derive the resource name from the source file's basename
    with $QT4_QRCSUFFIX stripped.
    """
    name_defined = False
    try:
        if env.subst('$QT4_QRCFLAGS').find('-name') >= 0:
            name_defined = True
    except ValueError:
        pass
    if name_defined:
        return '$QT4_RCC $QT4_QRCFLAGS $SOURCE -o $TARGET'
    # Bug fix: qrc_suffix was computed twice in the original; the second
    # (redundant) assignment has been removed.
    qrc_suffix = env.subst('$QT4_QRCSUFFIX')
    src = str(source[0])
    head, tail = os.path.split(src)
    if tail:
        src = tail
    if src.endswith(qrc_suffix):
        qrc_stem = src[:-len(qrc_suffix)]
    else:
        qrc_stem = src
    return '$QT4_RCC $QT4_QRCFLAGS -name %s $SOURCE -o $TARGET' % qrc_stem
#
# Builders
#
# .ts builder: (re)generates translation source files via lupdate.
__ts_builder = SCons.Builder.Builder(
        action = SCons.Action.Action('$QT4_LUPDATECOM','$QT4_LUPDATECOMSTR'),
        suffix = '.ts',
        source_factory = SCons.Node.FS.Entry)
# .qm builder: compiles .ts files into binary message files via lrelease.
__qm_builder = SCons.Builder.Builder(
        action = SCons.Action.Action('$QT4_LRELEASECOM','$QT4_LRELEASECOMSTR'),
        src_suffix = '.ts',
        suffix = '.qm')
# .qrc builder: turns resource collections into C++ sources via rcc.
# NOTE(review): "QT4_QRCCOMSTR" lacks the '$' prefix the other cmdstr
# entries use -- confirm whether that is intentional.
__qrc_builder = SCons.Builder.Builder(
        action = SCons.Action.CommandGeneratorAction(__qrc_generator,
            {"cmdstr":"QT4_QRCCOMSTR"}),
        source_scanner = __qrcscanner,
        src_suffix = '$QT4_QRCSUFFIX',
        suffix = '$QT4_QRCCXXSUFFIX',
        prefix = '$QT4_QRCCXXPREFIX',
        single_source = 1)
# Builders behind the ExplicitMoc4/ExplicitUic4 pseudo-builders below.
__ex_moc_builder = SCons.Builder.Builder(
        action = SCons.Action.CommandGeneratorAction(__moc_generator_from_h,
            {"cmdstr":"$QT4_MOCFROMHCOMSTR"}))
__ex_uic_builder = SCons.Builder.Builder(
        action = SCons.Action.Action('$QT4_UICCOM', '$QT4_UICCOMSTR'),
        src_suffix = '.ui')
#
# Wrappers (pseudo-Builders)
#
def Ts4(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder wrapper around the LUPDATE executable of Qt4.
    lupdate [options] [source-file|path]... -ts ts-files

    Targets default to being kept across 'scons -c' unless
    $QT4_CLEAN_TS evaluates to 1.
    """
    targets = target if SCons.Util.is_List(target) else [target]
    if not source:
        source = targets[:]
    sources = source if SCons.Util.is_List(source) else [source]
    # Check QT4_CLEAN_TS and use NoClean() function
    try:
        clean_ts = int(env.subst('$QT4_CLEAN_TS')) == 1
    except ValueError:
        clean_ts = False
    result = []
    for tgt in targets:
        built = __ts_builder.__call__(env, tgt, sources, **kw)
        # Prevent deletion of the .ts file, unless explicitly specified
        if not clean_ts:
            env.NoClean(built)
        # Always make our target "precious", such that it is not deleted
        # prior to a rebuild
        env.Precious(built)
        # Add to resulting target list
        result.extend(built)
    return result
def Qm4(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder wrapper around the LRELEASE executable of Qt4.
    lrelease [options] ts-files [-qm qm-file]
    """
    targets = target if SCons.Util.is_List(target) else [target]
    if not source:
        source = targets[:]
    sources = source if SCons.Util.is_List(source) else [source]
    result = []
    for tgt in targets:
        result += __qm_builder.__call__(env, tgt, sources, **kw)
    return result
def Qrc4(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder wrapper around the RCC executable of Qt4.
    rcc [options] qrc-files -o out-file

    Unlike Ts4/Qm4, targets and sources are paired element-wise.
    """
    targets = target if SCons.Util.is_List(target) else [target]
    if not source:
        source = targets[:]
    sources = source if SCons.Util.is_List(source) else [source]
    result = []
    for tgt, src in zip(targets, sources):
        result += __qrc_builder.__call__(env, tgt, src, **kw)
    return result
def ExplicitMoc4(env, target, source, *args, **kw):
    """
    A pseudo-Builder wrapper around the MOC executable of Qt4.
    moc [options] <header-file>
    """
    targets = target if SCons.Util.is_List(target) else [target]
    sources = source if SCons.Util.is_List(source) else [source]
    result = []
    for tgt in targets:
        # Is it a header or a cxx file?
        result += __ex_moc_builder.__call__(env, tgt, sources, **kw)
    return result
def ExplicitUic4(env, target, source, *args, **kw):
    """
    A pseudo-Builder wrapper around the UIC executable of Qt4.
    uic [options] <uifile>
    """
    targets = target if SCons.Util.is_List(target) else [target]
    sources = source if SCons.Util.is_List(source) else [source]
    result = []
    for tgt in targets:
        result += __ex_uic_builder.__call__(env, tgt, sources, **kw)
    return result
def generate(env):
    """Add Builders and construction variables for qt4 to an Environment."""
    def locateQt4Command(env, command, qtdir) :
        # Locate a Qt4 tool ('moc', 'uic', ...) under qtdir/bin or on the
        # PATH, trying the usual '-qt4'/'4'/'.exe' suffix variants.
        # Take cross into account.
        ptCrossCommand = env['PT_CROSS'] + command
        # Decorations (linux and msys/mingw/cygwin accept. env in linux)
        # We issue something like 'ID=DSCONS_UIC /path/to/uic'
        # This is just for recognizing at command print time.
        ptDecoration = ''
        if command == 'moc':
            ptDecoration = 'ID=DSCONS_MOC'
        elif command == 'uic' :
            ptDecoration = 'ID=DSCONS_UIC'
        elif command == 'rcc' :
            ptDecoration = 'ID=DSCONS_RCC'
        elif command == 'lupdate' :
            ptDecoration = 'ID=DSCONS_LUPDATE'
        elif command == 'lrelease' :
            ptDecoration = 'ID=DSCONS_LRELEASE'
        if sys.platform.startswith('win') :
            ptDecoration = ''
        suffixes = [
            '-qt4',
            '-qt4.exe',
            '4',
            '4.exe',
            '',
            '.exe',
        ]
        triedPaths = []
        for suffix in suffixes :
            fullpath = os.path.join(qtdir,'bin',ptCrossCommand + suffix)
            if os.access(fullpath, os.X_OK) :
                return ptDecoration + ' ' + fullpath
            triedPaths.append(fullpath)
        # Fall back to whatever SCons can detect on the PATH.
        fullpath = env.Detect([ptCrossCommand+'-qt4',
                               ptCrossCommand+'4',
                               ptCrossCommand])
        if not (fullpath is None) : return ptDecoration + ' ' + fullpath
        # lupdate/lrelease are optional; everything else is fatal.
        if command in ('lupdate','lrelease'):
            print 'Qt4 could not locate \'' + \
                  ptCrossCommand + '\' ' + \
                  '(This might be acceptable)'
            return None
        raise Exception("Qt4 command '" + command + "' not found. Tried: " + ', '.join(triedPaths))
    CLVar = SCons.Util.CLVar
    Action = SCons.Action.Action
    Builder = SCons.Builder.Builder
    env['QT4DIR'] = _detect(env)
    # TODO: 'Replace' should be 'SetDefault'
    # env.SetDefault(
    env.Replace(
        QT4DIR = _detect(env),
        QT4_BINPATH = os.path.join('$QT4DIR', 'bin'),
        QT4_LIBPATH = os.path.join('$QT4DIR', 'lib'),
        # TODO: This is not reliable to QT4DIR value changes but needed in order to support '-qt4' variants
        QT4_MOC = locateQt4Command(env,'moc', env['QT4DIR']),
        QT4_UIC = locateQt4Command(env,'uic', env['QT4DIR']),
        QT4_RCC = locateQt4Command(env,'rcc', env['QT4DIR']),
        QT4_LUPDATE = locateQt4Command(env,'lupdate', env['QT4DIR']),
        QT4_LRELEASE = locateQt4Command(env,'lrelease', env['QT4DIR']),
        QT4_AUTOSCAN = 1, # Should the qt4 tool try to figure out, which sources are to be moc'ed?
        QT4_AUTOSCAN_STRATEGY = 0, # While scanning for files to moc, should we search for includes in qtsolutions style?
        QT4_GOBBLECOMMENTS = 0, # If set to 1, comments are removed before scanning cxx/h files.
        QT4_CPPDEFINES_PASSTOMOC = 1, # If set to 1, all CPPDEFINES get passed to the moc executable.
        QT4_CLEAN_TS = 0, # If set to 1, translation files (.ts) get cleaned on 'scons -c'
        QT4_AUTOMOC_SCANCPPPATH = 1, # If set to 1, the CPPPATHs (or QT4_AUTOMOC_CPPPATH) get scanned for moc'able files
        QT4_AUTOMOC_CPPPATH = [], # Alternative paths that get scanned for moc files
        # Some Qt4 specific flags. I don't expect someone wants to
        # manipulate those ...
        QT4_UICFLAGS = CLVar(''),
        QT4_MOCFROMHFLAGS = CLVar(''),
        QT4_MOCFROMCXXFLAGS = CLVar('-i'),
        QT4_QRCFLAGS = '',
        QT4_LUPDATEFLAGS = '',
        QT4_LRELEASEFLAGS = '',
        # suffixes/prefixes for the headers / sources to generate
        QT4_UISUFFIX = '.ui',
        QT4_UICDECLPREFIX = 'ui_',
        QT4_UICDECLSUFFIX = '.h',
        QT4_MOCINCPREFIX = '-I',
        QT4_MOCHPREFIX = 'moc_',
        QT4_MOCHSUFFIX = '$CXXFILESUFFIX',
        QT4_MOCCXXPREFIX = '',
        QT4_MOCCXXSUFFIX = '.moc',
        QT4_QRCSUFFIX = '.qrc',
        QT4_QRCCXXSUFFIX = '$CXXFILESUFFIX',
        QT4_QRCCXXPREFIX = 'qrc_',
        QT4_MOCDEFPREFIX = '-D',
        QT4_MOCDEFSUFFIX = '',
        QT4_MOCDEFINES = '${_defines(QT4_MOCDEFPREFIX, CPPDEFINES, QT4_MOCDEFSUFFIX, __env__)}',
        QT4_MOCCPPPATH = [],
        QT4_MOCINCFLAGS = '$( ${_concat(QT4_MOCINCPREFIX, QT4_MOCCPPPATH, INCSUFFIX, __env__, RDirs)} $)',
        # Commands for the qt4 support ...
        QT4_UICCOM = '$QT4_UIC $QT4_UICFLAGS -o $TARGET $SOURCE',
        QT4_LUPDATECOM = '$QT4_LUPDATE $QT4_LUPDATEFLAGS $SOURCES -ts $TARGET',
        QT4_LRELEASECOM = '$QT4_LRELEASE $QT4_LRELEASEFLAGS -qm $TARGET $SOURCES',
        # Specialized variables for the Extended Automoc support
        # (Strategy #1 for qtsolutions)
        QT4_XMOCHPREFIX = 'moc_',
        QT4_XMOCHSUFFIX = '.cpp',
        QT4_XMOCCXXPREFIX = '',
        QT4_XMOCCXXSUFFIX = '.moc',
        )
    # Register the pseudo-builders; AddMethod only exists on SCons >= 0.98.
    try:
        env.AddMethod(Ts4, "Ts4")
        env.AddMethod(Qm4, "Qm4")
        env.AddMethod(Qrc4, "Qrc4")
        env.AddMethod(ExplicitMoc4, "ExplicitMoc4")
        env.AddMethod(ExplicitUic4, "ExplicitUic4")
    except AttributeError:
        # Looks like we use a pre-0.98 version of SCons...
        from SCons.Script.SConscript import SConsEnvironment
        SConsEnvironment.Ts4 = Ts4
        SConsEnvironment.Qm4 = Qm4
        SConsEnvironment.Qrc4 = Qrc4
        SConsEnvironment.ExplicitMoc4 = ExplicitMoc4
        SConsEnvironment.ExplicitUic4 = ExplicitUic4
    # Interface builder
    uic4builder = Builder(
        action = SCons.Action.Action('$QT4_UICCOM', '$QT4_UICCOMSTR'),
        src_suffix='$QT4_UISUFFIX',
        suffix='$QT4_UICDECLSUFFIX',
        prefix='$QT4_UICDECLPREFIX',
        single_source = True
        #TODO: Consider the uiscanner on new scons version
        )
    env['BUILDERS']['Uic4'] = uic4builder
    # Metaobject builder
    mocBld = Builder(action={}, prefix={}, suffix={})
    for h in header_extensions:
        act = SCons.Action.CommandGeneratorAction(__moc_generator_from_h,
            {"cmdstr":"$QT4_MOCFROMHCOMSTR"})
        mocBld.add_action(h, act)
        mocBld.prefix[h] = '$QT4_MOCHPREFIX'
        mocBld.suffix[h] = '$QT4_MOCHSUFFIX'
    for cxx in cxx_suffixes:
        act = SCons.Action.CommandGeneratorAction(__moc_generator_from_cxx,
            {"cmdstr":"$QT4_MOCFROMCXXCOMSTR"})
        mocBld.add_action(cxx, act)
        mocBld.prefix[cxx] = '$QT4_MOCCXXPREFIX'
        mocBld.suffix[cxx] = '$QT4_MOCCXXSUFFIX'
    env['BUILDERS']['Moc4'] = mocBld
    # Metaobject builder for the extended auto scan feature
    # (Strategy #1 for qtsolutions)
    xMocBld = Builder(action={}, prefix={}, suffix={})
    for h in header_extensions:
        act = SCons.Action.CommandGeneratorAction(__mocx_generator_from_h,
            {"cmdstr":"$QT4_MOCFROMHCOMSTR"})
        xMocBld.add_action(h, act)
        xMocBld.prefix[h] = '$QT4_XMOCHPREFIX'
        xMocBld.suffix[h] = '$QT4_XMOCHSUFFIX'
    for cxx in cxx_suffixes:
        act = SCons.Action.CommandGeneratorAction(__mocx_generator_from_cxx,
            {"cmdstr":"$QT4_MOCFROMCXXCOMSTR"})
        xMocBld.add_action(cxx, act)
        xMocBld.prefix[cxx] = '$QT4_XMOCCXXPREFIX'
        xMocBld.suffix[cxx] = '$QT4_XMOCCXXSUFFIX'
    env['BUILDERS']['XMoc4'] = xMocBld
    # Add the Qrc4 action to the CXX file builder (registers the
    # *.qrc extension with the Environment)
    cfile_builder, cxxfile_builder = SCons.Tool.createCFileBuilders(env)
    qrc_act = SCons.Action.CommandGeneratorAction(__qrc_generator,
        {"cmdstr":"$QT4_QRCCOMSTR"})
    cxxfile_builder.add_action('$QT4_QRCSUFFIX', qrc_act)
    cxxfile_builder.add_emitter('$QT4_QRCSUFFIX', __qrc_emitter)
    # We use the emitters of Program / StaticLibrary / SharedLibrary
    # to scan for moc'able files
    # We can't refer to the builders directly, we have to fetch them
    # as Environment attributes because that sets them up to be called
    # correctly later by our emitter.
    env.AppendUnique(PROGEMITTER =[AutomocStatic],
                     SHLIBEMITTER=[AutomocShared],
                     LIBEMITTER  =[AutomocStatic],
                     )
    # TODO: Does dbusxml2cpp need an adapter
    try:
        env.AddMethod(enable_modules, "EnableQt4Modules")
    except AttributeError:
        # Looks like we use a pre-0.98 version of SCons...
        from SCons.Script.SConscript import SConsEnvironment
        SConsEnvironment.EnableQt4Modules = enable_modules
def enable_modules(self, modules, debug=False) :
import sys
validModules = [
'QtCore',
'QtGui',
'QtOpenGL',
'Qt3Support',
'QtAssistant', # deprecated
'QtAssistantClient',
'QtScript',
'QtDBus',
'QtSql',
'QtSvg',
# The next modules have not been tested yet so, please
# maybe they require additional work on non Linux platforms
'QtNetwork',
'QtTest',
'QtXml',
'QtXmlPatterns',
'QtUiTools',
'QtDesigner',
'QtDesignerComponents',
'QtWebKit',
'QtHelp',
'QtScript',
'QtScriptTools',
'QtMultimedia',
]
pclessModules = [
# in qt <= 4.3 designer and designerComponents are pcless, on qt4.4 they are not, so removed.
# 'QtDesigner',
# 'QtDesignerComponents',
]
staticModules = [
'QtUiTools',
]
invalidModules=[]
for module in modules:
if module not in validModules :
invalidModules.append(module)
if invalidModules :
raise Exception("Modules %s are not Qt4 modules. Valid Qt4 modules are: %s"% (
str(invalidModules),str(validModules)))
moduleDefines = {
'QtScript' : ['QT_SCRIPT_LIB'],
'QtSvg' : ['QT_SVG_LIB'],
'Qt3Support' : ['QT_QT3SUPPORT_LIB','QT3_SUPPORT'],
'QtSql' : ['QT_SQL_LIB'],
'QtXml' : ['QT_XML_LIB'],
'QtOpenGL' : ['QT_OPENGL_LIB'],
'QtGui' : ['QT_GUI_LIB'],
'QtNetwork' : ['QT_NETWORK_LIB'],
'QtCore' : ['QT_CORE_LIB'],
}
for module in modules :
try : self.AppendUnique(CPPDEFINES=moduleDefines[module])
except: pass
debugSuffix = ''
if sys.platform in ["darwin", "linux2", "win32"] :
if debug :
if sys.platform in ["win32"] :
debugSuffix = 'd'
else :
debugSuffix = '_debug'
for module in modules :
if module not in pclessModules : continue
self.AppendUnique(LIBS=[module+debugSuffix])
self.AppendUnique(LIBPATH=[os.path.join("$QT4DIR","lib")])
self.AppendUnique(CPPPATH=[os.path.join("$QT4DIR","include","qt4")])
self.AppendUnique(CPPPATH=[os.path.join("$QT4DIR","include","qt4",module)])
pcmodules = [module+debugSuffix for module in modules if module not in pclessModules ]
if 'QtDBus' in pcmodules:
self.AppendUnique(CPPPATH=[os.path.join("$QT4DIR","include","qt4","QtDBus")])
if "QtAssistant" in pcmodules:
self.AppendUnique(CPPPATH=[os.path.join("$QT4DIR","include","qt4","QtAssistant")])
self["QT4_MOCCPPPATH"] = self["CPPPATH"]
return
else :
print "CHECK ME. SHOULDN'T"
Exit(1)
def exists(env):
    """SCons tool hook: report whether Qt4 can be detected for *env*."""
    return _detect(env)
| Python |
#! /usr/bin/env python
import os
from os.path import exists
import shutil
#name of photivo directory
photivo_dir='photivo-s'
home_dir = os.getenv("HOME")
source_dir=home_dir +'/'+photivo_dir
target_dir = home_dir +'/'+photivo_dir + "/Photivo.app/Contents"
AppBundle= home_dir + '/' + photivo_dir + "/Photivo.app"
os.makedirs(target_dir)
shutil.copy(source_dir+'/Info.plist', target_dir)
os.chdir( target_dir )
dir_tree = 'Frameworks/MacOS/Resources'
tree_group = dir_tree.split("/")
for item in tree_group: # Removes any empty strings from the list
if item == "":
tree_group.remove(item)
#os.mkdir(item)
if item=='MacOS':
os.mkdir(item)
for elm in ['Curves','LensfunDatabase','ChannelMixers','Presets','Profiles','Themes','photivo']:
if os.path.isdir(source_dir + '/' + elm) == True:
print elm + ' dir copied'
shutil.copytree(source_dir + '/' + elm, item + '/' + elm)
#distutils.dir_util.copy_tree
else:
print elm + ' file copied'
shutil.copy(source_dir + '/' + elm, item)
elif item=='Resources':
os.mkdir(item)
shutil.copy(source_dir+'/photivo-appicon.icns', item)
else:
os.mkdir(item)
#print AppBundle
from subprocess import call
if call('/usr/bin/macdeployqt ' + AppBundle , shell=True)==0:
print 'Bundle building finished!'
else:
print 'There was a problem building the bundle'
| Python |
################################################################################
##
## Photivo
##
## Copyright (C) 2013 Jos De Laender <jos@de-laender.be>
##
## This file is part of Photivo.
##
## Photivo is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License version 3
## as published by the Free Software Foundation.
##
## Photivo is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Photivo. If not, see <http://www.gnu.org/licenses/>.
##
################################################################################
import os
import shutil
import subprocess
import sys
import tempfile
################################################################################
# Constants.
# ANSI escape sequences for coloured terminal output. They default to
# empty strings so that colouring degrades gracefully on terminals (or
# platforms) that cannot render it, and ptPrintLog below can use them
# unconditionally.
ptNoAttrs = ''
ptBold = ''
ptNegative = ''
ptBlack = ''
ptRed = ''
ptGreen = ''
ptYellow = ''
ptBlue = ''
ptMagenta = ''
ptCyan = ''
ptWhite = ''
ptBoldRed = ''
ptBoldGreen = ''
ptBoldYellow = ''
ptBoldBlue = ''
ptBoldMagenta= ''
ptBoldCyan = ''
ptBoldWhite = ''
# Do we have colors in win32 ?
# (Plain win32 consoles need colorama to interpret ANSI escapes.)
ptHaveColors = True
if sys.platform in ['win32'] :
    ptHaveColors = False
    try:
        from colorama import init
        init()
        ptHaveColors = True
    except :
        print '\nTIP : Installing colorama would give you coloured output.\n'
        pass
# Only emit escape codes when writing to a real terminal.
if ptHaveColors and sys.stdout.isatty() :
    ptNoAttrs = '\033[0m'
    ptBold = '\033[1m'
    ptNegative = '\033[7m'
    ptBlack = '\033[30m'
    ptRed = '\033[31m'
    ptGreen = '\033[32m'
    ptYellow = '\033[33m'
    ptBlue = '\033[34m'
    ptMagenta = '\033[35m'
    ptCyan = '\033[36m'
    ptWhite = '\033[37m'
    ptBoldRed = '\033[1;31m'
    ptBoldGreen = '\033[1;32m'
    ptBoldYellow = '\033[1;33m'
    ptBoldBlue = '\033[1;34m'
    ptBoldMagenta= '\033[1;35m'
    ptBoldCyan = '\033[1;36m'
    ptBoldWhite = '\033[1;37m'
################################################################################
# Prints to screen and to log. With color to screen.
def ptPrintLog(DoPrint,LogFile,Color,Message):
if DoPrint :
MyMessage = Color + Message + ptNoAttrs
print MyMessage
LogFile.write(Message + '\n')
return None
################################################################################
# Check for sufficient GCC. Simply returns true if OK.
def ptCheckGCCVersion(BuildEnv,MinVersion):
    """Return True when both CC and CXX report a version >= MinVersion.

    MinVersion is a dotted string like '4.7.2'. The compilers are probed
    with '-dumpversion' under the minimal environment from
    BuildEnv['ENV'], which is restored afterwards.
    """
    ptPrintLog(True,BuildEnv['PT_LOGFILE'],
               ptBoldBlue,
               'Checking for GCC >= ' + MinVersion + ' ... ')
    # Make sure we work with correct and minimum os.environ. Save previous.
    ptSavedEnviron = dict(os.environ)
    os.environ.clear()
    for Key in BuildEnv['ENV'].keys():
        os.environ[Key] = BuildEnv['ENV'][Key]
    ptCC = BuildEnv['CC']
    ptCXX = BuildEnv['CXX']
    ptCCVersion = os.popen(ptCC + ' -dumpversion').read().rstrip().split('.')
    ptCXXVersion = os.popen(ptCXX + ' -dumpversion').read().rstrip().split('.')
    # Restore env
    os.environ.clear()
    os.environ.update(ptSavedEnviron)
    ptMinVersion = MinVersion.split('.')
    # Bug fix: the original compared version components as *strings*, so
    # e.g. '10' < '9' and GCC >= 10 failed the check. Compare numerically,
    # padding short versions (some report '4.7' for '4.7.2') with zeros.
    def _numeric(parts, width):
        nums = []
        for part in parts:
            try:
                nums.append(int(part))
            except ValueError:
                nums.append(0)
        while len(nums) < width:
            nums.append(0)
        return tuple(nums)
    width = max(len(ptCCVersion), len(ptCXXVersion), len(ptMinVersion))
    minimum = _numeric(ptMinVersion, width)
    return (_numeric(ptCCVersion, width) >= minimum and
            _numeric(ptCXXVersion, width) >= minimum)
################################################################################
# Get GCC/CXX version
def ptGetGCCVersion(BuildEnv) :
    """Return [cc_version, cxx_version] as reported by '-dumpversion'.

    The compilers are resolved through BuildEnv.WhereIs() and executed under
    the build's minimal environment, which is restored afterwards.
    """
    # Temporarily swap os.environ for the build's minimal environment.
    previous = dict(os.environ)
    os.environ.clear()
    for key, value in BuildEnv['ENV'].items():
        os.environ[key] = value
    cc = BuildEnv.WhereIs(BuildEnv['CC'])
    cxx = BuildEnv.WhereIs(BuildEnv['CXX'])
    versions = [os.popen(tool + ' -dumpversion').read().rstrip()
                for tool in (cc, cxx)]
    # Put the previous environment back.
    os.environ.clear()
    os.environ.update(previous)
    return versions
################################################################################
# Check hg
def ptCheckHg(Context):
    """Check that the 'hg' executable can be run; report via Context.Result()."""
    ptPrintLog(True, Context.env['PT_LOGFILE'],
               ptBoldBlue,
               'Checking for hg ... ')
    status = Context.TryAction('hg')[0]
    Context.Result(status)
    return status
################################################################################
# Get AppVersion
def ptGetAppVersion():
    """Return the application version string derived from the Mercurial repo.

    Combines the '{date|shortdate} (rev {node|short})' output of 'hg log'
    with the working-copy-modified marker character from 'hg identify'.
    """
    # BUGFIX: call 'hg identify' once and slice its output. The original ran
    # the same command twice, which is slower and racy if the working copy
    # changes between the two calls.
    ptIdentify = os.popen('hg identify').read()
    # NOTE(review): [:11] takes only 11 of the 12 short-hash characters —
    # works as an unambiguous revision prefix for 'hg log', but looks like
    # an off-by-one; confirm before changing.
    ptHgRev = ptIdentify[:11]
    ptChanged = ptIdentify[12]
    ptAppVer = os.popen(
        'hg log --rev ' + ptHgRev + \
        ' --template "{date|shortdate} (rev {node|short})"').read()
    return ptAppVer + ptChanged
################################################################################
# Get the package version and flags for packages handled by pkg-config
def ptGetPKGOutput(Context,Name):
    """Return [version, compile+link flags] for package Name via pkg-config."""
    # Query under the build's correct, minimal environment; restore after.
    previous = dict(os.environ)
    os.environ.clear()
    for key in Context.env['ENV']:
        os.environ[key] = Context.env['ENV'][key]
    tool = Context.env['PT_CROSS'] + 'pkg-config'
    version = os.popen(tool + ' --modversion ' + Name).read().rstrip()
    flags = os.popen(tool + ' --cflags --libs ' + Name).read().rstrip()
    os.environ.clear()
    os.environ.update(previous)
    return [version, flags]
################################################################################
def ptCheckPKGConfig(Context,MinVersion):
    """Check that (cross-)pkg-config exists and is at least MinVersion."""
    tool = Context.env['PT_CROSS'] + 'pkg-config'
    ptPrintLog(True, Context.env['PT_LOGFILE'],
               ptBoldBlue,
               'Checking for ' + tool + ' ... ')
    status = Context.TryAction(
        tool + ' --atleast-pkgconfig-version=' + MinVersion)[0]
    Context.Result(status)
    return status
################################################################################
def ptCheckPKG(Context,Name):
    """Check that package spec Name (possibly with a version test such as
    'foo >= 1.2') is known to pkg-config; report via Context.Result()."""
    ptPrintLog(True, Context.env['PT_LOGFILE'],
               ptBoldBlue,
               'Checking for ' + Name + '... ')
    tool = Context.env['PT_CROSS'] + 'pkg-config'
    if sys.platform in ['win32'] :
        command = tool + ' --exists %s' % Name
        # WIN32 shell escape of >
        command = command.replace(">","^>")
    else :
        command = tool + ' --exists \'%s\'' % Name
    status = Context.TryAction(command)[0]
    Context.Result(status)
    return status
################################################################################
def ptCheckLibWithHeader(Context,Lib,Header,Language):
    """Check that library Lib is usable together with Header; thin logged
    wrapper around SCons' CheckLibWithHeader."""
    ptPrintLog(True, Context.env['PT_LOGFILE'],
               ptBoldBlue,
               'Checking for ' + Lib + ' (' + Header + ')... ')
    found = Context.sconf.CheckLibWithHeader(Lib, Header, Language)
    Context.Result(found)
    return found
################################################################################
# custom check on libjpeg version
def ptCheckLibJpeg(Context,MinVersion,MaxVersion):
    """Check that the libjpeg seen by the compiler reports JPEG_LIB_VERSION
    between MinVersion and MaxVersion (inclusive).

    Aborts the whole build via Exit(1) when the probe program cannot even be
    compiled or run; otherwise reports the range check via Context.Result().
    """
    ptPrintLog(True, Context.env['PT_LOGFILE'],
               ptBoldBlue,
               'Checking for libjpeg between ' + str(MinVersion) +
               ' and ' + str(MaxVersion) + '... ')
    # NOTE(review): the pre-#define of JPEG_LIB_VERSION 0 presumably lets the
    # printf compile even when the header does not define it — confirm.
    probe = """
#include <stdlib.h>
#include <stdio.h>
#define JPEG_LIB_VERSION 0
#include <jpeglib.h>
int main() {
printf("%d",JPEG_LIB_VERSION);
return 0;
}
"""
    if Context.TryCompile(probe, '.c') == 0:
        for line in ('Failing test. Cannot compile test program:',
                     probe,
                     'Giving up.'):
            ptPrintLog(True, Context.env['PT_LOGFILE'], ptBoldRed, line)
        Exit(1)
    outcome = Context.TryRun(probe, '.c')
    if outcome[0] == 0:
        for line in ('Failing test. Cannot run test program:',
                     probe,
                     'Giving up.'):
            ptPrintLog(True, Context.env['PT_LOGFILE'], ptBoldRed, line)
        Exit(1)
    reported = int(outcome[1])
    OK = MinVersion <= reported <= MaxVersion
    if not OK:
        ptPrintLog(True, Context.env['PT_LOGFILE'], ptBoldRed,
                   'libjpeg version : ' + str(outcome[1]) + ' should be between ' +
                   str(MinVersion) + ' and ' + str(MaxVersion))
    Context.Result(OK)
    return OK
################################################################################
def ptCheckQt(Context,MinVersion):
    """Check that the Qt installation pointed to by QT4DIR is >= MinVersion.

    Locates qmake (and make) first and aborts the build via Exit(1) when
    either tool is missing; otherwise reports the numeric version comparison
    through Context.Result() and returns it.
    """
    ptPrintLog(True,Context.env['PT_LOGFILE'],
               ptBoldBlue,
               'Checking for Qt >= ' + MinVersion + '... ')
    # Run qmake under the build's correct, minimal environment.
    ptSavedEnviron = dict(os.environ)
    os.environ.clear()
    try:
        for Key in Context.env['ENV'].keys():
            os.environ[Key] = Context.env['ENV'][Key]
        # Locate qmake. Taking QT4DIR into account.
        ptBinDir = Context.env['QT4DIR'] + os.sep + 'bin' + os.sep
        qmake = ''
        for qm in [ptBinDir + Name for Name in
                   ['qmake','qmake.exe','qmake-qt4','qmake-qt4.exe']]:
            if os.path.exists(qm) :
                qmake = qm
                break
        if not qmake:
            ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,
                       'Cannot locate qmake.')
            ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,'Giving up.')
            Exit(1)
        # Locate make (only its presence is checked here).
        make = Context.env.WhereIs('make')
        if not make:
            ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,
                       'Cannot locate make.')
            ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,'Giving up.')
            Exit(1)
        ptQtVersion = \
            os.popen(qmake + ' -query QT_VERSION').read().rstrip().split('.')
    finally:
        # BUGFIX: the original never restored os.environ in this function,
        # leaking the stripped-down environment into the rest of the build.
        os.environ.clear()
        os.environ.update(ptSavedEnviron)
    # BUGFIX: compare numerically. The original compared the components as
    # strings (breaks for multi-digit components, e.g. '10' < '9') and raised
    # IndexError for versions with fewer than three components.
    try:
        ptHave = [int(x) for x in ptQtVersion]
        ptWant = [int(x) for x in MinVersion.split('.')]
    except ValueError:
        Ret = False
    else:
        Ret = ptHave >= ptWant
    Context.Result(Ret)
    return Ret
################################################################################
# Determine Qt Compile and Link parameters via a qmake run on test program.
def ptGetQtOutput(Context):
    """Determine Qt compile and link flags by building a trivial qmake project.

    Writes a minimal .pro/.cpp pair to a temp directory, runs qmake and make
    there, and parses the echoed compile/link commands to extract defines,
    include paths, library paths and libraries.

    Returns [qt_version_string, combined_compile_and_link_flags].
    Aborts the build via Exit(1) when qmake or make cannot be located.
    """
    # Make sure we work with correct and minimum os.environ. Save previous.
    ptSavedEnviron = dict(os.environ)
    os.environ.clear()
    for Key in Context.env['ENV'].keys():
        os.environ[Key] = Context.env['ENV'][Key]
    # Locate qmake. Taking QT4DIR into account.
    qmake_1 = Context.env['QT4DIR'] + os.sep + 'bin' + os.sep + 'qmake'
    qmake_2 = Context.env['QT4DIR'] + os.sep + 'bin' + os.sep + 'qmake.exe'
    qmake_3 = Context.env['QT4DIR'] + os.sep + 'bin' + os.sep + 'qmake-qt4'
    qmake_4 = Context.env['QT4DIR'] + os.sep + 'bin' + os.sep + 'qmake-qt4.exe'
    qmakes = [qmake_1,qmake_2,qmake_3,qmake_4]
    qmake = ''
    for qm in qmakes :
        if os.path.exists(qm) :
            qmake = qm
            break
    if not qmake:
        ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,
                   'Cannot locate qmake.')
        ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,'Giving up.')
        Exit(1)
    # Locate make
    make = Context.env.WhereIs('make')
    if not make:
        ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,
                   'Cannot locate make.')
        ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,'Giving up.')
        Exit(1)
    # Version
    ptQtVersion = os.popen(qmake + ' -query QT_VERSION').read().rstrip()
    # Analyze output of qmake/make combo.
    # A minimal C++ program — the flags of interest come from the .pro file.
    ptCurDir = os.getcwd()
    ptTmpDir = tempfile.mkdtemp()
    ptProgram = """
int main() {
return 0;
}
"""
    with open(ptTmpDir + os.sep + 'FooTest.cpp','w') as f :
        f.write(ptProgram)
    with open(ptTmpDir + os.sep + 'FooTest.pro','w') as f :
        # Mirror the build type (PT_RELEASE) and request the Qt modules used.
        f.write('CONFIG -= DEBUG\n')
        f.write('CONFIG -= RELEASE\n')
        if Context.env['PT_RELEASE'] :
            f.write('CONFIG += RELEASE\n')
        else :
            f.write('CONFIG += DEBUG\n')
        f.write('QT += core\n')
        f.write('QT += gui\n')
        f.write('QT += network\n')
        f.write('SOURCES = FooTest.cpp\n')
    os.chdir(ptTmpDir)
    os.popen(qmake)
    ptMakeOutput = os.popen(make).read().rstrip().split('\n')
    # Analyze output to determine flags.
    ptCompileFlags = ''
    ptLinkFlags = ''
    for Line in ptMakeOutput:
        if 'FooTest.cpp' in Line :
            # Assuming compile step.
            #ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,
            # '\nFoo compile line : ' + Line + '\n')
            ptLocalEnv = Environment()
            ptParsedFlags = ptLocalEnv.ParseFlags(Line)
            for ptFlag in ptParsedFlags['CPPDEFINES'] :
                ptCompileFlags += ' ' + '-D' + ptFlag
            for ptFlag in ptParsedFlags['CCFLAGS'] :
                # Keep only -f*/-m* code-generation flags.
                if ptFlag.startswith('-f') or ptFlag.startswith('-m'):
                    ptCompileFlags += ' ' + ptFlag
            for ptPath in ptParsedFlags['CPPPATH'] :
                # Skip the temp project's own directories.
                if ptPath in ['.','debug','release'] :
                    continue
                ptCompileFlags += ' -I' + os.path.abspath(ptPath).replace("\\","/")
            #ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,
            # '\nptParsedFlags : ' + str(ptParsedFlags) + '\n')
            #ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,
            # '\nptCompileFlags : ' + ptCompileFlags + '\n')
        elif 'FooTest' in Line :
            # Assuming link step.
            #ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,
            # '\nFoo link line : ' + Line + '\n')
            ptLocalEnv = Environment()
            ptParsedFlags = ptLocalEnv.ParseFlags(Line)
            for ptFlag in ptParsedFlags['LINKFLAGS'] :
                ptLinkFlags += ' ' + ptFlag
            for ptFlag in ptParsedFlags['CCFLAGS'] :
                if ptFlag.startswith('-f') or ptFlag.startswith('-m'):
                    ptLinkFlags += ' ' + ptFlag
            for ptPath in ptParsedFlags['LIBPATH'] :
                ptLinkFlags += ' -L' + os.path.abspath(ptPath).replace("\\","/")
            for ptLib in ptParsedFlags['LIBS'] :
                try:
                    ptLinkFlags += ' -l' + ptLib
                except TypeError:
                    # foo.exe,foo.o references.
                    pass
            #ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,
            # '\nptParsedFlags : ' + str(ptParsedFlags) + '\n')
            #ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,
            # '\nptLinkFlags : ' + ptLinkFlags + '\n')
    # Back to dir we were.
    os.chdir(ptCurDir)
    # Remove our temp dir.
    shutil.rmtree(ptTmpDir)
    # Restore env
    os.environ.clear()
    os.environ.update(ptSavedEnviron)
    return [ptQtVersion,ptCompileFlags + ' ' + ptLinkFlags]
################################################################################
# Boilerplate to log commands nicely to screen and completely to log file.
def ptPrintCmdLine(s, target, src, env):
    """SCons PRINT_CMD_LINE_FUNC replacement.

    Always writes the full command line 's' to the log file; on screen it
    shows either a short colored per-tool summary plus the target names or,
    when PT_VERBOSE is set, the full command.
    """
    # Always to a log file. (and with an extra linefeed to 'see' commands)
    LogFile = env['PT_LOGFILE']
    LogFile.write('\n' + s + '\n')
    ShortText = 'Building object'
    # 'Recognized' commands ?
    if 'DSCONS_CXX' in s:
        ShortText = ptGreen + 'Building CXX object'
    elif 'DSCONS_CC' in s:
        ShortText = ptGreen + 'Building C object'
    elif 'DSCONS_LINK' in s:
        ShortText = ptBoldMagenta + 'Linking'
    elif 'DSCONS_UIC' in s:
        ShortText = ptBoldBlue + 'Generating UIC object'
    elif 'DSCONS_MOC' in s:
        ShortText = ptBoldBlue + 'Generating MOC object'
    elif 'DSCONS_RCC' in s:
        ShortText = ptBoldBlue + 'Generating RCC object'
    elif 'DSCONS_WINDRES' in s:
        ShortText = ptBoldBlue + 'Generating Windows resource'
    elif s.endswith('.lnk') :
        ShortText = ptBoldMagenta + 'Linking'
    elif s.startswith('Creating'):
        ShortText = ptBoldBlue + 'Creating'
    else:
        # Install is a kind of exception. Also it points to a func.
        # We *assume* fallthrough is install. But that's a very shaky one.
        # XXX TODO
        #print 'DEBUG FOO : ' + s
        ShortText = ptBoldMagenta + 'Creating'
    MyMessage = ''
    if not env['PT_VERBOSE']:
        MyMessage = ShortText + ' ' + ' and '.join([str(x) for x in target])
    else:
        MyMessage = s
    MyMessage += ptNoAttrs
    # BUGFIX: print() call works on Python 2 and 3; the original
    # 'print MyMessage' statement is a syntax error under Python 3.
    print(MyMessage)
    return None
################################################################################
# Exit function ensures color reset.
def ptLastCalledAtExit():
    """atexit hook: say goodbye and reset the terminal color attributes."""
    # BUGFIX: print() call works on Python 2 and 3; the original print
    # statement is a syntax error under Python 3.
    print(ptBoldYellow +
          'Bye from the scons build program for Photivo' +
          ptNoAttrs)
    return None
################################################################################
# AtExit that joins the stderr collected in stderr.log into the logfile.
def ptAtExit(LogFile):
    """AtExit helper: append the stderr collected in 'stderr.log' to LogFile.

    Best effort: any failure (e.g. a missing stderr.log) is ignored so the
    exit path never raises.
    """
    try:
        LogFile.write('\nThe stderr output is :\n')
        sys.stderr.flush() # Make sure the stderr is complete.
        # 'with' guarantees the handle is closed even on a partial read.
        with open('stderr.log','r') as StdErrFile:
            LogFile.write(StdErrFile.read())
    except Exception:
        # BUGFIX: narrowed from a bare 'except:' so SystemExit and
        # KeyboardInterrupt still propagate; the copy stays best-effort.
        pass
    return None
################################################################################
# Basically from Scons wiki : Spawn which echos stdout/stderr from the child.
# ptFoo is mine.
def ptEchoSpawn( sh, escape, cmd, args, env ):
    """SCons SPAWN replacement that echoes the child's stdout/stderr.

    Joins the argument list into one shell command, runs it with env values
    forced to plain str, forwards the captured output, and returns the
    child's exit code. (Basically from the SCons wiki.)
    """
    ptFoo = ' '.join(args)
    # convert env from unicode strings
    asciienv = {}
    # BUGFIX: items() works on Python 2 and 3; iteritems() is Python-2 only.
    for key, value in env.items():
        asciienv[key] = str(value)
    p = subprocess.Popen(
        #args,
        ptFoo,
        env=asciienv,
        stderr=subprocess.PIPE,
        stdout=subprocess.PIPE,
        shell=True,
        universal_newlines=True)
    (stdout, stderr) = p.communicate()
    # Does this screw up the relative order of the two?
    sys.stdout.write(stdout)
    sys.stderr.write(stderr)
    return p.returncode
################################################################################
# Local (changed) copy of TempFileMunge
class ptTempFileMunge(object):
    """A callable class. You can set an Environment variable to this,
    then call it with a string argument, then it will perform temporary
    file substitution on it. This is used to circumvent the long command
    line limitation.
    Example usage:
    env["TEMPFILE"] = TempFileMunge
    env["LINKCOM"] = "${TEMPFILE('$LINK $TARGET $SOURCES')}"
    By default, the name of the temporary file used begins with a
    prefix of '@'. This may be configured for other tool chains by
    setting '$TEMPFILEPREFIX'.
    env["TEMPFILEPREFIX"] = '-@' # diab compiler
    env["TEMPFILEPREFIX"] = '-via' # arm tool chain
    """
    def __init__(self, cmd):
        # The command template to (possibly) route through a temp file.
        self.cmd = cmd
    def __call__(self, target, source, env, for_signature):
        if for_signature:
            # If we're being called for signature calculation, it's
            # because we're being called by the string expansion in
            # Subst.py, which has the logic to strip any $( $) that
            # may be in the command line we squirreled away. So we
            # just return the raw command line and let the upper
            # string substitution layers do their thing.
            return self.cmd
        # Now we're actually being called because someone is actually
        # going to try to execute the command, so we have to do our
        # own expansion.
        cmd = env.subst_list(self.cmd, SCons.Subst.SUBST_CMD, target, source)[0]
        try:
            maxline = int(env.subst('$MAXLINELENGTH'))
        except ValueError:
            maxline = 2048
        # Short enough command lines pass through untouched.
        length = 0
        for c in cmd:
            length += len(c)
        if length <= maxline:
            return self.cmd
        # We do a normpath because mktemp() has what appears to be
        # a bug in Windows that will use a forward slash as a path
        # delimiter. Windows's link mistakes that for a command line
        # switch and barfs.
        #
        # We use the .lnk suffix for the benefit of the Phar Lap
        # linkloc linker, which likes to append an .lnk suffix if
        # none is given.
        (fd, tmp) = tempfile.mkstemp('.lnk', text=True)
        native_tmp = SCons.Util.get_native_path(os.path.normpath(tmp))
        if env['SHELL'] and env['SHELL'] == 'sh':
            # The sh shell will try to escape the backslashes in the
            # path, so unescape them.
            native_tmp = native_tmp.replace('\\', r'\\\\')
            # In Cygwin, we want to use rm to delete the temporary
            # file, because del does not exist in the sh shell.
            rm = env.Detect('rm') or 'del'
        else:
            # Don't use 'rm' if the shell is not sh, because rm won't
            # work with the Windows shells (cmd.exe or command.com) or
            # Windows path names.
            rm = 'del'
        prefix = env.subst('$TEMPFILEPREFIX')
        if not prefix:
            prefix = '@'
        # JDLA , Another round of escapes for win32, which is in msys in our case.
        if sys.platform in ['win32'] :
            for i,ptCmd in enumerate(cmd) :
                cmd[i] = ptCmd.replace('\\','\\\\')
        args = list(map(SCons.Subst.quote_spaces, cmd[1:]))
        # NOTE(review): os.write with a str works on Python 2 only; Python 3
        # would require bytes here — confirm before porting.
        os.write(fd, " ".join(args) + "\n")
        os.close(fd)
        # XXX Using the SCons.Action.print_actions value directly
        # like this is bogus, but expedient. This class should
        # really be rewritten as an Action that defines the
        # __call__() and strfunction() methods and lets the
        # normal action-execution logic handle whether or not to
        # print/execute the action. The problem, though, is all
        # of that is decided before we execute this method as
        # part of expanding the $TEMPFILE construction variable.
        # Consequently, refactoring this will have to wait until
        # we get more flexible with allowing Actions to exist
        # independently and get strung together arbitrarily like
        # Ant tasks. In the meantime, it's going to be more
        # user-friendly to not let obsession with architectural
        # purity get in the way of just being helpful, so we'll
        # reach into SCons.Action directly.
        #if SCons.Action.print_actions:
        if False :
            print("Using tempfile "+native_tmp+" for command line:\n"+
                str(cmd[0]) + " " + " ".join(args))
        return [ cmd[0], prefix + native_tmp + '\n' + rm, native_tmp ]
################################################################################
| Python |
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Options/PackageOption.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
__doc__ = """Place-holder for the old SCons.Options module hierarchy
This is for backwards compatibility. The new equivalent is the Variables/
class hierarchy. These will have deprecation warnings added (some day),
and will then be removed entirely (some day).
"""
import SCons.Variables
import SCons.Warnings
warned = False

def PackageOption(*args, **kw):
    """Deprecated alias for SCons.Variables.PackageVariable; warns once."""
    global warned
    if not warned:
        SCons.Warnings.warn(
            SCons.Warnings.DeprecatedOptionsWarning,
            "The PackageOption() function is deprecated; use the PackageVariable() function instead.")
        warned = True
    return SCons.Variables.PackageVariable(*args, **kw)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Options/EnumOption.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
__doc__ = """Place-holder for the old SCons.Options module hierarchy
This is for backwards compatibility. The new equivalent is the Variables/
class hierarchy. These will have deprecation warnings added (some day),
and will then be removed entirely (some day).
"""
import SCons.Variables
import SCons.Warnings
warned = False

def EnumOption(*args, **kw):
    """Deprecated alias for SCons.Variables.EnumVariable; warns once."""
    global warned
    if not warned:
        SCons.Warnings.warn(
            SCons.Warnings.DeprecatedOptionsWarning,
            "The EnumOption() function is deprecated; use the EnumVariable() function instead.")
        warned = True
    return SCons.Variables.EnumVariable(*args, **kw)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Options/PathOption.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
__doc__ = """Place-holder for the old SCons.Options module hierarchy
This is for backwards compatibility. The new equivalent is the Variables/
class hierarchy. These will have deprecation warnings added (some day),
and will then be removed entirely (some day).
"""
import SCons.Variables
import SCons.Warnings
warned = False
class _PathOptionClass(object):
    # Callable shim that forwards to SCons.Variables.PathVariable while
    # emitting a one-time deprecation warning. Instantiated once below as
    # PathOption so existing PathOption(...) call sites keep working.
    def warn(self):
        # Emit the deprecation warning only once per process.
        global warned
        if not warned:
            msg = "The PathOption() function is deprecated; use the PathVariable() function instead."
            SCons.Warnings.warn(SCons.Warnings.DeprecatedOptionsWarning, msg)
            warned = True
    def __call__(self, *args, **kw):
        # PathOption(...) -> PathVariable(...)
        self.warn()
        return SCons.Variables.PathVariable(*args, **kw)
    def PathAccept(self, *args, **kw):
        # Forwards to PathVariable.PathAccept after warning.
        self.warn()
        return SCons.Variables.PathVariable.PathAccept(*args, **kw)
    def PathIsDir(self, *args, **kw):
        # Forwards to PathVariable.PathIsDir after warning.
        self.warn()
        return SCons.Variables.PathVariable.PathIsDir(*args, **kw)
    def PathIsDirCreate(self, *args, **kw):
        # Forwards to PathVariable.PathIsDirCreate after warning.
        self.warn()
        return SCons.Variables.PathVariable.PathIsDirCreate(*args, **kw)
    def PathIsFile(self, *args, **kw):
        # Forwards to PathVariable.PathIsFile after warning.
        self.warn()
        return SCons.Variables.PathVariable.PathIsFile(*args, **kw)
    def PathExists(self, *args, **kw):
        # Forwards to PathVariable.PathExists after warning.
        self.warn()
        return SCons.Variables.PathVariable.PathExists(*args, **kw)
# Single module-level instance, callable like the old PathOption() function.
PathOption = _PathOptionClass()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Options/BoolOption.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
__doc__ = """Place-holder for the old SCons.Options module hierarchy
This is for backwards compatibility. The new equivalent is the Variables/
class hierarchy. These will have deprecation warnings added (some day),
and will then be removed entirely (some day).
"""
import SCons.Variables
import SCons.Warnings
warned = False

def BoolOption(*args, **kw):
    """Deprecated alias for SCons.Variables.BoolVariable; warns once."""
    global warned
    if not warned:
        SCons.Warnings.warn(
            SCons.Warnings.DeprecatedOptionsWarning,
            "The BoolOption() function is deprecated; use the BoolVariable() function instead.")
        warned = True
    return SCons.Variables.BoolVariable(*args, **kw)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Options/__init__.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
__doc__ = """Place-holder for the old SCons.Options module hierarchy
This is for backwards compatibility. The new equivalent is the Variables/
class hierarchy. These will have deprecation warnings added (some day),
and will then be removed entirely (some day).
"""
import SCons.Variables
import SCons.Warnings
from BoolOption import BoolOption # okay
from EnumOption import EnumOption # okay
from ListOption import ListOption # naja
from PackageOption import PackageOption # naja
from PathOption import PathOption # okay
warned = False
class Options(SCons.Variables.Variables):
    # Deprecated backwards-compatibility wrapper around the Variables class:
    # constructing one warns (once), and the *Options methods forward to
    # their *Variables counterparts.
    def __init__(self, *args, **kw):
        global warned
        if not warned:
            msg = "The Options class is deprecated; use the Variables class instead."
            SCons.Warnings.warn(SCons.Warnings.DeprecatedOptionsWarning, msg)
            warned = True
        SCons.Variables.Variables.__init__(self, *args, **kw)
    def AddOptions(self, *args, **kw):
        # Old name for Variables.AddVariables().
        return SCons.Variables.Variables.AddVariables(self, *args, **kw)
    def UnknownOptions(self, *args, **kw):
        # Old name for Variables.UnknownVariables().
        return SCons.Variables.Variables.UnknownVariables(self, *args, **kw)
    def FormatOptionHelpText(self, *args, **kw):
        # Old name for Variables.FormatVariableHelpText().
        return SCons.Variables.Variables.FormatVariableHelpText(self, *args,
                                                                **kw)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.