#! /usr/bin/env python
#coding=utf-8
# -*- coding: utf-8 -*-
import log
#
# transitionMap = {
# (fromState, event):toState,
# (fromState, event):toState,
# ...
# }
#
#
#
class IFsmCallbackObj(object):
def __init__(self):
pass
def OnEnterState(self, state):
pass
def OnLeaveState(self, state):
pass
def OnEvent(self, state, event):
pass
class CFsm(object):
def __init__(self, transitionMap, callbackObj):
self.transitionMap = transitionMap
self.callbackObj = callbackObj
self.curState = None
    def Start(self, startState):
        self.curState = startState
        if self.callbackObj:
            self.callbackObj.OnEnterState(startState)
def OnEvent(self, event):
key = (self.curState, event)
#print "In FSM.OnEvent:",key
        if key in self.transitionMap:
#print "ok!!"
if self.callbackObj:
self.callbackObj.OnLeaveState(self.curState)
self.curState = self.transitionMap[key]
self.callbackObj.OnEnterState(self.curState)
else:
self.curState = self.transitionMap[key]
else:
if self.callbackObj:
self.callbackObj.OnEvent(self.curState, event)
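# --- Usage sketch (illustrative, not part of the original module): drives CFsm with a
# small transition map; the states and events are assumptions that mirror PlayerFSMClass below.
if __name__ == "__main__":
    class _DemoCallback(IFsmCallbackObj):
        def OnEnterState(self, state):
            print("enter %s" % state)
        def OnLeaveState(self, state):
            print("leave %s" % state)
        def OnEvent(self, state, event):
            print("unhandled event %s in state %s" % (event, state))
    demo_map = {
        ('stand', 'onMove'): 'move',
        ('move', 'stopMove'): 'stand',
    }
    fsm = CFsm(demo_map, _DemoCallback())
    fsm.Start('stand')
    fsm.OnEvent('onMove')    # stand -> move
    fsm.OnEvent('jump')      # no transition: routed to the OnEvent callback
    fsm.OnEvent('stopMove')  # move -> stand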
| Python |
#! /usr/bin/env python
#coding=utf-8
# Item class: item usage, effects and impact
| Python |
#! /usr/bin/env python
#coding=utf-8
# tgame_entity
import tgame_player
import tgame_map
import tgame_mapmgr
g_builds = {}
g_entitys = {}
g_phys = {}
g_items = {}
ET_Player = 0x01
ET_Zombie = 0x02
ET_Barrier = 0x03
ET_Other = 0x04
def init():
pass
def addentity(playerinfo, AItype = None):
import tgame_player_c
global g_entitys
entity = tgame_player.TGame_Player(playerinfo, AItype)
g_entitys[playerinfo.uid] = entity
g_phys[entity.obj.phy.id] = entity
return g_entitys[playerinfo.uid]
def delentity(uid = None,id = None):
if uid:
        # the local player died
if uid == tgame_mapmgr.inst.objMe.player.uid:
tgame_mapmgr.inst.playerdead()
return
id = g_entitys[uid].obj.phy.id
g_entitys[uid].destroy()
g_entitys[uid] = None
g_phys[id] = None
del g_entitys[uid]
del g_phys[id]
elif id:
uid = g_phys[id].player.uid
g_phys[id].destroy()
g_entitys[uid] = None
g_phys[id] = None
del g_entitys[uid]
del g_phys[id]
def destroy():
import copy
copyg_e = copy.copy(g_entitys)
for key in copyg_e:
id = g_entitys[key].obj.phy.id
g_entitys[key].destroy()
del g_entitys[key]
del g_phys[id]
pass
def update():
for entity in g_entitys:
g_entitys[entity].update() | Python |
#! /usr/bin/env python
#coding=utf-8
import tgame_fsm
class PlayerFSMClass(tgame_fsm.IFsmCallbackObj):
TransitionMap = {
('stand', 'onMove'):'move',
('move', 'stopMove'):'stand',
('stand', 'Fire'):'atk',
}
MapState2Look = {
'move' : 'anim_stand_clip1',
'walk' : 'anim_walk_clip1',
'run' : 'anim_run_clip1',
'float' : 'anim_float_clip1',
}
    def __init__(self):
        self.fsm = tgame_fsm.CFsm(self.TransitionMap, self)
        #self.sprite = # load some super cool model2d
    # forward an external event into the FSM (named SendEvent so it does not shadow
    # the IFsmCallbackObj.OnEvent callback defined below)
    def SendEvent(self, event):
        self.fsm.OnEvent(event)
    def OnEnterState(self, state):
        # e.g. switch to the animation that corresponds to the new state
        self.sprite.change_looks(self.MapState2Look[state])
def OnLeaveState(self, state):
pass
def OnEvent(self, state, event):
pass
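# --- Usage sketch (illustrative): wiring the player FSM. The sprite member is only a
# placeholder above, so a real model2d object would have to be assigned first.
# player = PlayerFSMClass()
# player.sprite = load_model2d()     # hypothetical loader, see the comment in __init__
# player.fsm.Start('stand')          # enter the initial state
# player.fsm.OnEvent('onMove')       # 'stand' -> 'move', triggers OnEnterState('move')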
| Python |
# Fog of war system
import iworld2d
import math3d
# size of each small fog tile
fogsize = 30
# running counter used to name visions
visionsum = 0
# number of fog tiles per axis
h = 768/fogsize+1
w = 1024/fogsize+1
# vision (an area that clears the fog)
class tgame_vision(object):
    def __init__(self, vision = 20, pos = (0,0) , mode = "pos", name = ""):
        # bump the counter
        global visionsum
        visionsum += 1
        # vision range
        self.vision = vision
        # mode - ui: fixed on the UI, pos: world coordinates
        #self.mode = mode
        # position
        self.pos = pos
        # auto-generate a name if none was given
        if name == "":
            name = "view_"+str(visionsum)
        self.name = name
        # invisible placeholder entity
        self.obj = None
        #return self.name
foglist = {}
fogent = {}
visions = {}
# skip position updates while the camera has not moved
last_cx = 0
last_cy = 0
# fog-of-war management functions
def init():
    # full-screen fog table
    # a value of 1 means the tile is fogged; initialize with fog everywhere
    global foglist,fogent
    foglist = [[1 for col in range(h)] for row in range(w)]
    # load all fog tiles
    for y in range(0, h):
        for x in range(0, w):
            # load a fog tile on layer 4
            index = y*w+x
            fogent[index] = iworld2d.image2d( texture_file = "tgame/res/world2d/txg/fog.png", layer_id = 4 , name = "_fog_" + str(index))
            fogent[index].pos = (x * fogsize + 500, y * fogsize + 500)
def add_vision(vision , ent):
    # the vision area is circular, represented by a circular image scaled by the vision
    # value, so it can be hit-tested directly against the fog in the scene
    vision.obj = iworld2d.image2d( texture_file = "tgame/res/world2d/txg/fog_obj.png", layer_id = 4 , name = vision.name)
    vision.obj.set_pick()
    vision.obj.hide()
    vision.ent = ent
    global visions
    visions[vision.name] = vision
# reveal the fog inside a vision area
def show_fog(vision):
    #print "showfog"
    # measure every fog tile's distance to the vision center; hide tiles within
    # range and fade the ones just outside it
    for y in range(0, h):
        for x in range(0, w):
            fog = fogent[y*w+x]
            #print "ddd"
            if fog.is_hide() == False:
                distance = math3d.vector2(vision.pos[0],vision.pos[1]) - math3d.vector2(fog.pos[0],fog.pos[1])
                #hide = False
                #print "ddd"
                if distance.length < (vision.vision * 30)/2:
                    fog.hide()
                elif distance.length < (vision.vision * 35)/2:
                    fog.alpha = 90
                elif distance.length < (vision.vision * 40)/2:
                    fog.alpha = 180
# update fog state (visibility and alpha) from the visions currently on screen
def update_fog():
    global fogent
    # show all fog tiles again
    for y in range(0, h):
        for x in range(0, w):
            fogent[y*w+x].show()
            fogent[y*w+x].alpha = 255
    # collect the objects within the current screen rect
    camera_x,camera_y = iworld2d.camera_to_worldpos(0,0)
    objlist = iworld2d.select_with_rect(False, camera_x, camera_y, camera_x+1024, camera_y+768)
    #print objlist
    if objlist:
        # for every vision found, punch a hole in the fog
        for obj in objlist:
            # objects whose name starts with view_ are visions
            if len(obj.name) > 5 and obj.name[0:5] == "view_":
                # found a vision, reveal the fog around it
                show_fog(visions[obj.name])
# update visions
def update_view():
    global visions
    for view in visions:
        viewobj = visions[view]
        #print "objpos:", viewobj.ent.pos
        # position
        if viewobj.ent.pos != viewobj.pos:
            viewobj.pos = viewobj.ent.pos
            viewobj.obj.pos = viewobj.ent.pos
        # vision range
        #if viewobj.obj.scale != (viewobj.vision, viewobj.vision)
        viewobj.obj.scale = (viewobj.vision, viewobj.vision)
        #print viewobj.obj.scale
    pass
# update fog tile positions
def update_pos():
    global fogent,last_cx,last_cy
    # move every fog tile relative to the camera
    camera_x,camera_y = iworld2d.camera_to_worldpos(0,0)
    if camera_x != last_cx or camera_y != last_cy:
        last_cx = camera_x
        last_cy = camera_y
        # reposition each fog tile
        for y in range(0, h):
            for x in range(0, w):
                fogent[y*w+x].pos = (x * fogsize +camera_x, y * fogsize +camera_y)
def update():
update_pos()
update_view()
update_fog()
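# --- Usage sketch (illustrative; assumes the iworld2d entities used above): create the
# fog layer once, attach a vision to an entity that exposes a .pos, then call update()
# every frame.
# init()
# v = tgame_vision(vision = 20, name = "view_player")
# add_vision(v, player_entity)   # player_entity is any object with a .pos attribute
# ...
# update()                       # once per frame: reposition fog, move visions, re-cut holes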
| Python |
#! /usr/bin/env python
#coding=utf-8
import math3d,math
LEFT_SIDE = 0x01
RIGHT_SIDE = 0x02
NONE_SIDE = 0x03
def angle(o, s, e):
cosfi = 0
fi = 0
norm = 0
dsx = s[0] - o[0]
dsy = s[1] - o[1]
dex = e[0] - o[0]
dey = e[1] - o[1]
cosfi = dsx * dex + dsy * dey
norm = (dsx * dsx + dsy * dsy) * (dex * dex + dey * dey)
cosfi /= math.sqrt(norm)
fi = math.acos(cosfi)
if (180 * fi / math.pi < 180):
return 180 * fi / math.pi
else:
return 360 - 180 * fi / math.pi
def angle360(o, s, e):
if postionsame(o, s) or postionsame(o, e) or postionsame(s, e):
return
rotate = angle(o, s, e)
dir = math3d.vector2(e[0], e[1]) - math3d.vector2(s[0], s[1])
dir.normalize(1)
x = 0
y = 0
if dir.x < 0:
rotate = 360 - rotate
return rotate * math.pi/180
# two points are equal
def postionsame(p0,p1):
if p0[0] == p1[0] and p0[1] == p1[1]:
return True
return False
#normalize a vector
def normalize(v=[]):
if len(v) == 2:
length = lambda v: (v[0]*v[0] + v[1]*v[1] ) ** 0.5
return ( v[0] / length(v), v[1] / length(v))
else:
return 0
# angle at vertex B formed by the points A, B, C
def vector3angle(A, B, C):
x1 = A[0]-B[0]
y1 = A[1]-B[1]
x2 = C[0]-B[0]
y2 = C[1]-B[1]
x = x1*x2+y1*y2
y = x1*y2-x2*y1
    #if x > 0 the angle is < 90 degrees, otherwise >= 90 degrees
    # cos(angle) = dot / (|BA|*|BC|); note that sqrt(x*x + y*y) equals |BA|*|BC|
    angle = math.acos(x/math.sqrt(x*x+y*y))
    #print "vector3angle",angle
return angle
#angle between two vectors (rough, unused)
def vector2angle(da, db):
    X1 = da[0]
    X2 = db[0]
    Y1 = da[1]
    Y2 = db[1]
    # cos(angle) = (a . b) / (|a| * |b|)
    cos_angle = (X1*X2+Y1*Y2)/(math.sqrt(X1*X1+Y1*Y1)*math.sqrt(X2*X2+Y2*Y2))
    return math.acos(cos_angle)
# rotate a vector counter-clockwise
def RotateVectorN(x,y,d):
p = ( x * math.cos(d) - y * math.sin(d) , x * math.sin(d) + y * math.cos(d) )
return p
# get the point at distance d from the corner along the angle bisector (rough, unused)
def anglepoint(p1, p2, p3, d):
    da = ((p2[0] - p1[0]), (p2[1] - p1[1]))
    # normalize
    v = [da[0], da[1]]
    da = normalize(v)
    # included angle
    angle = vector3angle(p1, p2, p3)
    ds = da
    # rotate by half the angle to get the bisector direction
    dirz = RotateVectorN(ds[0], ds[1], angle/2)
    # point offset from p2 by d along the bisector
    fp1 = (p2[0] + dirz[0] * d, p2[1] + dirz[1] * d)
    return fp1
# length of the segment between two points
def linelength(p0, p1):
x = math.fabs(p0[0]-p1[0])
y = math.fabs(p0[1]-p1[1])
length = math.sqrt(x*x + y*y)
return length
# centroid (center point) of a triangle
def traingCenter(p0, p1, p2):
x1 = p0[0]
x2 = p1[0]
x3 = p2[0]
y1 = p0[1]
y2 = p1[1]
y3 = p2[1]
pos = ((x1+x2+x3)/3,(y1+y2+y3)/3)
return pos
# test whether (x2,y2) and (x3,y3) lie on the same side of the line through (x0,y0)-(x1,y1)
def isOnSameSide(x0, y0, x1, y1, x2, y2, x3, y3):
a = y0 - y1
b = x1 - x0
c = x0 * y1 - x1 * y0
if (a * x2 + b * y2 + c) * (a * x3 + b * y3 + c) > 0:
return True
return False
#p is the point to test; v0, v1 and v2 are the triangle's vertices
# test whether a point lies inside a triangle
def isPointInside2(p, v0, v1, v2):
if isOnSameSide(p[0] ,p[1] ,v0[0] ,v0[1] ,v1[0] ,v1[1] ,v2[0] ,v2[1]) or \
isOnSameSide(p[0] ,p[1] ,v1[0] ,v1[1] ,v2[0] ,v2[1] ,v0[0] ,v0[1]) or \
isOnSameSide(p[0] ,p[1] ,v2[0] ,v2[1] ,v0[0] ,v0[1] ,v1[0] ,v1[1]):
return False
return True
#intersection point of two line segments
def linenode(P1,P2,Q1,Q2):
P1_X = P1[0]
P2_X = P2[0]
Q1_X = Q1[0]
Q2_X = Q2[0]
P1_Y = P1[1]
P2_Y = P2[1]
Q1_Y = Q1[1]
Q2_Y = Q2[1]
tmp11 = (Q1_X - P1_X) * (Q1_Y - Q2_Y) - (Q1_Y - P1_Y) * (Q1_X - Q2_X)
tmp12 = (P2_X - P1_X) * (Q1_Y - Q2_Y) - (P2_Y - P1_Y) * (Q1_X - Q2_X)
tmp13 = (P2_X - P1_X) * (Q1_Y - P1_Y) - (P2_Y - P1_Y) * (Q1_X - P1_X)
tmp14 = (P2_X - P1_X) * (Q1_Y - Q2_Y) - (P2_Y - P1_Y) * (Q1_X - Q2_X)
    node = None
    # parallel or degenerate segments give a zero denominator and no unique intersection
    if tmp12 == 0 or tmp14 == 0:
        return node
    u = float(tmp11)/tmp12
    v = float(tmp13)/tmp14
    #the segments intersect iff both 0 <= u <= 1 and 0 <= v <= 1
    if (0 <= u and u <= 1 and 0 <= v and v <= 1):
x = P1_X + (P2_X - P1_X) * u
y = P1_Y + (P2_Y - P1_Y) * u
node = (x,y)
return node
# rotate a vector clockwise
def RotateVector(x,y,d):
p = ( x * math.cos(-d) - y * math.sin(-d) , x * math.sin(-d) + y * math.cos(-d) )
return p
# perpendicular distance from a point (p3) to the edge p1-p2
def traingMiniWidth(p1,p2,p3):
    x = p1[0] - p2[0]
    y = p1[1] - p2[1]
    # 90 degrees, expressed in radians for RotateVector
    d = math.pi / 2
    # rotate clockwise to get the perpendicular direction
    p = RotateVector(x, y, d)
    pe = ( p3[0] + p[0] * 1000, p3[1] + p[1] * 1000)
    pon = linenode(p1,p2,p3,pe)
    #print "intersection:",pon
    length = 0
    if pon:
        length = linelength(pon, p3)
    return length
# given two points p1(x1,y1) and p2(x2,y2), determine whether point p(x,y) is left or right of the line
def LeftOfLine(p, p1, p2):
x1 = p1[0]
x2 = p2[0]
y1 = p1[1]
y2 = p2[1]
x = p[0]
y = p[1]
#tmpx = (p1[0] - p2[0]) / (p1[1] - p2[1]) * (p[1] - p2[1]) + p2[0];
Tmp = (y1 - y2) * x + (x2 - x1) * y + x1 * y2 - x2 * y1
if Tmp > 0:
return LEFT_SIDE
elif Tmp < 0:
return RIGHT_SIDE
else:
return NONE_SIDE
    #if (tmpx > p[0]):  # tmpx > p.x means the point is left of the line, smaller means right, equal means on the line
    #    return True
    #return False
    #alternative method:
    #Tmp = (y1 - y2) * x + (x2 - x1) * y + x1 * y2 - x2 * y1
    #Tmp > 0  -> left side
    #Tmp = 0  -> on the line
    #Tmp < 0  -> right side
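# --- Self-test sketch (illustrative, added for clarity): exercises a few of the helpers
# above with hand-checked values. Running it standalone still requires the math3d module
# imported at the top of this file.
if __name__ == "__main__":
    # the diagonals of a square cross in the middle
    print(linenode((0.0, 0.0), (4.0, 4.0), (0.0, 4.0), (4.0, 0.0)))   # (2.0, 2.0)
    # (1,1) is inside the triangle (0,0),(4,0),(0,4); (3,3) is outside
    print(isPointInside2((1, 1), (0, 0), (4, 0), (0, 4)))             # True
    print(isPointInside2((3, 3), (0, 0), (4, 0), (0, 4)))             # False
    # (0,1) lies to the left of the direction (0,0) -> (1,0)
    print(LeftOfLine((0, 1), (0, 0), (1, 0)) == LEFT_SIDE)            # True
    # hypotenuse of a 3-4-5 right triangle
    print(linelength((0, 0), (3, 4)))                                 # 5.0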
| Python |
#-- coding: utf-8 -*-
import utils, sys
lstDir = utils.listdir("tgame/script/")
print "lstDir = %s" % str(lstDir)
sys.path.extend(lstDir)
import tgame_net_handler, t_cmd
import tgame_room_logic
import iapi
import tgame_ready
import tgame_menu
tgame_API = None
GAMEID = -1
GAME_MODE = None
UID = None
GAME_ID = None
#entry point of the game
def start_from_init(cgame_id, game_mode, uid):
    #initialize the API version
    global tgame_API
    if tgame_API is None:
        tgame_API = iapi.API_1_0()
    #initialize game logic and ui, and show the ready screen
    global UID,GAME_ID,GAME_MODE
    GAME_MODE = game_mode
    UID = uid
    GAME_ID = cgame_id
    start_game(game_mode, uid)
    tgame_room_logic.init(GAME_MODE)
    #register the key callbacks
    tgame_API.register_callback(
        cgame_id,
        tgame_room_logic.inst.logic_frame, tgame_room_logic.inst.render_frame, None,
        tgame_room_logic.inst.on_key_down, tgame_room_logic.inst.on_mouse_msg, None, None, None
    )
    #initialize the network message mapping table
    tgame_net_handler.init()
    #register the network message definitions and callbacks
    tgame_API.register_game_room_msgdefine_and_callback(t_cmd.cmd_msg, tgame_net_handler.MSG_MAP)
#initialize in-game object instances and ui instances
def start_game(iMode, uid):
    #initialize music and sound effects
    import tgame_sound
    tgame_sound.init()
    #initialize the ready-screen logic
    tgame_room_logic.init(iMode)
    tgame_room_logic.inst.init_myself(uid)
    #initialize the main game logic
    import tgame_mapmgr
    tgame_mapmgr.init()
    #initialize the ready-screen ui
    tgame_ready.init()
    tgame_ready.inst.show()
    #import tgame_ui_wait
    #tgame_ui_wait.init()
    #tgame_ui_wait.inst.show()
    #initialize the game area management logic
    #import tgame_picmgr
    #tgame_picmgr.init()
    #initialize the in-game ui
    #import tgame_seedbank
    #tgame_seedbank.init()
    #import tgame_timebar
    #tgame_timebar.init()
    #initialize the game result ui
    #import tgame_result
    #tgame_result.init()
    #initialize the game message-sending logic
    import tgame_command
    tgame_command.init()
    #initialize game achievements
    import tgame_achieve
    tgame_achieve.init()
    import tgame_mapevent
    tgame_mapevent.init()
#release in-game objects
def back_to_hall():
    print "destroy games res!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    #destroy the ready-screen logic and ui
    tgame_room_logic.destroy()
    tgame_ready.destroy()
    #destroy the main game logic
    import tgame_mapmgr
    tgame_mapmgr.destroy()
    # sound
    import tgame_sound
    tgame_sound.destroy()
    #destroy the in-game ui and result ui
    #import tgame_seedbank
    #tgame_seedbank.destroy()
    #import tgame_timebar
    #tgame_timebar.destroy()
    #import tgame_result
    #tgame_result.destroy()
    #destroy the message-sending logic
    #import tgame_command
    #tgame_command.destroy()
    #destroy the game area management logic
    #import tgame_picmgr
    #tgame_picmgr.destroy()
    #clean up music and sound effects
    #import tgame_sound
    #tgame_sound.destroy()
    #destroy game achievements
    #import tgame_achieve
    #tgame_achieve.destroy()
    #destroy the API object
    global tgame_API
    tgame_API = None
    #import tgame_mapevent
    #tgame_mapevent.destroy()
| Python |
#! /usr/bin/env python
#coding=utf-8
#-- coding: utf-8 -*-
import tgame_room_logic, t_cmd
def init():
    #map from network message names to their handler callbacks
    global MSG_MAP
    MSG_MAP = {
        #game mode info
        "game_s_game_mode" : tgame_room_logic.inst.s_game_mode, \
        #player entered
        "game_s_user_in_game" : tgame_room_logic.inst.s_user_in_game, \
        #player left
        "game_s_user_out_game" : tgame_room_logic.inst.s_user_out_game, \
        #stage selection (server)
        "game_s_select_stage" : tgame_room_logic.inst.s_select_stage, \
        #ready / cancel ready
        "game_s_game_ready" : tgame_room_logic.inst.s_game_ready, \
        #countdown / cancel countdown
        "game_s_countdown" : tgame_room_logic.inst.s_countdown, \
        #new seat in the room
        "game_s_game_side" : tgame_room_logic.inst.s_game_side, \
        #room owner changed
        "game_s_game_master":tgame_room_logic.inst.s_game_master,\
        #game initialization
        "game_s_game_init" : tgame_room_logic.inst.s_game_init, \
        #game start
        "game_s_game_start" : tgame_room_logic.inst.s_game_start, \
        #in-game event (server)
        "game_s_game_event" : tgame_room_logic.inst.s_game_event, \
        #game over and this round's results
        "game_s_game_end" : tgame_room_logic.inst.s_game_end, \
        #player detail info
        "game_s_player_detail_info" : tgame_room_logic.inst.s_player_detail_info, \
        #player private info; only our own is received, so there is no side field
        "game_s_player_secret_info" : tgame_room_logic.inst.s_player_secret_info, \
        #achievement id a player completed
        "game_s_player_finish_achieve" : tgame_room_logic.inst.s_player_finish_achieve, \
    }
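# --- Dispatch sketch (illustrative, not part of the framework): MSG_MAP maps a message
# name to a bound handler, so routing a decoded message only needs a dict lookup.
# def dispatch(msg_name, msg_fields):
#     handler = MSG_MAP.get(msg_name)
#     if handler:
#         handler(msg_fields)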
| Python |
#-- coding: utf-8 -*-
#Entry point of the game client. When the standalone common lobby is used, this function is called when a player enters a game room; from then on control is handed to the game's own logic
def init(**kargs):
    #run the game's own initialization
    import tgame_game
    tgame_game.start_from_init(int(kargs["gameid"]), int(kargs["gamemode"]), int(kargs["uid"]))
#Exit point of the game client. When a player leaves the game room the common lobby calls this function; the game logic must release its objects here to avoid memory leaks
def force_destroy():
    #call the game's own cleanup to release objects
    import tgame_game
    tgame_game.back_to_hall()
| Python |
import iapi
API = None
def init():
global API
API = iapi.API()
| Python |
from t_const import *
#stage unlock requirements: key is a stage id, value is the list of stage ids that must be completed first
STAGE_REQUIRE = {
1 : [],
}
DATA = {
1 : {
"monster" : "tgame_map_demo",
},
}
def set_stage_info():
global DATA
for value in DATA.values():
tgame_map = value["monster"]
tgame_map = __import__(tgame_map)
stage_info = tgame_map.STAGE_INFO
value['name'] = stage_info.get('stage_name', '')
value['hard'] = stage_info.get('stage_hard', '')
value['time'] = stage_info.get('stage_time', 0)
value['win_score'] = stage_info.get('stage_win_score', 0)
value['lose_score'] = stage_info.get('stage_lose_score', 0)
value['stage_img_bag'] = stage_info.get('stage_img_bag','')
value['stage_xiaoditu'] = stage_info.get('stage_xiaoditu','')
#print "stage_info = ", DATA
set_stage_info()
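# --- Illustrative sketch (assumption): a stage module such as tgame_map_demo is expected
# to export a STAGE_INFO dict with the keys read in set_stage_info(), e.g.:
# STAGE_INFO = {
#     'stage_name'      : 'Demo Stage',
#     'stage_hard'      : 'easy',
#     'stage_time'      : 300,
#     'stage_win_score' : 100,
#     'stage_lose_score': 0,
#     'stage_img_bag'   : 'tgame/res/demo_bag.png',
#     'stage_xiaoditu'  : 'tgame/res/demo_minimap.png',
# }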
| Python |
# event definitions
# turn / rotate
C_TUREND = 0x01
S_TUREND = 0x01
# move
C_MOVE = 0x02
S_MOVE = 0x02
# move 2 (run)
C_RUN = 0x03
S_RUN = 0x03
# attack
C_ATK = 0x04
S_ATK = 0X04
# pick up an item
C_PICKITEM = 0X05
S_PICKITEM = 0X05
# use an item
C_USEITEM = 0X06
S_USEITEM = 0X06
# game time
S_GAMETIME = 0x91
# zombie spawn
S_GAME_BULZB = 0x71
# event
S_GAME_EVENT = 0x81
"""message structure definitions"""
cmd_msg = {
    #game mode info
"game_s_game_mode" : {
"mode" : "B",
"stage" : "I",
"master" : "B",
"max_num" : "I",
},
    #client finished game initialization
"game_c_game_init":{
"binit" : "B",
},
    #game initialization at game start (server)
"game_s_game_init":{
"stage":"I",
},
    #player entered
"game_s_user_in_game":{
"side" : "B",
"urs" : "s",
"uid" : "I",
"nickname" : "s",
"is_ready" : "B",
"gmlvl" : "I",
"hid" : "I",
},
    #player left
"game_s_user_out_game" : {
"side" : "B",
},
    #stage selection (client)
"game_c_select_stage" : {
"stage" : "I",
},
    #stage selection (server)
"game_s_select_stage" : {
"stage" : "I",
},
    #ready / cancel ready
"game_c_game_ready" : {
"is_ready" : "B",
},
    #ready / cancel ready
"game_s_game_ready" : {
"side" : "B",
"is_ready" : "B",
},
    #countdown / cancel countdown
"game_s_countdown" : {
"is_countdown" : "B",
},
    #packet holding a single int, used to pack variable-length lists of ints
"game_single_int" : {
"value" : "i",
},
    #per-player initialization info broadcast when the game starts
"game_player_start_info" : {
"side" : "B",
"ifac" : "i",
"money" : "i",
"dicsprite" : "i",
}, \
    #game start
"game_s_game_start" : {
"stage" : "I",
"player_list" : "game_player_start_info",
},
    #in-game event (client)
"game_c_game_event" : {
"frame" : "I",
"event_id" : "H",
"param" : "s",
}, \
    #in-game event (server)
"game_s_game_event" : {
"frame" : "I",
"event_id" : "H",
"param" : "s",
}, \
    #a single player's game result
"game_player_end_info" : {
"side" : "B",
"rank" : "B",
"score" : "i",
"killed" : "i",
},
    #game over and this round's results
"game_s_game_end" : {
"win" : "B",
"stage" : "I",
"player_list" : "game_player_end_info",
}, \
    #leave the room
"game_c_leave_game" : {
"no_use" : "B",
},
    #end the game
"game_c_game_end" : {
"iswin" : "B",
},
    #player detail info
"game_s_player_detail_info" : {
"side" : "B",
"score" : "i",
"_win" : "I",
"_lose" : "I",
"_draw" : "I",
"_break" : "I",
"avatar" : "s",
"pet" : "s",
},
    #player private info; only our own is received, so there is no side field
"game_s_player_secret_info" : {
"yuanbao" : "i",
"yuanbao_free" : "i",
"finish_stage" : "game_single_int",
"finish_achieve" : "game_single_int",
},
    #achievement id a player completed
"game_s_player_finish_achieve" : {
"achieve_id" : "I",
},
    #game initialization (ready phase)
"game_s_game_init" :{
"stage" : "I",
#"_type" : S_GAME_INIT,
}, \
    #room owner kicks a player
"game_c_game_kick" :{
"hid" : "I",
},
    #new seat in the room (sent by the client)
"game_c_game_side" :{
"uid":"I",
"old_side" : "B",
"new_side" : "B",
},
    #new seat in the room (sent by the server)
"game_s_game_side" :{
"uid":"I",
"old_side" : "B",
"new_side" : "B",
},
    #update the room owner
"game_c_game_master":{
"master" : "B",
},
    #update the room owner
"game_s_game_master":{
"master" : "B",
},
}
| Python |
#-- coding: utf-8 -*-
import t_const
"""wizard (cheat) command definitions are shared by client and server; the other functions are client-only"""
WIZARD_CHANGE_SUN = 0x01 #change the amount of sun
WIZARD_END_GAME = 0x02 #end the game immediately
WIZARD_ADD_ZOMBIE = 0x03 #spawn zombies immediately
#help text dictionary (user-facing strings)
HELP_TEXT = {WIZARD_CHANGE_SUN : "改变阳光数量指令//change_sun:参数1:整数(可正负,代表阳光数量的改变值),示例://change_sun 100", \
WIZARD_END_GAME : "立刻结束游戏指令//end_game:参数1:0或1(1代表胜利,0代表失败),示例://end_game 1", \
WIZARD_ADD_ZOMBIE : "增加n个僵尸//add_zombie: 参数1:正整数(代表僵尸数量);参数2:正整数(代表僵尸种类,-1代表随机),示例://add_zombie 10 -1", \
}
#output text to the chat window, used to print help text
def out_put(text):
    import tgame_ui_wait
    tgame_ui_wait.inst.add_notice(text)
#check whether a string matches a wizard command; called before sending a chat message to filter wizard commands out
def check_wizard(text):
    #an empty string or None is not a wizard command
    if not text:
        return True
    #split on whitespace
    line = text.split()
    #the help command: print every help entry
    if line[0] == "//help":
        for text in HELP_TEXT.itervalues():
            out_put(text)
        return False
    #check for in-game wizard commands
    elif check_playing_wizard(line):
        return False
    #check for waiting-state wizard commands
    elif check_waiting_wizard(line):
        return False
    # drop chat focus once input is finished
    import tgame_game
    tgame_game.tgame_API.chat_set_focus(False)
    return True
#check for in-game wizard commands
def check_playing_wizard(line):
import tgame_mapmgr, tgame_command
    #if the game manager object does not exist, return False
if not tgame_mapmgr.inst:
return False
    #if we are not in the playing state, return False
if tgame_mapmgr.inst.iStatus != t_const.GAME_PLAYING:
return False
    #change the amount of sun
if line[0] == "//change_sun":
try:
dEnergy = int(line[1])
except:
out_put(HELP_TEXT[WIZARD_CHANGE_SUN])
return False
tgame_command.inst.send_wizard(WIZARD_CHANGE_SUN, [dEnergy, ])
return True
    #end the game immediately
elif line[0] == "//end_game":
try:
win = int(line[1])
except:
out_put(HELP_TEXT[WIZARD_END_GAME])
return False
tgame_command.inst.send_wizard(WIZARD_END_GAME, [win, ])
return True
    #spawn zombies immediately
elif line[0] == "//add_zombie":
try:
iNum = int(line[1])
except:
out_put(HELP_TEXT[WIZARD_ADD_ZOMBIE])
return False
if iNum <= 0:
out_put(HELP_TEXT[WIZARD_ADD_ZOMBIE])
return False
try:
iJob = int(line[2])
except:
iJob = -1
tgame_command.inst.send_wizard(WIZARD_ADD_ZOMBIE, [iNum, iJob])
return True
return False
#check for waiting-state wizard commands
def check_waiting_wizard(line):
import tgame_mapmgr, tgame_command
if not tgame_mapmgr.inst:
return False
if tgame_mapmgr.inst.iStatus != t_const.GAME_WAIT:
return False
return False
| Python |
# -*- coding: utf-8 -*-
import base_object
import t_cmd
import cg_network
import cPickle
import tgame_room_logic
import tgame_mapmgr
inst = None
#module initialization
def init():
global inst
if not inst:
inst = CCommand()
#module cleanup, release the reference
def destroy():
global inst
if inst:
inst.destroy()
inst = None
#class that packs and sends network messages; every client-to-server message is packed and sent here
class CCommand(base_object.CObject):
def __init__(self):
super(CCommand, self).__init__()
    #cleanup; this object holds no member state, so there is nothing to release
def destroy(self):
pass
    #pack and send the stage-selection message; iStage is the stage id
def send_select_stage(self, iStage):
cg_network.sender.game_c_select_stage(stage=iStage)
    #pack and send ready/cancel-ready; flag=1 means ready, flag=0 means cancel
def send_game_ready(self, flag):
cg_network.sender.game_c_game_ready(is_ready=flag)
    #pack and send leave-room; no parameters
def send_leave_game(self):
cg_network.sender.game_c_leave_game(no_use=0)
    #end the game (single-player use)
def send_game_end(self,flag):
cg_network.sender.game_c_game_end(iswin=flag)
    #kick a player
def send_game_kick(self,obj):
cg_network.sender.game_c_game_kick(hid=obj.hid)
    #change seat
def send_game_side(self,iuid,old,new):
cg_network.sender.game_c_game_side(uid=iuid,old_side=old,new_side=new)
    #change the room owner
def send_game_master(self,imaster):
cg_network.sender.game_c_game_master(master=imaster)
    #pack and send a wizard command; cmds is the wizard command id, param_list is the command payload (a variable-length int list depending on the id)
def send_wizard(self, cmds, param_list):
int_list = []
for key in param_list:
obj = cg_network.get_game_room_msgmgr().game_single_int(value=key)
int_list.append(obj)
cg_network.sender.game_c_wizard(head=cmds, param=int_list)
    #game initialization finished
def send_game_init(self, flag):
print "send_game_init_deno!"
cg_network.sender.game_c_game_init(binit=flag)
    #pack and send an in-game sub-event; event is the sub-event id, param_list is its payload
def send_game_event(self, event, param_list):
_str = cPickle.dumps(param_list)
#cg_network.sender.game_c_game_event(frame=0, event_id=event, param=_str)
def send_turend(self, rotate):
param = (tgame_room_logic.inst.objMe.uid, rotate)
self.send_game_event(t_cmd.C_TUREND, param)
def send_move(self, ismove, dirx, diry, speed):
param = (tgame_room_logic.inst.objMe.uid, ismove, dirx, diry, speed)
self.send_game_event(t_cmd.C_MOVE, param)
def send_run(self, posx, posy):
param = (tgame_room_logic.inst.objMe.uid, posx, posy)
self.send_game_event(t_cmd.C_RUN, param)
def send_atk(self, pos, dir, weapon):
param = (tgame_room_logic.inst.objMe.uid, pos, dir, weapon)
self.send_game_event(t_cmd.C_ATK, param)
    # send a pick-up-item message; parameter: item name
def send_pickitem(self, names):
param = (tgame_room_logic.inst.objMe.uid, names)
self.send_game_event(t_cmd.C_PICKITEM, param)
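# --- Usage sketch (illustrative): the module is driven through its singleton, e.g.
#   import tgame_command
#   tgame_command.init()                    # create the CCommand singleton
#   tgame_command.inst.send_game_ready(1)   # tell the server we are ready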
#---------------------------------------------------------------------- | Python |
'''
main.py
Main file for 2 Pints
2 Pints is an implementation of Quarto written in Python
Dave Schwantes
2010
www.dinosaurseateverybody.com
'''
from QuartoGame import *
def main():
print "2 Pints"
print "a Quarto implementation by Dave Schwantes"
print ""
game = QuartoGame()
print "The Board:"
game.displayBoard()
print ""
while not game.isGameOver():
print "Available Pieces:"
game.displayAvailablePieces()
print ""
print "Player %i select a piece:" % (game.turn)
selected_piece = input()
game.selectPiece(selected_piece)
game.nextTurn()
print "Selected Piece:"
game.selectedPiece.displayPiece()
print "Player %i select x for piece:" % (game.turn)
x = input()
print "Player %i select y for piece:" % (game.turn)
y = input()
game.placePiece(x,y)
game.displayBoard()
print ""
print "Game Over"
print "Winner: player %i" % (game.turn)
if __name__ == '__main__':
main() | Python |
import sys
from copy import deepcopy
class QuartoBoard:
def __init__(self):
self.spots = [None] * 4
for i in range(4):
self.spots[i] = [None] * 4
def placePiece(self,piece,x,y):
if not self.spots[x][y]:
self.spots[x][y] = deepcopy(piece)
return True
return False
def displayBoard(self):
i = 0
while i<=13:
sys.stdout.write("-")
i +=1
y = 11
while y >= 0:
print ""
x = 0
while x <= 12:
if y%3 == 0:
sys.stdout.write("-")
else:
spot_x = x/3
spot_y = y/3
if x%3 == 0:
sys.stdout.write("|")
elif self.spots[spot_x][spot_y]:
if x%3 == 1 and y%3 == 2:
attr = 0
elif x%3 == 2 and y%3 == 2:
attr = 1
elif x%3 == 1 and y%3 == 1:
attr = 2
else:
attr = 3
sys.stdout.write(self.spots[spot_x][spot_y].attributes[attr])
else:
sys.stdout.write(" ")
x += 1
y -= 1
| Python |
'''
QuartoGame.py
Class for the QuartoGame object
'''
import math
from QuartoBoard import *
from QuartoPiece import *
class QuartoGame:
def __init__(self):
self.board = QuartoBoard()
self.turn = 0
self.availablePieces = []
self.buildPieces()
self.history = [] # ordered list of placed pieces and their (x,y) tuple location
self.selectedPiece = None
def buildPieces(self):
i = 0
while i < 16:
attr_val = self.int2bin(i)
self.availablePieces.append(QuartoPiece(attr_val[0], attr_val[1], attr_val[2], attr_val[3]))
i += 1
def nextTurn(self):
self.turn = int(math.fabs(self.turn - 1))
def isWon(self):
if len(self.availablePieces) > 12:
# you can't win without putting down 4 pieces
return False
else:
attr = 0
while attr < 4:
i = 0
while i < 4:
four_sum = 0
for p in self.board.spots[i][:]:
if p : four_sum += int(p.attributes[attr])
else : four_sum +=5
if four_sum == 0 or four_sum == 4:
print "four_sum = %i, i = %i, attr = %i" % (four_sum,i,attr)
return True
                    four_sum = 0
                    # check column i
                    for p in [row[i] for row in self.board.spots]:
if p : four_sum += int(p.attributes[attr])
else : four_sum +=5
if four_sum == 0 or four_sum == 4:
print "four_sum = %i, i = %i, attr = %i" % (four_sum,i,attr)
return True
i +=1
                # for the diagonals: accumulate the whole diagonal before checking
                four_sum = 0
                i = 0
                while i < 4:
                    if self.board.spots[i][i] : four_sum += int(self.board.spots[i][i].attributes[attr])
                    else : four_sum +=5
                    i += 1
                if four_sum == 0 or four_sum == 4:
                    print "four_sum = %i, attr = %i" % (four_sum,attr)
                    return True
                four_sum = 0
                i = 0
                while i < 4:
                    if self.board.spots[i][int(math.fabs(i-3))] : four_sum += int(self.board.spots[i][int(math.fabs(i-3))].attributes[attr])
                    else : four_sum +=5
                    i += 1
                if four_sum == 0 or four_sum == 4:
                    print "four_sum = %i, attr = %i" % (four_sum,attr)
                    return True
                attr +=1
return False
def isDraw(self):
if not self.isWon():
if len(self.availablePieces) == 0:
return True
return False
def isGameOver(self):
if self.isWon():
return True
if self.isDraw():
return True
return False
def selectPiece(self, piece_num):
if piece_num < len(self.availablePieces):
self.selectedPiece = self.availablePieces.pop(piece_num)
return True
return False
def placePiece(self,x,y):
if self.selectedPiece:
if self.board.placePiece(self.selectedPiece,x,y):
self.selectedPiece.placedBy = self.turn
self.selectedPiece.onBoard = True
self.history.append((deepcopy(self.selectedPiece),(x,y))) # add move to history
self.selectedPiece = None # clear selected piece in game
return True
return False
def getState(self):
return (self.board, self.selectedPiece)
def displayAvailablePieces(self):
x = 0
for p in self.availablePieces:
print "%i = " % (x)
x += 1
p.displayPiece()
def displayBoard(self):
self.board.displayBoard()
# General Functions
def int2bin(self, n, count=4):
return "".join([str((n >> y) & 1) for y in range(count - 1, -1, -1)])
| Python |
'''
main.py
Main file for 2 Pints
2 Pints is an implementation of Quarto written in Python
Dave Schwantes
2010
www.dinosaurseateverybody.com
'''
from QuartoGame import *
from TwoPints import *
def main():
print "2 Pints"
print "a Quarto playing bot by Dave Schwantes"
print ""
game = QuartoGame()
bot = TwoPints()
print "The Board:"
game.displayBoard()
print ""
while not game.isGameOver():
if game.turn == 0:
print "2 Pints Bot selecting piece..."
bot.selectPiece(game)
print "Piece selected."
else:
print "Available Pieces:"
game.displayAvailablePieces()
print ""
print "Player %i select a piece:" % (game.turn)
selected_piece = input()
game.selectPiece(selected_piece)
game.nextTurn()
print "Selected Piece:"
game.selectedPiece.displayPiece()
if game.turn == 0:
print "2 Pints Bot placing piece on board..."
bot.placePiece(game)
print "Piece placed on board."
else:
print "Player %i select x for piece:" % (game.turn)
x = input()
print "Player %i select y for piece:" % (game.turn)
y = input()
game.placePiece(x,y)
game.displayBoard()
print ""
print "Game Over"
print "Winner: player %i" % (game.turn)
if __name__ == '__main__':
main() | Python |
'''
TwoPints.py
Class for TwoPints Object
Two Pints is the computer "player"
'''
import random
from copy import deepcopy
class TwoPints:
def __init__(self):
pass
def makeMove(self):
pass
def selectPiece(self,game):
# for the opening move select a random piece
if len(game.availablePieces) == 16:
start_piece = random.randint(0,15)
game.selectPiece(start_piece)
pass
def placePiece(self,game):
pass
# Alpha-beta pruning stuff:
def getNextStateAlphabeta(self,game,lookahead):
        def max_value(state,alpha,beta,depth):
# check if we've reached the depth or if we're at a final state ( where the selectedPiece = None )
            if depth > lookahead or not state.selectedPiece:
return self.stateEval(state)
v = float('-inf') # -infinity
for s in self.getSuccessors(state):
v = max(v, min_value(s,alpha,beta,depth+1))
if v >= beta:
return v
alpha = max(alpha,v)
return v
        def min_value(state,alpha,beta,depth):
# check if we've reached the depth or if we're at a final state ( where the selectedPiece = None )
            if depth > lookahead or not state.selectedPiece:
return self.stateEval(state)
v = float('inf') # infinity
for s in self.getSuccessors(state):
v = min(v, max_value(s,alpha,beta,depth+1))
if v <= alpha:
return v
beta = min(beta,v)
return v
neg_infinity = float('-inf')
infinity = float('inf')
        state = self.argmax(self.getSuccessors(game), lambda s:min_value(s,neg_infinity,infinity,0))
return state
def getSuccessors(self,state):
'''
state = a QuartoGame
starting with naive approach
need to work on accounting for symmetries
'''
        successors = []
        x = 0
        while x < 4:
            y = 0
            while y < 4:
                if not state.board.spots[x][y]:
                    placed = deepcopy(state)
                    placed.placePiece(x,y)
                    # hand the opponent each remaining piece as a separate successor state
                    for i in range(len(placed.availablePieces)):
                        new_state = deepcopy(placed)
                        new_state.selectPiece(i)
                        successors.append(new_state)
                y += 1
            x += 1
        return successors
    def stateEval(self,state):
pass
def argmin(self,seq, fn):
'''Return an element with lowest fn(seq[i]) score; tie goes to first one.
>>> argmin(['one', 'to', 'three'], len)
'to'
'''
best = seq[0]; best_score = fn(best)
for x in seq:
x_score = fn(x)
if x_score < best_score:
best, best_score = x, x_score
return best
def argmax(self,seq, fn):
'''Return an element with highest fn(seq[i]) score; tie goes to first one.
>>> argmax(['one', 'to', 'three'], len)
'three'
'''
return self.argmin(seq, lambda x: -fn(x))
def int2bin(self, n, count=4):
return "".join([str((n >> y) & 1) for y in range(count - 1, -1, -1)])
| Python |
'''
QuartoPiece.py
Class for the QuartoPiece object
'''
class QuartoPiece:
def __init__(self, height, color, shape, fill):
self.attributes = (height, color, shape, fill)
self.onBoard = False
self.placedBy = None
pass
def displayPiece(self):
print self.attributes | Python |
#!/usr/bin/env python
from django.core.management import execute_manager
import imp
try:
imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
sys.exit(1)
import settings
if __name__ == "__main__":
execute_manager(settings)
| Python |
from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from django.views.generic.simple import direct_to_template
import settings
urlpatterns = patterns('',
(r'^$', 'Exam.views.exam_list'),
(r'^exams/?$', 'Exam.views.exam_list'),
(r'^view/(\d+)/?$', 'Exam.views.view_exam'),
(r'^create/?$', 'Exam.views.create_exam'),
(r'^delete/(\d+)/?$', 'Exam.views.delete_exam'),
(r'^grades/?$', 'Exam.views.calc_grades'),
(r'^accounts/login/$','django.contrib.auth.views.login'),
(r'^accounts/logout/$','Exam.views.logout_view'),
(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
# Uncomment the admin/doc line below to enable admin documentation:
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
| Python |
import os.path
# Django settings for YourSchool project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'C:/Dropbox/GLS/school.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
MEDIA_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'media')
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
    'static',
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '@@%xq!v^)plh242dt9l3k!8@29atzk0$26yf+x@-rc55z*k7fv'
# List of callables that know how to import templatess from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'YourSchool.urls'
TEMPLATE_DIRS = (
    'templates',
)
TEMPLATE_CONTEXT_PROCESSORS = ("django.contrib.auth.context_processors.auth","django.core.context_processors.auth", "django.core.context_processors.debug", "django.core.context_processors.i18n", "django.core.context_processors.media", 'django.core.context_processors.request',)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'Exam',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
LOGIN_URL = '/accounts/login'
LOGIN_REDIRECT_URL = '/exams/' | Python |
from django.db import models
class Exam(models.Model):
name = models.CharField(max_length=64)
created = models.DateField()
deadline = models.DateField()
author = models.CharField(max_length=64)
class ExamQuestion(models.Model):
question = models.CharField(max_length=64)
exam = models.ForeignKey(Exam)
class ExamAnswer(models.Model):
answer = models.CharField(max_length=64)
question = models.ForeignKey(ExamQuestion)
correctAnswer = models.IntegerField()
class UserAnswer(models.Model):
username = models.CharField(max_length=64)
answer = models.ForeignKey(ExamAnswer)
class Teacher(models.Model):
username = models.CharField(max_length=64)
class Grades(models.Model):
username = models.CharField(max_length=64)
exam = models.ForeignKey(Exam)
grade = models.FloatField()
class allGrades(models.Model):
name = models.CharField(max_length=64)
created = models.DateField()
deadline = models.DateField()
author = models.CharField(max_length=64)
username = models.CharField(max_length=64)
exam = models.ForeignKey(Exam)
grade = models.FloatField()
class Meta:
db_table = 'exams_allGrades'
managed = False | Python |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
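# A second, illustrative test (added as an example): exercises the Exam model from
# Exam/models.py inside Django's test database; the field names match the model definition.
from datetime import date
from Exam.models import Exam
class ExamModelTest(TestCase):
    def test_create_exam(self):
        exam = Exam.objects.create(name="Sample", created=date.today(),
                                   deadline=date.today(), author="teacher1")
        self.assertEqual(Exam.objects.count(), 1)
        self.assertEqual(exam.name, "Sample")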
| Python |
# Create your views here.
# -*- coding: utf-8 -*-
from datetime import datetime, date
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.template import RequestContext
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.core.context_processors import csrf
from pprint import pprint
from Exam.models import *
# Check if user is a teacher
def is_teacher(user):
return Teacher.objects.all().filter(username=user).count() > 0
@login_required
def index(request):
request.teacher = is_teacher(request.user)
return render_to_response('index.html', RequestContext(request))
@login_required
def logout_view(request):
logout(request)
return redirect(exam_list)
@login_required
def exam_list(request):
request.teacher = is_teacher(request.user)
expired_exams = Exam.objects.all().filter(deadline__lt = date.today())
if request.teacher:
comp_exams = allGrades.objects.all()
open_exams = Exam.objects.all().filter(deadline__gte = date.today())
else:
comp_exams = allGrades.objects.all().filter(username = request.user)
open_exams = Exam.objects.all().filter(deadline__gte = date.today()).exclude(allgrades__in = comp_exams)
obj = { 'open_exams' : open_exams, 'expired_exams' : expired_exams, 'comp_exams' : comp_exams }
return render_to_response('exam_list.html', obj, RequestContext(request))
@login_required
def create_exam(request):
request.teacher = is_teacher(request.user)
if not request.teacher:
return HttpResponseRedirect('/exams')
if request.method == 'GET':
return render_to_response('create_exam.html', RequestContext(request))
else:
form = request.POST
qL = form.getlist('question')
tmp_e = Exam(name = form['name'], created = datetime.now(), deadline = form['deadline'], author = request.user)
tmp_e.save() # Save Exam
getQ = form.getlist('q')
getA = form.getlist('a')
q_counter = 0
for q_tmp in getQ:
q_counter += 1
a_counter = 0
for a_tmp in getA:
a_counter += 1
# Questions cannot be empty
if q_counter < 1:
return HttpResponse('Questions can not be empty!')
# Answers cannot be empty
if a_counter < 1:
return HttpResponse('Answers can not be empty!')
i = 0
for q in qL:
tmp_q = ExamQuestion(question = q, exam_id = tmp_e.id)
tmp_q.save() # Save Question
aL = form.getlist('answer[' + str(getQ[i]) + ']')
j = 0
for a in aL:
tmp_a = ExamAnswer(answer = a, question_id = tmp_q.id, correctAnswer = form['correct[' + str(getQ[i]) + '][' + str(getA[j]) + ']'])
tmp_a.save() # Save Answer
j += 1
i += 1
return HttpResponseRedirect('/exams')
@login_required
def view_exam(request, exam_id):
request.teacher = is_teacher(request.user)
tmp_e = Exam.objects.get(pk = exam_id)
uhc = Grades.objects.all().filter(username = request.user, exam__id = exam_id)
# Teachers can not take exams
# Student may not have taken the exam
# Exam deadline can not been reached
if (request.teacher or uhc.count() > 0) or (tmp_e.deadline < date.today()):
return HttpResponseRedirect('/exams')
if request.method == 'GET':
question = ExamQuestion.objects.filter(exam__id = exam_id)
q = []
for quest in question:
a = []
answers = ExamAnswer.objects.filter(question__id = quest.id)
for ans in answers:
a.append({"Aid" : ans.id, "Aanswer" : ans.answer, "Aquestion_id" : ans.question_id, "AcorrectAnswer" : ans.correctAnswer})
q.append({"Qid" : quest.id,
"Qquestion" : quest.question,
"Qexam_id" : quest.exam_id,
"Qanswer" : a })
obj = {"exam_questions" : q}
return render_to_response('exam_details.html', obj, RequestContext(request))
else:
form = UserAnswer(request.POST)
form1 = form.id
count = 1
answ = []
for a in form1:
obje = UserAnswer(username = request.user.username, answer_id = form1[str(count)])
#find out which grades are correct.
ob = ExamAnswer.objects.get(id = form1[str(count)])
if ob.correctAnswer == 1:
answ.append(1)
else:
answ.append(0)
#save the object to the database
obje.save()
count += 1
#calculate grade and save it to the database
weight = 10.0/(count-1)
grade = 0
for a in answ:
grade += (a * weight)
obj_grade = Grades(username = request.user, exam_id = exam_id, grade = grade)
obj_grade.save()
return HttpResponseRedirect('/grades')
def delete_exam(request, exam_id):
request.teacher = is_teacher(request.user)
if not request.teacher:
return HttpResponseRedirect('/exams')
g = Grades.objects.all().filter(exam__id = exam_id)
e = Exam.objects.all().filter(pk = exam_id)
q = ExamQuestion.objects.all().filter(exam__id = exam_id)
for qitem in q:
a = ExamAnswer.objects.all().filter(question__id = qitem.id)
a.delete()
g.delete()
e.delete()
q.delete()
return HttpResponseRedirect('/exams')
def calc_grades(request):
MyGrades = allGrades.objects.filter(username = request.user)
GradesAndAverage = []
sum_my_grades = 0
count_my_grades = 0
for g in MyGrades:
gr = Grades.objects.filter(exam__id = g.exam_id).order_by('exam__id')
count_my_grades += 1
sum_my_grades += float(g.grade)
count = 0
sum = 0.0
for av in gr:
sum = sum + float(av.grade)
count += 1
aver = (sum/count)
GradesAndAverage.append({"name" : g.name, "username" : g.username, "grade" : g.grade, "average" : aver})
    # guard against division by zero when the user has no grades yet
    if count_my_grades > 0:
        my_Grades = sum_my_grades/count_my_grades
        obj = {"all" : GradesAndAverage, "Average_my_grades" : my_Grades, "GradeCount" : MyGrades.count()}
    else:
        obj = {"GradeCount" : MyGrades.count()}
return render_to_response('grades.html', obj, RequestContext(request))
| Python |
import simplejson, flickrapi, liblo, pyglet, math, rabbyt
from urllib import urlopen
class skreenPics:
"""class to hold the pictures from flickr"""
api_key = '59725ca9f32547bd79f608c18a2b7669'
flickr = flickrapi.FlickrAPI(api_key)
def jsonPstrip(self,str):
"""strip jsonP style function call wrappers"""
return str[str.find('(')+1:str.rfind(')')]
def flickrRequest(self,func, **args):
"""return simplejson parsed object of results"""
return simplejson.loads(self.jsonPstrip(func(format='json',**args)))
def __init__(self):
"""do all the magic, and populate the sprites varable"""
photos=self.flickrRequest(self.flickr.photos_search,tags="ir")['photos']['photo']
urls=["http://farm%(farm)s.static.flickr.com/%(server)s/%(id)s_%(secret)s.jpg" % x for x in photos]
images=[pyglet.image.load('a.jpg',urlopen(x)) for x in urls[0:10]]
self.sprites=[rabbyt.Sprite(texture=x.texture) for x in images]
[[sprite._set_scale(0.5)] for sprite in self.sprites]
class photoDeck(list):
"""a stack of photos"""
def draw(self):
"""draw all of the photos, in stack order"""
rabbyt.clear()
rabbyt.render_unsorted(self)
def pull(self,photo):
"""put the last touched photo on top"""
if photo != None:
self.remove(photo)
self.append(photo)
def touchedPhoto(self,coord):
"""return the touched photo at coordinates"""
x,y=coord
for photo in self.__reversed__():
#~ if self.debug: print photo.x, photo.y, photo.left, photo.right, photo.top, photo.bottom
if (photo.left < x) and (x < photo.right) and (photo.bottom < y) and (y < photo.top):
return photo
return None
class touchList(list):
"""a class that responds to OSC messages with TUIO data"""
lastAlive=[]
lastSet=[]
def tuioAlive(self,pointsList):
"""process the arguments of a TUIO Alive message"""
for x in self:
if x[0] not in pointsList:
self.remove(x)
self.statsRemove(x)
def tuioSet(self,values):
"""process a TUIO Set message"""
self.append(values)
self.statsUpdate(values)
def handleOsc(self,path,args,types,src):
"""OSC spawned event callback"""
if args[0]=='alive':
messages=args[1:len(args)]
if messages != self.lastAlive:
self.tuioAlive(messages)
self.lastAlive=messages
if args[0]=='set':
messages=args[1:len(args)]
if messages != self.lastSet:
for x in self:
if x[0]==args[1]:
self.remove(x)
self.tuioSet(args[1:len(args)])
class orienter(touchList):
"""orient the touches to the display"""
##osc
flipX=True
flipY=True
flipXDirection=True
flipYDirection=True
#TuioSimulator
#~ flipX=False
#~ flipY=True
#~ flipXDirection=False
#~ flipYDirection=True
#-------------------------------------------------------------------
def convert(self,x,y):
"""convert a point"""
w=self.window.width
h=self.window.height
if self.flipX:
x=w-(x*w)
else:
x=x*w
if self.flipY:
y=h-(y*h)
else:
y=y*h
return x,y
def convertChange(self,x,y):
"""convert a distance"""
w=self.window.width
h=self.window.height
x,y = self.convert(x,y)
if self.flipX:
x=w-x
if self.flipXDirection:
x=-x
if self.flipY:
y=h-y
if self.flipYDirection:
y=-y
return x,y
class touchStats(orienter):
"""a class to hold observed stats about 2D cursor messages"""
previously=[]
eventStarts=[]
differences=[]
latest=[]
lastDiff=[]
start=False
def calculateDifferences(self):
"""calculate the distance between the current and previous updates"""
for x in self.previously:
if x[0]==self.latest[0]:
then=x
now=self.latest
#~ if self.debug: print "then", then, "now", now
thisDiff=(now[0],now[1]-then[1],now[2]-then[2])
self.differences.append(thisDiff)
self.lastDiff=thisDiff
def determineStart(self):
"""determine if we are processing an initial event"""
self.start=True
for x in self.previously:
if x[0]==self.latest[0]:
self.previously.remove(x)
self.start=False
self.previously.append(self.latest)
if self.start:
self.eventStarts.append(self.latest)
self.lastDiff=[self.latest[0],0,0]
#~ if self.debug: print "START",self.start
def statsUpdate(self,values):
"""update the internal stats with the given value"""
self.latest=values
#~ if self.debug: print "statsUpdate"
#~ if self.debug: print self.latest
#~ if self.debug: print self.previously
#~ if self.debug: print self.eventStarts
self.calculateDifferences()
self.removeItem(self.differences,values[0])
self.determineStart()
#~ if self.debug: print self.latest
#~ if self.debug: print self.previously
#~ if self.debug: print self.eventStarts
self.fingerUpdate()
def removeItem(self,alist,ID):
"""removes the latest event from the specified list"""
#~ if self.debug: print "removeItem"
for x in alist:
if x[0]==ID:
alist.remove(x)
def statsRemove(self,finger):
"""remove the specified values from the various lists"""
#~ if self.debug: print "statsRemove",finger
ID=finger[0]
self.removeItem(self.eventStarts,ID)
self.removeItem(self.differences,ID)
self.removeItem(self.previously,ID)
#~ if self.debug: print finger
#~ if self.debug: print self.previously
#~ if self.debug: print self.eventStarts
#~ if self.debug: print self.differences
self.fingerRemove(finger)
class fingerTracker(touchStats):
"""process the state of the fingers on the screen"""
fingerPhoto=[]
canvasFingers=[]
def determineSelected(self):
"""bind fingers to pictures, or the canvas"""
if self.start:
lD=self.latest[0]
finger=self.latest
xy=self.latest[1],self.latest[2]
realxy=self.convert(*xy)
photo=self.deck.touchedPhoto(realxy)
if photo==None:
self.canvasFingers.append(finger)
else:
self.fingerPhoto.append((finger,photo))
self.photoFingerList=[x[0][0] for x in self.fingerPhoto]
self.canvasFingerList=[x[0] for x in self.canvasFingers]
def fingerRemove(self,finger):
"""remove a finger"""
ID=finger[0]
#~ if self.debug: print "fingerRemove"
for finger,photo in self.fingerPhoto:
if finger[0]==ID:
self.fingerPhoto.remove((finger,photo))
for finger in self.canvasFingers:
if finger[0]==ID:
self.canvasFingers.remove(finger)
self.canvasFingerList=[x[0] for x in self.canvasFingers]
self.removeFingerCounts(finger)
def fingerUpdate(self):
"""callback to update finger states"""
self.determineSelected()
#~ if self.debug: print "fP",self.fingerPhoto
#~ if self.debug: print "CF",self.canvasFingers
self.countFingers()
class fingerCounter(fingerTracker):
movePhotos=[]
rotatePhotos=[]
scalePhotos=[]
moveCanvas=(0,0)
previousCenter=[]
previousAngle=[]
previousSpread=[]
thisPhoto=None
thisCenterPoint=(0,0)
thisAngle=0
thisSpread=0
angleDiff=0
spreadDiff=0
GRAD_PI=180.0 / math.pi
def getAngleTrig(self,point):
"""find the angle from the origin of a point, from touchlib"""
x,y=point
if x==0:
if y < 0:
return 270
else:
return 90
elif y ==0:
if x < 0:
return 180
else:
return 0
if y > 0:
if x > 0:
return math.atan(y/x) * self.GRAD_PI
else:
return 180.0 - math.atan(y/-x) * self.GRAD_PI
else:
if x > 0:
return 360.0 - math.atan(-y/x) * self.GRAD_PI
else:
return 180.0 + math.atan(-y/-x) * self.GRAD_PI
def photoFingerCount(self):
ID=self.latest[0]
self.fingercount=[]
if ID in self.photoFingerList:
for finger,photo in self.fingerPhoto:
if finger[0]==ID:
self.thisPhoto=photo
break
for finger,photo in self.fingerPhoto:
if photo==self.thisPhoto:
for touches in self:
if finger[0]==touches[0]:
self.fingercount.append(touches)
else:
for canvasfinger in self.canvasFingers:
for finger in self:
if canvasfinger[0]==finger[0]:
self.fingercount.append(finger)
def findCenterPoint(self):
points=[]
for finger in self.fingercount:
points.append((finger[1],finger[2]))
avgX=sum([x for x,y in points]) / len(points)
avgY=sum([y for x,y in points]) / len(points)
self.thisCenterPoint=(avgX,avgY)
def findAngle(self):
fc=self.fingercount
normalPoint=(fc[0][1]-fc[1][1],fc[0][2]-fc[1][2])
self.thisAngle=self.getAngleTrig(normalPoint)
print "angle", self.thisAngle, normalPoint
def findAngleDiff(self):
for entry in self.previousAngle:
for finger in entry[0]:
if self.latest[0] == finger[0]:
self.angleDiff=entry[1]-self.thisAngle
break
def findSpread(self):
fc=self.fingercount
self.thisSpread=(float(fc[0][1])-float(fc[1][1]))**2 + (float(fc[0][2])-float(fc[1][2]))**2
def findSpreadDiff(self):
for entry in self.previousSpread:
for finger in entry[0]:
if self.latest[0] == finger[0]:
self.spreadDiff=float(self.thisSpread-entry[1]) / float(entry[1])
break
def singleFingers(self):
if not self.start:
if len(self.fingercount) == 1:
for diff in self.differences:
if diff[0] == self.latest[0]:
for finger, photo in self.fingerPhoto:
if finger[0]==self.latest[0]:
self.movePhotos.append((photo,diff[1],diff[2]))
def twoFingers(self):
if len(self.fingercount) == 2:
self.findCenterPoint()
self.findAngle()
self.findSpread()
self.processTwo()
def removeFingerCounts(self,latest):
for entry in self.previousAngle:
for finger in entry[0]:
if latest[0] == finger[0]:
self.previousAngle.remove(entry)
for entry in self.previousSpread:
for finger in entry[0]:
if latest[0] == finger[0]:
self.previousSpread.remove(entry)
def processTwo(self):
self.findAngleDiff()
self.findSpreadDiff()
self.removeFingerCounts(self.latest)
self.previousAngle.append((self.fingercount,self.thisAngle))
self.previousSpread.append((self.fingercount,self.thisSpread))
self.queueTwo()
def queueTwo(self):
self.rotatePhotos.append((self.thisPhoto,self.angleDiff))
self.scalePhotos.append((self.thisPhoto,self.spreadDiff))
print "rotate", self.rotatePhotos
print "scale", self.scalePhotos
thisRot=self.rotatePhotos[0]
thisScale=self.scalePhotos[0]
thisRot[0].rot+=thisRot[1]
thisScale[0].scale+=thisScale[1]
def multipleFingers(self):
if len(self.fingercount) > 2:
self.findCenterPoint()
self.processMultiple()
def processMultiple(self):
pass
def countFingers(self):
#diffFingers=[x[0] for x in self.differences]
#if self.latest[0] in diffFingers:
self.movePhotos=[]
self.rotatePhotos=[]
self.scalePhotos=[]
self.thisPhoto=None
self.photoFingerCount()
self.singleFingers()
self.twoFingers()
self.multipleFingers()
self.takeAction()
class fingerActions(fingerCounter):
"""perform actions based on the calculated stats of actions"""
def rotatePictures(self):
pass
def moveCanvas(self):
"""move the canvas based on differences of canvas bound fingers"""
#~ if self.debug: print "moveCanvas"
cDx,cDy = 0,0
if self.latest[0] in self.canvasFingerList:
diff=self.lastDiff
cDx,cDy = (cDx + diff[1]) / 2 , (cDy + diff[2]) / 2
cDx,cDy = self.convertChange(cDx,cDy)
photoList=[x[1] for x in self.fingerPhoto]
for photo in self.deck:
if photo not in photoList:
photo.x += cDx
photo.y += cDy
def redraw(self):
"""clear the window and redraw the deck"""
self.deck.draw()
def movePictures(self):
"""move pictures based on finger stats"""
#~ if self.debug: print "movePictures"
if self.latest[0] in self.photoFingerList:
for finger,photo in self.fingerPhoto:
if finger[0]==self.latest[0]:
dx,dy=0,0
latest,diff=self.latest,self.lastDiff
if latest[0]==diff[0]:
dx,dy = (dx+diff[1]) , (dy+diff[2])
dx,dy = self.convertChange(dx,dy)
photo.x+=dx
photo.y+=dy
self.deck.pull(photo)
break
def takeAction(self):
"""callback to take action and then to redraw"""
self.movePictures()
self.rotatePictures()
self.moveCanvas()
self.redraw()
class canvas(fingerActions):
"""main window"""
window=pyglet.window.Window(fullscreen=True)
#window=pyglet.window.Window(width=1024,height=768)
#window=pyglet.window.Window()
#~ debug = False
def __init__(self,deck):
self.deck=deck
fingerActions.__init__(self)
def redraw(self):
"""clear the window and redraw the deck"""
self.deck.draw()
def pullLast(self):
"""pull the last touched photo"""
self.deck.pull(self.latestTouched)
#window=pyglet.window.Window()
def debugtuio(path,args,types,src):
"""print the arguments to the /tuio/2dcur message"""
print args
def main():
def tuio(dt):
"""pyglet callback event to check for OSC messages"""
server.recv(0.0333333333333)
skreen=canvas(photoDeck(skreenPics().sprites))
server = liblo.Server(3333)
server.add_method('/tuio/2Dcur', None, skreen.handleOsc)
#server.add_method('/tuio/2Dcur', None, debugtuio)
pyglet.clock.schedule_interval(tuio, 0.0001)
pyglet.clock.schedule(rabbyt.add_time)
rabbyt.set_default_attribs()
rabbyt.set_viewport((skreen.window.height,skreen.window.width))
pyglet.app.run()
if __name__ == "__main__":
main()
| Python |
import liblo, math
#import pyglet, rabbyt
class touchList(list):
"""a class that responds to OSC messages with TUIO data"""
lastAlive=[]
lastSet=[]
def tuioAlive(self,pointsList):
"""process the arguments of a TUIO Alive message"""
for x in self:
if x[0] not in pointsList:
self.remove(x)
self.statsRemove(x)
def tuioSet(self,values):
"""process a TUIO Set message"""
self.append(values)
self.statsUpdate(values)
def handleOsc(self,path,args,types,src):
"""OSC spawned event callback"""
if args[0]=='alive':
messages=args[1:len(args)]
if messages != self.lastAlive:
self.tuioAlive(messages)
self.lastAlive=messages
if args[0]=='set':
messages=args[1:len(args)]
if messages != self.lastSet:
for x in self:
if x[0]==args[1]:
self.remove(x)
self.tuioSet(args[1:len(args)])
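# Hedged illustration (not part of the original source): liblo delivers
# /tuio/2Dcur messages to handleOsc(); a 'set' message carries
# ['set', session_id, x, y, ...], so an update for finger 3 at the
# normalized point (0.5, 0.5) would arrive roughly as
# handleOsc('/tuio/2Dcur', ['set', 3, 0.5, 0.5], None, None).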
class orienter(touchList):
"""orient the touches to the display"""
##osc
#~ flipX=True
#~ flipY=True
#~ flipXDirection=True
#~ flipYDirection=True
#~ #TuioSimulator
flipX=False
flipY=True
flipXDirection=False
flipYDirection=True
#-------------------------------------------------------------------
def convert(self,x,y):
"""convert a point"""
w=self.window.width
h=self.window.height
if self.flipX:
x=w-(x*w)
else:
x=x*w
if self.flipY:
y=h-(y*h)
else:
y=y*h
return x,y
def convertChange(self,x,y):
"""convert a distance"""
w=self.window.width
h=self.window.height
x,y = self.convert(x,y)
if self.flipX:
x=w-x
if self.flipXDirection:
x=-x
if self.flipY:
y=h-y
if self.flipYDirection:
y=-y
return x,y
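# Hedged illustration (not part of the original source): with the
# TuioSimulator flags above (flipX=False, flipY=True) and the 800x600
# dummy window defined below, convert() maps the normalized TUIO point
# (0.25, 0.25) to pixels as x = 0.25*800 = 200 and y = 600 - 0.25*600 = 450,
# so it would return (200.0, 450.0).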
class touchStats(orienter):
"""a class to hold observed stats about 2D cursor messages"""
previously=[]
eventStarts=[]
differences=[]
latest=[]
lastDiff=[]
start=False
def calculateDifferences(self):
"""calculate the distance between the current and previous updates"""
for x in self.previously:
if x[0]==self.latest[0]:
then=x
now=self.latest
#~ if self.debug: print "then", then, "now", now
thisDiff=(now[0],now[1]-then[1],now[2]-then[2])
self.differences.append(thisDiff)
self.lastDiff=thisDiff
def determineStart(self):
"""determine if we are processing an initial event"""
self.start=True
for x in self.previously:
if x[0]==self.latest[0]:
self.previously.remove(x)
self.start=False
self.previously.append(self.latest)
if self.start:
self.eventStarts.append(self.latest)
self.lastDiff=[self.latest[0],0,0]
#~ if self.debug: print "START",self.start
def statsUpdate(self,values):
"""update the internal stats with the given value"""
self.latest=values
#~ if self.debug: print "statsUpdate"
#~ if self.debug: print self.latest
#~ if self.debug: print self.previously
#~ if self.debug: print self.eventStarts
self.calculateDifferences()
self.removeItem(self.differences,values[0])
self.determineStart()
#~ if self.debug: print self.latest
#~ if self.debug: print self.previously
#~ if self.debug: print self.eventStarts
#
# self.fingerUpdate() # to be overridden
print "update", self.latest
def removeItem(self,alist,ID):
"""removes the latest event from the specified list"""
#~ if self.debug: print "removeItem"
for x in alist:
if x[0]==ID:
alist.remove(x)
def statsRemove(self,finger):
"""remove the specified values from the various lists"""
#~ if self.debug: print "statsRemove",finger
ID=finger[0]
self.removeItem(self.eventStarts,ID)
self.removeItem(self.differences,ID)
self.removeItem(self.previously,ID)
#~ if self.debug: print finger
#~ if self.debug: print self.previously
#~ if self.debug: print self.eventStarts
#~ if self.debug: print self.differences
#
#self.fingerRemove(finger) #to be overridden
print "remove", finger
class dummy:
pass
class canvas(touchStats):
"""main window"""
#window=pyglet.window.Window()
window=dummy()
window.width=800
window.height=600
debug = False
def redraw(self):
"""clear the window and redraw the deck"""
#pyglet.clock.tick()
#self.window.switch_to()
#self.window.dispatch_events()
#self.window.dispatch_event('on_draw')
#self.window.flip()
pass
def debugtuio(path,args,types,src):
"""print the arguments to the /tuio/2dcur message"""
print args
def main():
def tuio(dt):
"""pyglet callback event to check for OSC messages"""
server.recv(0.0333333333333)
twoDcur=canvas()
server = liblo.Server(3333)
server.add_method('/tuio/2Dcur', None, twoDcur.handleOsc)
#server.add_method('/tuio/2Dcur', None, debugtuio)
while True:
tuio(0)
if __name__ == "__main__":
main()
| Python |
#!/usr/bin/python
# gameswf_batch_test.py - Thatcher Ulrich <tu@tulrich.com> 2005
# This source code has been donated to the Public Domain. Do
# whatever you want with it.
# Script for batch regression tests on gameswf.
import string
import sys
import commands
import difflib
import re
GAMESWF = "./gameswf_test_ogl"
BATCH_ARGS = " -r 0 -1 -t 10 -v "
def run_batch_test(testname, testfile, expected_output):
'''Run gameswf on a test file, and compare its output to the given expected output.
Return an error code and a report string summarizing the results'''
report = "";
success = True;
[status, output] = commands.getstatusoutput(GAMESWF + BATCH_ARGS + testfile)
# Clean up the output.
output = output.splitlines(1)
output = map(fix_string, output) # lose trailing newlines, avoid DOS/Unix confusion
expected_output = map(fix_string, expected_output) # lose trailing newlines, avoid DOS/Unix confusion
if (status != 0):
success = False;
report += format_header_line(testname, "[failed]")
report += " command returned status code " + str(status) + "\n"
report += " command output:\n"
report += " " + string.join(output, " ")
else:
# Let's show the difference between expected and actual output
difference = list(difflib.unified_diff(expected_output, output, "expected", "actual"))
if (len(difference) == 0):
report += format_header_line(testfile, "[OK]")
else:
success = False;
report += format_header_line(testfile, "[failed]")
report += " " + string.join(difference, " ")
return success, report
def format_header_line(test_name, result_tag):
'''Make a nice aligned summary line for the test'''
padding = 70 - len(test_name)
return test_name + ("." * padding) + result_tag + "\n"
def fix_string(s):
'''strip trailing whitespace, add consistent newline'''
return (string.rstrip(s) + '\n')
def next_non_comment_line(f):
'''Read and return the next non-comment line from the given file. If
there are no more lines to read, return the empty string.'''
while 1:
line = f.readline()
if len(line) == 0:
# end of file.
return line
if line[0] != '#':
return line
def parse_testfile(testfile):
'''Given a test filename, returns the name of the test, the SWF
filename for the test, and the expected output of the test.
The name is just the base name of testfile.
The SWF filename is taken from the first line of testfile.
The expected output is taken from the remainder of testfile.
Any lines in the testfile that start with '#' are comments, and are
ignored.
Returns [None,None,None] if the testfile couldn't be parsed.'''
# Pull out the filename part of the testfile, minus any path and
# extension.
m = re.match("(.*\/)?([^\/\.]+)(\.[^\.]*)?$", testfile)
testname = m.group(2)
# Read the test file.
f = file(testfile, "r")
if not f:
return [None, None, None]
# The first non-comment line gives the swf file to run.
swf_file = next_non_comment_line(f).rstrip()
# The rest of the file gives the expected output.
expected = []
while 1:
line = next_non_comment_line(f)
if len(line) > 0:
expected.append(line)
else:
break
f.close()
return testname, swf_file, expected
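# Hedged illustration (not part of the original script): per the docstring
# above, a test file could look like the lines below -- the first
# non-comment line names the .swf to run and the remaining lines are the
# expected output; the concrete names and output here are made up.
#
#   # comment lines are ignored
#   samples/frame1.swf
#   expected output line 1
#   expected output line 2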
def do_tests(filenames):
success_count = 0
failed_count = 0
report = ""
for testfile in filenames:
[testname, swf_file, expected_output] = parse_testfile(testfile)
if testname == None:
success = False
rep = format_header_line(testfile, "[failed]")
rep += " Couldn't load test file %s\n" % testfile
else:
[success, rep] = run_batch_test(testname, swf_file, expected_output)
if success:
success_count += 1
else:
failed_count += 1
report += rep
sys.stdout.writelines("Test results: " + str(success_count) + "/" + str(success_count + failed_count) + "\n")
sys.stdout.writelines(report)
# These tests should all pass. If you break one of these, it's a
# regression.
passing_tests = [
'tests/frame1.txt',
'tests/frame2.txt',
'tests/test_basic_types.txt',
'tests/test_currentframe.txt',
'tests/test_delete_references.txt',
'tests/test_forin_array.txt',
'tests/test_motion_exec_order.txt',
'tests/test_string.txt',
'tests/test_undefined_v6.txt',
'tests/test_undefined_v7.txt',
# Add more passing tests here, as gameswf improves.
]
# main
# Collect the tests.
if len(sys.argv) < 2:
# No command-line args. Print usage, and run all the passing tests.
print "gameswf_batch_test.py: runs automated tests against gameswf"
print "usage:"
print " %s [list of test files]" % sys.argv[0]
print "If no files are given, runs the list of known tests that should pass.\n"
files = passing_tests
else:
# Run the tests given on the command line.
files = sys.argv[1:]
do_tests(files)
sys.exit(0)
| Python |
#!/usr/bin/python
# gameswf_batch_test.py - Thatcher Ulrich <tu@tulrich.com> 2005
# This source code has been donated to the Public Domain. Do
# whatever you want with it.
# Script to interactively run all the .swf's in samples/ one-by-one.
#
# Pass a path argument to start with a particular test file.
import glob
import commands
import re
import sys
GAMESWF = "./gameswf_test_ogl"
BATCH_ARGS = " -v "
def run_swf(testfile):
'''Run gameswf on a test file.
Returns: True if gameswf exited with OK status, False otherwise.
'''
print "Running: " + testfile
success = True;
[status, output] = commands.getstatusoutput(GAMESWF + BATCH_ARGS + testfile)
print output
if (status != 0):
success = False
return success
# main
def main(argv):
files = glob.glob("samples/*.swf")
# If the user gave a filename, skip ahead until we find it in the list.
if (len(argv) > 1):
while (len(files) and argv[1] != files[0]):
files.pop(0)
for f in files:
run_swf(f)
if __name__ == "__main__":
main(sys.argv)
| Python |
# See http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
| Python |
import unittest
from zope.testing import doctestunit
from zope.component import testing
from Testing import ZopeTestCase as ztc
from Products.Five import zcml
from Products.Five import fiveconfigure
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import PloneSite
ptc.setupPloneSite()
import Products.my315oktheme
class TestCase(ptc.PloneTestCase):
class layer(PloneSite):
@classmethod
def setUp(cls):
fiveconfigure.debug_mode = True
ztc.installPackage(Products.my315oktheme)
fiveconfigure.debug_mode = False
@classmethod
def tearDown(cls):
pass
def test_suite():
return unittest.TestSuite([
# Unit tests
#doctestunit.DocFileSuite(
# 'README.txt', package='Products.my315oktheme',
# setUp=testing.setUp, tearDown=testing.tearDown),
#doctestunit.DocTestSuite(
# module='Products.my315oktheme.mymodule',
# setUp=testing.setUp, tearDown=testing.tearDown),
# Integration tests that use PloneTestCase
#ztc.ZopeDocFileSuite(
# 'README.txt', package='Products.my315oktheme',
# test_class=TestCase),
#ztc.FunctionalDocFileSuite(
# 'browser.txt', package='Products.my315oktheme',
# test_class=TestCase),
])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| Python |
from plone.theme.interfaces import IDefaultPloneLayer
class IThemeSpecific(IDefaultPloneLayer):
"""Marker interface that defines a Zope 3 browser layer.
If you need to register a viewlet only for the
"My315okTheme" theme, this interface must be its layer
(in my315oktheme/viewlets/configure.zcml).
"""
| Python |
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone.app.layout.viewlets.common import ViewletBase
# Sample code for a basic viewlet. In order to use it, you'll have to:
# - Un-comment the following usable piece of code (viewlet python class).
# - Rename the viewlet template file ('browser/viewlet.pt') and edit the
# following python code accordingly.
# - Edit the class and template to make them suit your needs.
# - Make sure your viewlet is correctly registered in 'browser/configure.zcml'.
# - If you need it to appear in a specific order inside its viewlet manager,
# edit 'profiles/default/viewlets.xml' accordingly.
# - Restart Zope.
# - If you edited any file in 'profiles/default/', reinstall your package.
# - Once you're happy with your viewlet implementation, remove any related
# (unwanted) inline documentation ;-p
#class MyViewlet(ViewletBase):
# render = ViewPageTemplateFile('viewlet.pt')
#
# def update(self):
# self.computed_value = 'any output'
| Python |
#
| Python |
def setupVarious(context):
# Ordinarily, GenericSetup handlers check for the existence of XML files.
# Here, we are not parsing an XML file, but we use this text file as a
# flag to check that we actually meant for this import step to be run.
# The file is found in profiles/default.
if context.readDataFile('Products.my315oktheme_various.txt') is None:
return
# Add additional setup code here
| Python |
def initialize(context):
"""Initializer called when used as a Zope 2 product."""
| Python |
from setuptools import setup, find_packages
import os
version = '1.0'
setup(name='Products.my315oktheme',
version=version,
description="A theme that integrated portletmanager function to Ptortal-header,portal-footer and body",
long_description=open("README.txt").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
# Get more strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
],
keywords='web zope plone theme',
author='adam tang',
author_email='yuejun.tang@gmail.om',
url='http://svn.plone.org/svn/collective/',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['Products'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
setup_requires=["PasteScript"],
paster_plugins = ["ZopeSkel"],
)
| Python |
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Base class for fixers (optional, but recommended)."""
# Python imports
import logging
import itertools
# Local imports
from .patcomp import PatternCompiler
from . import pygram
from .fixer_util import does_tree_import
class BaseFix(object):
"""Optional base class for fixers.
The subclass name must be FixFooBar where FooBar is the result of
removing underscores and capitalizing the words of the fix name.
For example, the class name for a fixer named 'has_key' should be
FixHasKey.
"""
PATTERN = None # Most subclasses should override with a string literal
pattern = None # Compiled pattern, set by compile_pattern()
pattern_tree = None # Tree representation of the pattern
options = None # Options object passed to initializer
filename = None # The filename (set by set_filename)
logger = None # A logger (set by set_filename)
numbers = itertools.count(1) # For new_name()
used_names = set() # A set of all used NAMEs
order = "post" # Does the fixer prefer pre- or post-order traversal
explicit = False # Is this ignored by refactor.py -f all?
run_order = 5 # Fixers will be sorted by run order before execution
# Lower numbers will be run first.
_accept_type = None # [Advanced and not public] This tells RefactoringTool
# which node type to accept when there's not a pattern.
keep_line_order = False # For the bottom matcher: match with the
# original line order
BM_compatible = False # Compatibility with the bottom matching
# module; every fixer should set this
# manually
# Shortcut for access to Python grammar symbols
syms = pygram.python_symbols
def __init__(self, options, log):
"""Initializer. Subclass may override.
Args:
options: a dict containing the options passed to RefactoringTool
that could be used to customize the fixer through the command line.
log: a list to append warnings and other messages to.
"""
self.options = options
self.log = log
self.compile_pattern()
def compile_pattern(self):
"""Compiles self.PATTERN into self.pattern.
Subclass may override if it doesn't want to use
self.{pattern,PATTERN} in .match().
"""
if self.PATTERN is not None:
self.pattern, self.pattern_tree = PatternCompiler().compile_pattern(self.PATTERN, with_tree=True)
def set_filename(self, filename):
"""Set the filename, and a logger derived from it.
The main refactoring tool should call this.
"""
self.filename = filename
self.logger = logging.getLogger(filename)
def match(self, node):
"""Returns match for a given parse tree node.
Should return a true or false object (not necessarily a bool).
It may return a non-empty dict of matching sub-nodes as
returned by a matching pattern.
Subclass may override.
"""
results = {"node": node}
return self.pattern.match(node, results) and results
def transform(self, node, results):
"""Returns the transformation for a given parse tree node.
Args:
node: the root of the parse tree that matched the fixer.
results: a dict mapping symbolic names to part of the match.
Returns:
None, or a node that is a modified copy of the
argument node. The node argument may also be modified in-place to
effect the same change.
Subclass *must* override.
"""
raise NotImplementedError()
def new_name(self, template="xxx_todo_changeme"):
"""Return a string suitable for use as an identifier
The new name is guaranteed not to conflict with other identifiers.
"""
name = template
while name in self.used_names:
name = template + str(next(self.numbers))
self.used_names.add(name)
return name
def log_message(self, message):
if self.first_log:
self.first_log = False
self.log.append("### In file %s ###" % self.filename)
self.log.append(message)
def cannot_convert(self, node, reason=None):
"""Warn the user that a given chunk of code is not valid Python 3,
but that it cannot be converted automatically.
First argument is the top-level node for the code in question.
Optional second argument is why it can't be converted.
"""
lineno = node.get_lineno()
for_output = node.clone()
for_output.prefix = ""
msg = "Line %d: could not convert: %s"
self.log_message(msg % (lineno, for_output))
if reason:
self.log_message(reason)
def warning(self, node, reason):
"""Used for warning the user about possible uncertainty in the
translation.
First argument is the top-level node for the code in question.
Optional second argument is why it can't be converted.
"""
lineno = node.get_lineno()
self.log_message("Line %d: %s" % (lineno, reason))
def start_tree(self, tree, filename):
"""Some fixers need to maintain tree-wide state.
This method is called once, at the start of tree fix-up.
tree - the root node of the tree to be processed.
filename - the name of the file the tree came from.
"""
self.used_names = tree.used_names
self.set_filename(filename)
self.numbers = itertools.count(1)
self.first_log = True
def finish_tree(self, tree, filename):
"""Some fixers need to maintain tree-wide state.
This method is called once, at the conclusion of tree fix-up.
tree - the root node of the tree to be processed.
filename - the name of the file the tree came from.
"""
pass
class ConditionalFix(BaseFix):
""" Base class for fixers which not execute if an import is found. """
# This is the name of the import which, if found, will cause the test to be skipped
skip_on = None
def start_tree(self, *args):
super(ConditionalFix, self).start_tree(*args)
self._should_skip = None
def should_skip(self, node):
if self._should_skip is not None:
return self._should_skip
pkg = self.skip_on.split(".")
name = pkg[-1]
pkg = ".".join(pkg[:-1])
self._should_skip = does_tree_import(pkg, name, node)
return self._should_skip
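# Hedged sketch (not part of the original module): a minimal fixer
# following the naming rule documented in BaseFix ('has_key' -> FixHasKey).
# The PATTERN string and the transform body are illustrative only, not a
# real lib2to3 fixer.
#
# class FixExample(BaseFix):
#     PATTERN = "power< 'example' trailer< '(' ')' > >"
#     def transform(self, node, results):
#         # return the (possibly modified) node, per the BaseFix contract
#         return node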
| Python |
"Utility functions used by the btm_matcher module"
from . import pytree
from .pgen2 import grammar, token
from .pygram import pattern_symbols, python_symbols
syms = pattern_symbols.__dict__
pysyms = python_symbols.__dict__
tokens = grammar.opmap
token_labels = token.__dict__
TYPE_ANY = -1
TYPE_ALTERNATIVES = -2
TYPE_GROUP = -3
class MinNode(object):
"""This class serves as an intermediate representation of the
pattern tree during the conversion to sets of leaf-to-root
subpatterns"""
def __init__(self, type=None, name=None, times=1):
self.type = type
self.name = name
self.times = times
self.children = []
self.leaf = False
self.parent = None
self.alternatives = []
self.alternatives_number = None
self.group = []
def __repr__(self):
return str(self.type) + ' ' + str(self.name) + ' ' + str(self.times)
def leaf_to_root(self):
"""Internal method. Returns a characteristic path of the
pattern tree. This method must be run for all leaves until the
linear subpatterns are merged into a single one."""
node = self
subp = []
while node:
if node.type == TYPE_ALTERNATIVES:
node.alternatives.append(subp)
if len(node.alternatives) == len(node.children):
#last alternative
subp = [tuple(node.alternatives)]
node.alternatives = []
node = node.parent
continue
else:
node = node.parent
subp = None
break
if node.type == TYPE_GROUP:
node.group.append(subp)
#probably should check the number of leaves
if len(node.group) == len(node.children):
subp = get_characteristic_subpattern(node.group)
node.group = []
node = node.parent
continue
else:
node = node.parent
subp = None
break
if node.type == token_labels['NAME'] and node.name:
#in case of type=name, use the name instead
subp.append(node.name)
else:
subp.append(node.type)
node = node.parent
return subp
def get_linear_subpattern(self):
"""Drives the leaf_to_root method. The reason that
leaf_to_root must be run multiple times is because we need to
reject 'group' matches; for example the alternative form
(a | b c) creates a group [b c] that needs to be matched. Since
matching multiple linear patterns overcomes the automaton's
capabilities, leaf_to_root merges each group into a single
choice based on how 'characteristic' it is,
i.e. (a|b c) -> (a|b) if b is more characteristic than c
Returns: The most 'characteristic'(as defined by
get_characteristic_subpattern) path for the compiled pattern
tree.
"""
for l in self.leaves():
subp = l.leaf_to_root()
if subp:
return subp
def leaves(self):
"Generator that returns the leaves of the tree"
for child in self.children:
for x in child.leaves():
yield x
if not self.children:
yield self
def reduce_tree(node, parent=None):
"""
Internal function. Reduces a compiled pattern tree to an
intermediate representation suitable for feeding the
automaton. This also trims off any optional pattern elements (like
[a], a*).
"""
new_node = None
#switch on the node type
if node.type == syms['Matcher']:
#skip
new_node = reduce_tree(node.children[0])
elif node.type == syms['Alternatives']:
#2 cases
if len(node.children)<=2:
#just a single 'Alternative', skip this node
new_node = reduce_tree(node.children[0], parent)
elif len(node.children)>2:
#real alternatives
new_node = MinNode(type=TYPE_ALTERNATIVES)
#skip odd children('|' tokens)
for child in node.children:
if node.children.index(child)%2:
continue
reduced = reduce_tree(child, new_node)
if reduced is not None:
new_node.children.append(reduced)
else:
raise Exception
elif node.type == syms['Alternative']:
if len(node.children)>1:
new_node = MinNode(type=TYPE_GROUP)
for child in node.children:
reduced = reduce_tree(child, new_node)
if reduced:
new_node.children.append(reduced)
if not new_node.children:
# delete the group if all of the children were reduced to None
new_node = None
else:
new_node = reduce_tree(node.children[0], parent)
elif node.type == syms['Unit']:
if hasattr(node.children[0], "value") and \
node.children[0].value == '(':
#skip parentheses
return reduce_tree(node.children[1], parent)
if (hasattr(node.children[0], "value") and \
node.children[0].value == '[') \
or \
(len(node.children)>1 and \
hasattr(node.children[1], "value") and \
node.children[1].value == '['):
#skip whole unit if it's optional
return None
leaf = True
details_node = None
alternatives_node = None
has_repeater = False
repeater_node = None
has_variable_name = False
for child in node.children:
if child.type == syms['Details']:
leaf = False
details_node = child
elif child.type == syms['Repeater']:
has_repeater = True
repeater_node = child
elif child.type == syms['Alternatives']:
alternatives_node = child
if hasattr(child, 'value') and child.value == '=': # variable name
has_variable_name = True
#skip variable name
if has_variable_name:
#skip variable name, '='
name_leaf = node.children[2]
if hasattr(name_leaf, 'value') and name_leaf.value == '(':
# skip parenthesis
name_leaf = node.children[3]
else:
name_leaf = node.children[0]
#set node type
if name_leaf.type == token_labels['NAME']:
#(python) non-name or wildcard
if name_leaf.value == 'any':
new_node = MinNode(type=TYPE_ANY)
else:
if name_leaf.value in token_labels:
new_node = MinNode(type=token_labels[name_leaf.value])
else:
new_node = MinNode(type=pysyms[name_leaf.value])
elif name_leaf.type == token_labels['STRING']:
#(python) name or character; remove the apostrophes from
#the string value
name = name_leaf.value[1:][:-1]
if name in tokens:
new_node = MinNode(type=tokens[name])
else:
new_node = MinNode(type=token_labels['NAME'], name=name)
elif name_leaf.type == syms['Alternatives']:
new_node = reduce_tree(alternatives_node, parent)
#handle repeaters
if has_repeater:
if repeater_node.children[0].value == '*':
#reduce to None
new_node = None
elif repeater_node.children[0].value == '+':
#reduce to a single occurrence, i.e. do nothing
pass
else:
#TODO: handle {min, max} repeaters
pass
#add children
if details_node and new_node is not None:
for child in details_node.children[1:][:-1]:
#skip '<', '>' markers
reduced = reduce_tree(child, new_node)
if reduced is not None:
new_node.children.append(reduced)
if new_node:
new_node.parent = parent
return new_node
def get_characteristic_subpattern(subpatterns):
"""Picks the most characteristic from a list of linear patterns
Current order used is:
names > common_names > common_chars
"""
if type(subpatterns) is not list:
return subpatterns
if type(subpatterns) is list and len(subpatterns)==1:
return subpatterns[0]
# first pick out the ones containing variable names
subpatterns_with_names = []
subpatterns_with_common_names = []
common_names = ['in', 'for', 'if' , 'not', 'None']
subpatterns_with_common_chars = []
common_chars = "[]().,:"
for subpattern in subpatterns:
if any(rec_test(subpattern, lambda x: type(x) is str)):
if any(rec_test(subpattern,
lambda x: type(x) is str and x in common_chars)):
subpatterns_with_common_chars.append(subpattern)
elif any(rec_test(subpattern,
lambda x: type(x) is str and x in common_names)):
subpatterns_with_common_names.append(subpattern)
else:
subpatterns_with_names.append(subpattern)
if subpatterns_with_names:
subpatterns = subpatterns_with_names
elif subpatterns_with_common_names:
subpatterns = subpatterns_with_common_names
elif subpatterns_with_common_chars:
subpatterns = subpatterns_with_common_chars
# of the remaining subpatterns pick out the longest one
return sorted(subpatterns, key=len, reverse=True)[0]
def rec_test(sequence, test_func):
"""Tests test_func on all items of sequence and items of included
sub-iterables"""
for x in sequence:
if type(x) is list or type(x) is tuple:
for y in rec_test(x, test_func):
yield y
else:
yield test_func(x)
| Python |
"""
Main program for 2to3.
"""
from __future__ import with_statement
import sys
import os
import difflib
import logging
import shutil
import optparse
from . import refactor
def diff_texts(a, b, filename):
"""Return a unified diff of two strings."""
a = a.splitlines()
b = b.splitlines()
return difflib.unified_diff(a, b, filename, filename,
"(original)", "(refactored)",
lineterm="")
class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
"""
Prints output to stdout.
"""
def __init__(self, fixers, options, explicit, nobackups, show_diffs):
self.nobackups = nobackups
self.show_diffs = show_diffs
super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)
def log_error(self, msg, *args, **kwargs):
self.errors.append((msg, args, kwargs))
self.logger.error(msg, *args, **kwargs)
def write_file(self, new_text, filename, old_text, encoding):
if not self.nobackups:
# Make backup
backup = filename + ".bak"
if os.path.lexists(backup):
try:
os.remove(backup)
except os.error as err:
self.log_message("Can't remove backup %s", backup)
try:
os.rename(filename, backup)
except os.error as err:
self.log_message("Can't rename %s to %s", filename, backup)
# Actually write the new file
write = super(StdoutRefactoringTool, self).write_file
write(new_text, filename, old_text, encoding)
if not self.nobackups:
shutil.copymode(backup, filename)
def print_output(self, old, new, filename, equal):
if equal:
self.log_message("No changes to %s", filename)
else:
self.log_message("Refactored %s", filename)
if self.show_diffs:
diff_lines = diff_texts(old, new, filename)
try:
if self.output_lock is not None:
with self.output_lock:
for line in diff_lines:
print(line)
sys.stdout.flush()
else:
for line in diff_lines:
print(line)
except UnicodeEncodeError:
warn("couldn't encode %s's diff for your terminal" %
(filename,))
return
def warn(msg):
print("WARNING: %s" % (msg,), file=sys.stderr)
def main(fixer_pkg, args=None):
"""Main program.
Args:
fixer_pkg: the name of a package where the fixers are located.
args: optional; a list of command line arguments. If omitted,
sys.argv[1:] is used.
Returns a suggested exit status (0, 1, 2).
"""
# Set up option parser
parser = optparse.OptionParser(usage="2to3 [options] file|dir ...")
parser.add_option("-d", "--doctests_only", action="store_true",
help="Fix up doctests only")
parser.add_option("-f", "--fix", action="append", default=[],
help="Each FIX specifies a transformation; default: all")
parser.add_option("-j", "--processes", action="store", default=1,
type="int", help="Run 2to3 concurrently")
parser.add_option("-x", "--nofix", action="append", default=[],
help="Prevent a fixer from being run.")
parser.add_option("-l", "--list-fixes", action="store_true",
help="List available transformations")
parser.add_option("-p", "--print-function", action="store_true",
help="Modify the grammar so that print() is a function")
parser.add_option("-v", "--verbose", action="store_true",
help="More verbose logging")
parser.add_option("--no-diffs", action="store_true",
help="Don't show diffs of the refactoring")
parser.add_option("-w", "--write", action="store_true",
help="Write back modified files")
parser.add_option("-n", "--nobackups", action="store_true", default=False,
help="Don't write backups for modified files.")
# Parse command line arguments
refactor_stdin = False
flags = {}
options, args = parser.parse_args(args)
if not options.write and options.no_diffs:
warn("not writing files and not printing diffs; that's not very useful")
if not options.write and options.nobackups:
parser.error("Can't use -n without -w")
if options.list_fixes:
print("Available transformations for the -f/--fix option:")
for fixname in refactor.get_all_fix_names(fixer_pkg):
print(fixname)
if not args:
return 0
if not args:
print("At least one file or directory argument required.", file=sys.stderr)
print("Use --help to show usage.", file=sys.stderr)
return 2
if "-" in args:
refactor_stdin = True
if options.write:
print("Can't write to stdin.", file=sys.stderr)
return 2
if options.print_function:
flags["print_function"] = True
# Set up logging handler
level = logging.DEBUG if options.verbose else logging.INFO
logging.basicConfig(format='%(name)s: %(message)s', level=level)
# Initialize the refactoring tool
avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix)
explicit = set()
if options.fix:
all_present = False
for fix in options.fix:
if fix == "all":
all_present = True
else:
explicit.add(fixer_pkg + ".fix_" + fix)
requested = avail_fixes.union(explicit) if all_present else explicit
else:
requested = avail_fixes.union(explicit)
fixer_names = requested.difference(unwanted_fixes)
rt = StdoutRefactoringTool(sorted(fixer_names), flags, sorted(explicit),
options.nobackups, not options.no_diffs)
# Refactor all files and directories passed as arguments
if not rt.errors:
if refactor_stdin:
rt.refactor_stdin()
else:
try:
rt.refactor(args, options.write, options.doctests_only,
options.processes)
except refactor.MultiprocessingUnsupported:
assert options.processes > 1
print("Sorry, -j isn't supported on this platform.",
file=sys.stderr)
return 1
rt.summarize()
# Return error status (0 if rt.errors is zero)
return int(bool(rt.errors))
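# Hedged usage sketch (not part of the original module): a driver script
# typically calls main() with the package that holds the fixers plus
# command-line style arguments; the package name below is an assumption
# about how this copy is laid out.
#
# if __name__ == "__main__":
#     sys.exit(main("lib2to3.fixes", ["-w", "example.py"]))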
| Python |
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Pattern compiler.
The grammar is taken from PatternGrammar.txt.
The compiler compiles a pattern to a pytree.*Pattern instance.
"""
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
import os
# Fairly local imports
from .pgen2 import driver, literals, token, tokenize, parse, grammar
# Really local imports
from . import pytree
from . import pygram
# The pattern grammar file
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
"PatternGrammar.txt")
class PatternSyntaxError(Exception):
pass
def tokenize_wrapper(input):
"""Tokenizes a string suppressing significant whitespace."""
skip = set((token.NEWLINE, token.INDENT, token.DEDENT))
tokens = tokenize.generate_tokens(driver.generate_lines(input).__next__)
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if type not in skip:
yield quintuple
class PatternCompiler(object):
def __init__(self, grammar_file=_PATTERN_GRAMMAR_FILE):
"""Initializer.
Takes an optional alternative filename for the pattern grammar.
"""
self.grammar = driver.load_grammar(grammar_file)
self.syms = pygram.Symbols(self.grammar)
self.pygrammar = pygram.python_grammar
self.pysyms = pygram.python_symbols
self.driver = driver.Driver(self.grammar, convert=pattern_convert)
def compile_pattern(self, input, debug=False, with_tree=False):
"""Compiles a pattern string to a nested pytree.*Pattern object."""
tokens = tokenize_wrapper(input)
try:
root = self.driver.parse_tokens(tokens, debug=debug)
except parse.ParseError as e:
raise PatternSyntaxError(str(e))
if with_tree:
return self.compile_node(root), root
else:
return self.compile_node(root)
def compile_node(self, node):
"""Compiles a node, recursively.
This is one big switch on the node type.
"""
# XXX Optimize certain Wildcard-containing-Wildcard patterns
# that can be merged
if node.type == self.syms.Matcher:
node = node.children[0] # Avoid unneeded recursion
if node.type == self.syms.Alternatives:
# Skip the odd children since they are just '|' tokens
alts = [self.compile_node(ch) for ch in node.children[::2]]
if len(alts) == 1:
return alts[0]
p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1)
return p.optimize()
if node.type == self.syms.Alternative:
units = [self.compile_node(ch) for ch in node.children]
if len(units) == 1:
return units[0]
p = pytree.WildcardPattern([units], min=1, max=1)
return p.optimize()
if node.type == self.syms.NegatedUnit:
pattern = self.compile_basic(node.children[1:])
p = pytree.NegatedPattern(pattern)
return p.optimize()
assert node.type == self.syms.Unit
name = None
nodes = node.children
if len(nodes) >= 3 and nodes[1].type == token.EQUAL:
name = nodes[0].value
nodes = nodes[2:]
repeat = None
if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater:
repeat = nodes[-1]
nodes = nodes[:-1]
# Now we've reduced it to: STRING | NAME [Details] | (...) | [...]
pattern = self.compile_basic(nodes, repeat)
if repeat is not None:
assert repeat.type == self.syms.Repeater
children = repeat.children
child = children[0]
if child.type == token.STAR:
min = 0
max = pytree.HUGE
elif child.type == token.PLUS:
min = 1
max = pytree.HUGE
elif child.type == token.LBRACE:
assert children[-1].type == token.RBRACE
assert len(children) in (3, 5)
min = max = self.get_int(children[1])
if len(children) == 5:
max = self.get_int(children[3])
else:
assert False
if min != 1 or max != 1:
pattern = pattern.optimize()
pattern = pytree.WildcardPattern([[pattern]], min=min, max=max)
if name is not None:
pattern.name = name
return pattern.optimize()
def compile_basic(self, nodes, repeat=None):
# Compile STRING | NAME [Details] | (...) | [...]
assert len(nodes) >= 1
node = nodes[0]
if node.type == token.STRING:
value = str(literals.evalString(node.value))
return pytree.LeafPattern(_type_of_literal(value), value)
elif node.type == token.NAME:
value = node.value
if value.isupper():
if value not in TOKEN_MAP:
raise PatternSyntaxError("Invalid token: %r" % value)
if nodes[1:]:
raise PatternSyntaxError("Can't have details for token")
return pytree.LeafPattern(TOKEN_MAP[value])
else:
if value == "any":
type = None
elif not value.startswith("_"):
type = getattr(self.pysyms, value, None)
if type is None:
raise PatternSyntaxError("Invalid symbol: %r" % value)
if nodes[1:]: # Details present
content = [self.compile_node(nodes[1].children[1])]
else:
content = None
return pytree.NodePattern(type, content)
elif node.value == "(":
return self.compile_node(nodes[1])
elif node.value == "[":
assert repeat is None
subpattern = self.compile_node(nodes[1])
return pytree.WildcardPattern([[subpattern]], min=0, max=1)
assert False, node
def get_int(self, node):
assert node.type == token.NUMBER
return int(node.value)
# Map named tokens to the type value for a LeafPattern
TOKEN_MAP = {"NAME": token.NAME,
"STRING": token.STRING,
"NUMBER": token.NUMBER,
"TOKEN": None}
def _type_of_literal(value):
if value[0].isalpha():
return token.NAME
elif value in grammar.opmap:
return grammar.opmap[value]
else:
return None
def pattern_convert(grammar, raw_node_info):
"""Converts raw node information to a Node or Leaf instance."""
type, value, context, children = raw_node_info
if children or type in grammar.number2symbol:
return pytree.Node(type, children, context=context)
else:
return pytree.Leaf(type, value, context=context)
def compile_pattern(pattern):
return PatternCompiler().compile_pattern(pattern)
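# Hedged usage sketch (not part of the original module): compiling a
# pattern with the helper above and matching it against a parse-tree node.
# The pattern syntax follows PatternGrammar.txt; 'some_node' is a
# placeholder for a pytree node obtained elsewhere.
#
# pat = compile_pattern("power< 'len' trailer< '(' args=any ')' > >")
# results = {}
# if pat.match(some_node, results):
#     print(results["args"])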
| Python |
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Export the Python grammar and symbols."""
# Python imports
import os
# Local imports
from .pgen2 import token
from .pgen2 import driver
from . import pytree
# The grammar file
_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
"PatternGrammar.txt")
class Symbols(object):
def __init__(self, grammar):
"""Initializer.
Creates an attribute for each grammar symbol (nonterminal),
whose value is the symbol's type (an int >= 256).
"""
for name, symbol in grammar.symbol2number.items():
setattr(self, name, symbol)
python_grammar = driver.load_grammar(_GRAMMAR_FILE)
python_symbols = Symbols(python_grammar)
python_grammar_no_print_statement = python_grammar.copy()
del python_grammar_no_print_statement.keywords["print"]
pattern_grammar = driver.load_grammar(_PATTERN_GRAMMAR_FILE)
pattern_symbols = Symbols(pattern_grammar)
| Python |
"""Utility functions, node construction macros, etc."""
# Author: Collin Winter
# Local imports
from .pgen2 import token
from .pytree import Leaf, Node
from .pygram import python_symbols as syms
from . import patcomp
###########################################################
### Common node-construction "macros"
###########################################################
def KeywordArg(keyword, value):
return Node(syms.argument,
[keyword, Leaf(token.EQUAL, '='), value])
def LParen():
return Leaf(token.LPAR, "(")
def RParen():
return Leaf(token.RPAR, ")")
def Assign(target, source):
"""Build an assignment statement"""
if not isinstance(target, list):
target = [target]
if not isinstance(source, list):
source.prefix = " "
source = [source]
return Node(syms.atom,
target + [Leaf(token.EQUAL, "=", prefix=" ")] + source)
def Name(name, prefix=None):
"""Return a NAME leaf"""
return Leaf(token.NAME, name, prefix=prefix)
def Attr(obj, attr):
"""A node tuple for obj.attr"""
return [obj, Node(syms.trailer, [Dot(), attr])]
def Comma():
"""A comma leaf"""
return Leaf(token.COMMA, ",")
def Dot():
"""A period (.) leaf"""
return Leaf(token.DOT, ".")
def ArgList(args, lparen=LParen(), rparen=RParen()):
"""A parenthesised argument list, used by Call()"""
node = Node(syms.trailer, [lparen.clone(), rparen.clone()])
if args:
node.insert_child(1, Node(syms.arglist, args))
return node
def Call(func_name, args=None, prefix=None):
"""A function call"""
node = Node(syms.power, [func_name, ArgList(args)])
if prefix is not None:
node.prefix = prefix
return node
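# Hedged usage sketch (not part of the original module): building the
# expression `sorted(items)` from the macros above; the identifier names
# are illustrative.
#
# call_node = Call(Name("sorted"), [Name("items")], prefix=" ")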
def Newline():
"""A newline literal"""
return Leaf(token.NEWLINE, "\n")
def BlankLine():
"""A blank line"""
return Leaf(token.NEWLINE, "")
def Number(n, prefix=None):
return Leaf(token.NUMBER, n, prefix=prefix)
def Subscript(index_node):
"""A numeric or string subscript"""
return Node(syms.trailer, [Leaf(token.LBRACE, '['),
index_node,
Leaf(token.RBRACE, ']')])
def String(string, prefix=None):
"""A string leaf"""
return Leaf(token.STRING, string, prefix=prefix)
def ListComp(xp, fp, it, test=None):
"""A list comprehension of the form [xp for fp in it if test].
If test is None, the "if test" part is omitted.
"""
xp.prefix = ""
fp.prefix = " "
it.prefix = " "
for_leaf = Leaf(token.NAME, "for")
for_leaf.prefix = " "
in_leaf = Leaf(token.NAME, "in")
in_leaf.prefix = " "
inner_args = [for_leaf, fp, in_leaf, it]
if test:
test.prefix = " "
if_leaf = Leaf(token.NAME, "if")
if_leaf.prefix = " "
inner_args.append(Node(syms.comp_if, [if_leaf, test]))
inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)])
return Node(syms.atom,
[Leaf(token.LBRACE, "["),
inner,
Leaf(token.RBRACE, "]")])
def FromImport(package_name, name_leafs):
""" Return an import statement in the form:
from package import name_leafs"""
# XXX: May not handle dotted imports properly (eg, package_name='foo.bar')
#assert package_name == '.' or '.' not in package_name, "FromImport has "\
# "not been tested with dotted package names -- use at your own "\
# "peril!"
for leaf in name_leafs:
# Pull the leaves out of their old tree
leaf.remove()
children = [Leaf(token.NAME, 'from'),
Leaf(token.NAME, package_name, prefix=" "),
Leaf(token.NAME, 'import', prefix=" "),
Node(syms.import_as_names, name_leafs)]
imp = Node(syms.import_from, children)
return imp
###########################################################
### Determine whether a node represents a given literal
###########################################################
def is_tuple(node):
"""Does the node represent a tuple literal?"""
if isinstance(node, Node) and node.children == [LParen(), RParen()]:
return True
return (isinstance(node, Node)
and len(node.children) == 3
and isinstance(node.children[0], Leaf)
and isinstance(node.children[1], Node)
and isinstance(node.children[2], Leaf)
and node.children[0].value == "("
and node.children[2].value == ")")
def is_list(node):
"""Does the node represent a list literal?"""
return (isinstance(node, Node)
and len(node.children) > 1
and isinstance(node.children[0], Leaf)
and isinstance(node.children[-1], Leaf)
and node.children[0].value == "["
and node.children[-1].value == "]")
###########################################################
### Misc
###########################################################
def parenthesize(node):
return Node(syms.atom, [LParen(), node, RParen()])
consuming_calls = set(["sorted", "list", "set", "any", "all", "tuple", "sum",
"min", "max"])
def attr_chain(obj, attr):
"""Follow an attribute chain.
If you have a chain of objects where a.foo -> b, b.foo-> c, etc,
use this to iterate over all objects in the chain. Iteration is
terminated by getattr(x, attr) is None.
Args:
obj: the starting object
attr: the name of the chaining attribute
Yields:
Each successive object in the chain.
"""
next = getattr(obj, attr)
while next:
yield next
next = getattr(next, attr)
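# Hedged usage sketch (not part of the original module): iterating over
# every ancestor of a node through its 'parent' attribute, exactly how
# in_special_context() below uses attr_chain(node, "parent").
#
# for ancestor in attr_chain(node, "parent"):
#     print(ancestor.type)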
p0 = """for_stmt< 'for' any 'in' node=any ':' any* >
| comp_for< 'for' any 'in' node=any any* >
"""
p1 = """
power<
( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' |
'any' | 'all' | (any* trailer< '.' 'join' >) )
trailer< '(' node=any ')' >
any*
>
"""
p2 = """
power<
'sorted'
trailer< '(' arglist<node=any any*> ')' >
any*
>
"""
pats_built = False
def in_special_context(node):
""" Returns true if node is in an environment where all that is required
of it is being iterable (i.e., it doesn't matter if it returns a list
or an iterator).
See test_map_nochange in test_fixers.py for some examples and tests.
"""
global p0, p1, p2, pats_built
if not pats_built:
p1 = patcomp.compile_pattern(p1)
p0 = patcomp.compile_pattern(p0)
p2 = patcomp.compile_pattern(p2)
pats_built = True
patterns = [p0, p1, p2]
for pattern, parent in zip(patterns, attr_chain(node, "parent")):
results = {}
if pattern.match(parent, results) and results["node"] is node:
return True
return False
def is_probably_builtin(node):
"""
Check that something isn't an attribute or function name etc.
"""
prev = node.prev_sibling
if prev is not None and prev.type == token.DOT:
# Attribute lookup.
return False
parent = node.parent
if parent.type in (syms.funcdef, syms.classdef):
return False
if parent.type == syms.expr_stmt and parent.children[0] is node:
# Assignment.
return False
if parent.type == syms.parameters or \
(parent.type == syms.typedargslist and (
(prev is not None and prev.type == token.COMMA) or
parent.children[0] is node
)):
# The name of an argument.
return False
return True
###########################################################
### The following functions are to find bindings in a suite
###########################################################
def make_suite(node):
if node.type == syms.suite:
return node
node = node.clone()
parent, node.parent = node.parent, None
suite = Node(syms.suite, [node])
suite.parent = parent
return suite
def find_root(node):
"""Find the top level namespace."""
# Scamper up to the top level namespace
while node.type != syms.file_input:
assert node.parent, "Tree is insane! root found before "\
"file_input node was found."
node = node.parent
return node
def does_tree_import(package, name, node):
""" Returns true if name is imported from package at the
top level of the tree which node belongs to.
To cover the case of an import like 'import foo', use
None for the package and 'foo' for the name. """
binding = find_binding(name, find_root(node), package)
return bool(binding)
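# Hedged usage sketch (not part of the original module): checking whether
# the tree containing 'node' already imports itertools at the top level,
# using None for the package as described in the docstring above.
#
# if does_tree_import(None, "itertools", node):
#     pass  # 'import itertools' is already present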
def is_import(node):
"""Returns true if the node is an import statement."""
return node.type in (syms.import_name, syms.import_from)
def touch_import(package, name, node):
""" Works like `does_tree_import` but adds an import statement
if it was not imported. """
def is_import_stmt(node):
return node.type == syms.simple_stmt and node.children and \
is_import(node.children[0])
root = find_root(node)
if does_tree_import(package, name, root):
return
# figure out where to insert the new import. First try to find
# the first import and then skip to the last one.
insert_pos = offset = 0
for idx, node in enumerate(root.children):
if not is_import_stmt(node):
continue
for offset, node2 in enumerate(root.children[idx:]):
if not is_import_stmt(node2):
break
insert_pos = idx + offset
break
# if there are no imports where we can insert, find the docstring.
# if that also fails, we stick to the beginning of the file
if insert_pos == 0:
for idx, node in enumerate(root.children):
if node.type == syms.simple_stmt and node.children and \
node.children[0].type == token.STRING:
insert_pos = idx + 1
break
if package is None:
import_ = Node(syms.import_name, [
Leaf(token.NAME, 'import'),
Leaf(token.NAME, name, prefix=' ')
])
else:
import_ = FromImport(package, [Leaf(token.NAME, name, prefix=' ')])
children = [import_, Newline()]
root.insert_child(insert_pos, Node(syms.simple_stmt, children))
_def_syms = set([syms.classdef, syms.funcdef])
def find_binding(name, node, package=None):
""" Returns the node which binds variable name, otherwise None.
If optional argument package is supplied, only imports will
be returned.
See test cases for examples."""
for child in node.children:
ret = None
if child.type == syms.for_stmt:
if _find(name, child.children[1]):
return child
n = find_binding(name, make_suite(child.children[-1]), package)
if n: ret = n
elif child.type in (syms.if_stmt, syms.while_stmt):
n = find_binding(name, make_suite(child.children[-1]), package)
if n: ret = n
elif child.type == syms.try_stmt:
n = find_binding(name, make_suite(child.children[2]), package)
if n:
ret = n
else:
for i, kid in enumerate(child.children[3:]):
if kid.type == token.COLON and kid.value == ":":
# i+3 is the colon, i+4 is the suite
n = find_binding(name, make_suite(child.children[i+4]), package)
if n: ret = n
elif child.type in _def_syms and child.children[1].value == name:
ret = child
elif _is_import_binding(child, name, package):
ret = child
elif child.type == syms.simple_stmt:
ret = find_binding(name, child, package)
elif child.type == syms.expr_stmt:
if _find(name, child.children[0]):
ret = child
if ret:
if not package:
return ret
if is_import(ret):
return ret
return None
_block_syms = set([syms.funcdef, syms.classdef, syms.trailer])
def _find(name, node):
nodes = [node]
while nodes:
node = nodes.pop()
if node.type > 256 and node.type not in _block_syms:
nodes.extend(node.children)
elif node.type == token.NAME and node.value == name:
return node
return None
def _is_import_binding(node, name, package=None):
""" Will reuturn node if node will import name, or node
will import * from package. None is returned otherwise.
See test cases for examples. """
if node.type == syms.import_name and not package:
imp = node.children[1]
if imp.type == syms.dotted_as_names:
for child in imp.children:
if child.type == syms.dotted_as_name:
if child.children[2].value == name:
return node
elif child.type == token.NAME and child.value == name:
return node
elif imp.type == syms.dotted_as_name:
last = imp.children[-1]
if last.type == token.NAME and last.value == name:
return node
elif imp.type == token.NAME and imp.value == name:
return node
elif node.type == syms.import_from:
# str(...) is used to make life easier here, because
# from a.b import parses to ['import', ['a', '.', 'b'], ...]
if package and str(node.children[1]).strip() != package:
return None
n = node.children[3]
if package and _find('as', n):
# See test_from_import_as for explanation
return None
elif n.type == syms.import_as_names and _find(name, n):
return node
elif n.type == syms.import_as_name:
child = n.children[2]
if child.type == token.NAME and child.value == name:
return node
elif n.type == token.NAME and n.value == name:
return node
elif package and n.type == token.STAR:
return node
return None
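# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of touch_import(), assuming this fragment belongs
# to lib2to3's fixer_util and that lib2to3's public driver, pygram and pytree
# helpers are available; the source string is purely hypothetical.
def _demo_touch_import():
    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver
    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string("x = path.join('a', 'b')\n")
    # Inserts "from os import path" at the top unless it is already imported.
    touch_import("os", "path", tree)
    return str(tree)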
| Python |
#! /usr/bin/env python3
"""Token constants (from "token.h")."""
# Taken from Python (r53757) and modified to include some tokens
# originally monkeypatched in by pgen2.tokenize
#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
BACKQUOTE = 25
LBRACE = 26
RBRACE = 27
EQEQUAL = 28
NOTEQUAL = 29
LESSEQUAL = 30
GREATEREQUAL = 31
TILDE = 32
CIRCUMFLEX = 33
LEFTSHIFT = 34
RIGHTSHIFT = 35
DOUBLESTAR = 36
PLUSEQUAL = 37
MINEQUAL = 38
STAREQUAL = 39
SLASHEQUAL = 40
PERCENTEQUAL = 41
AMPEREQUAL = 42
VBAREQUAL = 43
CIRCUMFLEXEQUAL = 44
LEFTSHIFTEQUAL = 45
RIGHTSHIFTEQUAL = 46
DOUBLESTAREQUAL = 47
DOUBLESLASH = 48
DOUBLESLASHEQUAL = 49
AT = 50
OP = 51
COMMENT = 52
NL = 53
RARROW = 54
ERRORTOKEN = 55
N_TOKENS = 56
NT_OFFSET = 256
#--end constants--
tok_name = {}
for _name, _value in list(globals().items()):
if type(_value) is type(0):
tok_name[_value] = _name
def ISTERMINAL(x):
return x < NT_OFFSET
def ISNONTERMINAL(x):
return x >= NT_OFFSET
def ISEOF(x):
return x == ENDMARKER
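# --- Illustrative usage sketch (not part of the original module) ---
# Shows how the constants and helpers above fit together; the assertions are
# examples only.
if __name__ == "__main__":
    assert tok_name[NAME] == "NAME"
    assert ISTERMINAL(NAME) and not ISNONTERMINAL(NAME)
    assert ISNONTERMINAL(NT_OFFSET) and not ISEOF(NAME)
    assert ISEOF(ENDMARKER)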
| Python |
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.
"""Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from codecs import BOM_UTF8, lookup
from lib2to3.pgen2.token import *
from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
"generate_tokens", "untokenize"]
del token
try:
bytes
except NameError:
# Support bytes type in Python <= 2.5, so 2to3 turns itself into
# valid Python 3 code.
bytes = str
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Binnumber = r'0[bB][01]*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[oO]?[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?", r"->",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = list(map(
re.compile, (Token, PseudoToken, Single3, Double3)))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"b'''": single3prog, 'b"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"br'''": single3prog, 'br"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"B'''": single3prog, 'B"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
"bR'''": single3prog, 'bR"""': double3prog,
"Br'''": single3prog, 'Br"""': double3prog,
"BR'''": single3prog, 'BR"""': double3prog,
'r': None, 'R': None,
'u': None, 'U': None,
'b': None, 'B': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"b'''", 'b"""', "B'''", 'B"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""',):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"b'", 'b"', "B'", 'B"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"', ):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line): # for testing
(srow, scol) = xxx_todo_changeme
(erow, ecol) = xxx_todo_changeme1
print("%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token)))
def tokenize(readline, tokeneater=printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
def add_whitespace(self, start):
row, col = start
assert row <= self.prev_row
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
for t in iterable:
if len(t) == 2:
self.compat(t, iterable)
break
tok_type, token, start, end, line = t
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
startline = False
indents = []
toks_append = self.tokens.append
toknum, tokval = token
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum in (NEWLINE, NL):
startline = True
for tok in iterable:
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read
in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present, but
disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return bytes()
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
matches = cookie_re.findall(line_string)
if not matches:
return None
encoding = _get_normal_name(matches[0])
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
def untokenize(iterable):
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
t1 = [tok[:2] for tok in generate_tokens(f.readline)]
newcode = untokenize(t1)
        readline = iter(newcode.splitlines(1)).__next__
        t2 = [tok[:2] for tok in generate_tokens(readline)]
assert t1 == t2
"""
ut = Untokenizer()
return ut.untokenize(iterable)
def generate_tokens(readline):
"""
    The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
        readline = open(myfile).__next__    # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = ''
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
newline = NEWLINE
if parenlev > 0:
newline = NL
yield (newline, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
# This yield is new; needed for better idempotency:
yield (NL, token, spos, (lnum, pos), line)
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
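# --- Illustrative usage sketch (not part of the original module) ---
# Tokenizes a small, hypothetical source string with generate_tokens() and
# prints the token names; io.StringIO stands in for a real file object.
def _demo_generate_tokens():
    import io
    readline = io.StringIO("x = 1 + 2\n").readline
    for tok_type, tok_str, start, end, line in generate_tokens(readline):
        print(tok_name[tok_type], repr(tok_str), start, end)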
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
else: tokenize(sys.stdin.readline)
| Python |
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Convert graminit.[ch] spit out by pgen to Python code.
Pgen is the Python parser generator. It is useful to quickly create a
parser from a grammar file in Python's grammar notation. But I don't
want my parsers to be written in C (yet), so I'm translating the
parsing tables to Python data structures and writing a Python parse
engine.
Note that the token numbers are constants determined by the standard
Python tokenizer. The standard token module defines these numbers and
their names (the names are not used much). The token numbers are
hardcoded into the Python tokenizer and into pgen. A Python
implementation of the Python tokenizer is also available, in the
standard tokenize module.
On the other hand, symbol numbers (representing the grammar's
non-terminals) are assigned by pgen based on the actual grammar
input.
Note: this module is pretty much obsolete; the pgen module generates
equivalent grammar tables directly from the Grammar.txt input file
without having to invoke the Python pgen C program.
"""
# Python imports
import re
# Local imports
from pgen2 import grammar, token
class Converter(grammar.Grammar):
"""Grammar subclass that reads classic pgen output files.
The run() method reads the tables as produced by the pgen parser
generator, typically contained in two C files, graminit.h and
graminit.c. The other methods are for internal use only.
See the base class for more documentation.
"""
def run(self, graminit_h, graminit_c):
"""Load the grammar tables from the text files written by pgen."""
self.parse_graminit_h(graminit_h)
self.parse_graminit_c(graminit_c)
self.finish_off()
def parse_graminit_h(self, filename):
"""Parse the .h file writen by pgen. (Internal)
This file is a sequence of #define statements defining the
nonterminals of the grammar as numbers. We build two tables
mapping the numbers to names and back.
"""
try:
f = open(filename)
except IOError as err:
print("Can't open %s: %s" % (filename, err))
return False
self.symbol2number = {}
self.number2symbol = {}
lineno = 0
for line in f:
lineno += 1
mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
if not mo and line.strip():
print("%s(%s): can't parse %s" % (filename, lineno,
line.strip()))
else:
symbol, number = mo.groups()
number = int(number)
assert symbol not in self.symbol2number
assert number not in self.number2symbol
self.symbol2number[symbol] = number
self.number2symbol[number] = symbol
return True
def parse_graminit_c(self, filename):
"""Parse the .c file writen by pgen. (Internal)
The file looks as follows. The first two lines are always this:
#include "pgenheaders.h"
#include "grammar.h"
After that come four blocks:
1) one or more state definitions
2) a table defining dfas
3) a table defining labels
4) a struct defining the grammar
A state definition has the following form:
- one or more arc arrays, each of the form:
static arc arcs_<n>_<m>[<k>] = {
{<i>, <j>},
...
};
- followed by a state array, of the form:
static state states_<s>[<t>] = {
{<k>, arcs_<n>_<m>},
...
};
"""
try:
f = open(filename)
except IOError as err:
print("Can't open %s: %s" % (filename, err))
return False
# The code below essentially uses f's iterator-ness!
lineno = 0
# Expect the two #include lines
lineno, line = lineno+1, next(f)
assert line == '#include "pgenheaders.h"\n', (lineno, line)
lineno, line = lineno+1, next(f)
assert line == '#include "grammar.h"\n', (lineno, line)
# Parse the state definitions
lineno, line = lineno+1, next(f)
allarcs = {}
states = []
while line.startswith("static arc "):
while line.startswith("static arc "):
mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$",
line)
assert mo, (lineno, line)
n, m, k = list(map(int, mo.groups()))
arcs = []
for _ in range(k):
lineno, line = lineno+1, next(f)
mo = re.match(r"\s+{(\d+), (\d+)},$", line)
assert mo, (lineno, line)
i, j = list(map(int, mo.groups()))
arcs.append((i, j))
lineno, line = lineno+1, next(f)
assert line == "};\n", (lineno, line)
allarcs[(n, m)] = arcs
lineno, line = lineno+1, next(f)
mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line)
assert mo, (lineno, line)
s, t = list(map(int, mo.groups()))
assert s == len(states), (lineno, line)
state = []
for _ in range(t):
lineno, line = lineno+1, next(f)
mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line)
assert mo, (lineno, line)
k, n, m = list(map(int, mo.groups()))
arcs = allarcs[n, m]
assert k == len(arcs), (lineno, line)
state.append(arcs)
states.append(state)
lineno, line = lineno+1, next(f)
assert line == "};\n", (lineno, line)
lineno, line = lineno+1, next(f)
self.states = states
# Parse the dfas
dfas = {}
mo = re.match(r"static dfa dfas\[(\d+)\] = {$", line)
assert mo, (lineno, line)
ndfas = int(mo.group(1))
for i in range(ndfas):
lineno, line = lineno+1, next(f)
mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$',
line)
assert mo, (lineno, line)
symbol = mo.group(2)
number, x, y, z = list(map(int, mo.group(1, 3, 4, 5)))
assert self.symbol2number[symbol] == number, (lineno, line)
assert self.number2symbol[number] == symbol, (lineno, line)
assert x == 0, (lineno, line)
state = states[z]
assert y == len(state), (lineno, line)
lineno, line = lineno+1, next(f)
mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line)
assert mo, (lineno, line)
first = {}
rawbitset = eval(mo.group(1))
for i, c in enumerate(rawbitset):
byte = ord(c)
for j in range(8):
if byte & (1<<j):
first[i*8 + j] = 1
dfas[number] = (state, first)
lineno, line = lineno+1, next(f)
assert line == "};\n", (lineno, line)
self.dfas = dfas
# Parse the labels
labels = []
lineno, line = lineno+1, next(f)
mo = re.match(r"static label labels\[(\d+)\] = {$", line)
assert mo, (lineno, line)
nlabels = int(mo.group(1))
for i in range(nlabels):
lineno, line = lineno+1, next(f)
mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line)
assert mo, (lineno, line)
x, y = mo.groups()
x = int(x)
if y == "0":
y = None
else:
y = eval(y)
labels.append((x, y))
lineno, line = lineno+1, next(f)
assert line == "};\n", (lineno, line)
self.labels = labels
# Parse the grammar struct
lineno, line = lineno+1, next(f)
assert line == "grammar _PyParser_Grammar = {\n", (lineno, line)
lineno, line = lineno+1, next(f)
mo = re.match(r"\s+(\d+),$", line)
assert mo, (lineno, line)
ndfas = int(mo.group(1))
assert ndfas == len(self.dfas)
lineno, line = lineno+1, next(f)
assert line == "\tdfas,\n", (lineno, line)
lineno, line = lineno+1, next(f)
mo = re.match(r"\s+{(\d+), labels},$", line)
assert mo, (lineno, line)
nlabels = int(mo.group(1))
assert nlabels == len(self.labels), (lineno, line)
lineno, line = lineno+1, next(f)
mo = re.match(r"\s+(\d+)$", line)
assert mo, (lineno, line)
start = int(mo.group(1))
assert start in self.number2symbol, (lineno, line)
self.start = start
lineno, line = lineno+1, next(f)
assert line == "};\n", (lineno, line)
try:
lineno, line = lineno+1, next(f)
except StopIteration:
pass
else:
assert 0, (lineno, line)
def finish_off(self):
"""Create additional useful structures. (Internal)."""
self.keywords = {} # map from keyword strings to arc labels
self.tokens = {} # map from numeric token values to arc labels
for ilabel, (type, value) in enumerate(self.labels):
if type == token.NAME and value is not None:
self.keywords[value] = ilabel
elif value is None:
self.tokens[type] = ilabel
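# --- Illustrative usage sketch (not part of the original module) ---
# Loads classic pgen output with the Converter above; the file names are
# hypothetical and, as the module docstring notes, pgen.generate_grammar()
# is normally used instead of this path.
def _demo_convert(h="graminit.h", c="graminit.c"):
    conv = Converter()
    conv.run(h, c)   # fills symbol2number, states, dfas, labels, ...
    return conv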
| Python |
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Safely evaluate Python string literals without using eval()."""
import re
simple_escapes = {"a": "\a",
"b": "\b",
"f": "\f",
"n": "\n",
"r": "\r",
"t": "\t",
"v": "\v",
"'": "'",
'"': '"',
"\\": "\\"}
def escape(m):
all, tail = m.group(0, 1)
assert all.startswith("\\")
esc = simple_escapes.get(tail)
if esc is not None:
return esc
if tail.startswith("x"):
hexes = tail[1:]
if len(hexes) < 2:
raise ValueError("invalid hex string escape ('\\%s')" % tail)
try:
i = int(hexes, 16)
except ValueError:
raise ValueError("invalid hex string escape ('\\%s')" % tail)
else:
try:
i = int(tail, 8)
except ValueError:
raise ValueError("invalid octal string escape ('\\%s')" % tail)
return chr(i)
def evalString(s):
assert s.startswith("'") or s.startswith('"'), repr(s[:1])
q = s[0]
if s[:3] == q*3:
q = q*3
assert s.endswith(q), repr(s[-len(q):])
assert len(s) >= 2*len(q)
s = s[len(q):-len(q)]
return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s)
def test():
for i in range(256):
c = chr(i)
s = repr(c)
e = evalString(s)
if e != c:
print(i, c, s, e)
if __name__ == "__main__":
test()
| Python |
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Pgen imports
from . import grammar, token, tokenize
class PgenGrammar(grammar.Grammar):
pass
class ParserGenerator(object):
def __init__(self, filename, stream=None):
close_stream = None
if stream is None:
stream = open(filename)
close_stream = stream.close
self.filename = filename
self.stream = stream
self.generator = tokenize.generate_tokens(stream.readline)
self.gettoken() # Initialize lookahead
self.dfas, self.startsymbol = self.parse()
if close_stream is not None:
close_stream()
self.first = {} # map from symbol name to set of tokens
self.addfirstsets()
def make_grammar(self):
c = PgenGrammar()
names = list(self.dfas.keys())
names.sort()
names.remove(self.startsymbol)
names.insert(0, self.startsymbol)
for name in names:
i = 256 + len(c.symbol2number)
c.symbol2number[name] = i
c.number2symbol[i] = name
for name in names:
dfa = self.dfas[name]
states = []
for state in dfa:
arcs = []
for label, next in state.arcs.items():
arcs.append((self.make_label(c, label), dfa.index(next)))
if state.isfinal:
arcs.append((0, dfa.index(state)))
states.append(arcs)
c.states.append(states)
c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
c.start = c.symbol2number[self.startsymbol]
return c
def make_first(self, c, name):
rawfirst = self.first[name]
first = {}
for label in rawfirst:
ilabel = self.make_label(c, label)
##assert ilabel not in first # XXX failed on <> ... !=
first[ilabel] = 1
return first
def make_label(self, c, label):
# XXX Maybe this should be a method on a subclass of converter?
ilabel = len(c.labels)
if label[0].isalpha():
# Either a symbol name or a named token
if label in c.symbol2number:
# A symbol name (a non-terminal)
if label in c.symbol2label:
return c.symbol2label[label]
else:
c.labels.append((c.symbol2number[label], None))
c.symbol2label[label] = ilabel
return ilabel
else:
# A named token (NAME, NUMBER, STRING)
itoken = getattr(token, label, None)
assert isinstance(itoken, int), label
assert itoken in token.tok_name, label
if itoken in c.tokens:
return c.tokens[itoken]
else:
c.labels.append((itoken, None))
c.tokens[itoken] = ilabel
return ilabel
else:
# Either a keyword or an operator
assert label[0] in ('"', "'"), label
value = eval(label)
if value[0].isalpha():
# A keyword
if value in c.keywords:
return c.keywords[value]
else:
c.labels.append((token.NAME, value))
c.keywords[value] = ilabel
return ilabel
else:
# An operator (any non-numeric token)
itoken = grammar.opmap[value] # Fails if unknown token
if itoken in c.tokens:
return c.tokens[itoken]
else:
c.labels.append((itoken, None))
c.tokens[itoken] = ilabel
return ilabel
def addfirstsets(self):
names = list(self.dfas.keys())
names.sort()
for name in names:
if name not in self.first:
self.calcfirst(name)
#print name, self.first[name].keys()
def calcfirst(self, name):
dfa = self.dfas[name]
self.first[name] = None # dummy to detect left recursion
state = dfa[0]
totalset = {}
overlapcheck = {}
for label, next in state.arcs.items():
if label in self.dfas:
if label in self.first:
fset = self.first[label]
if fset is None:
raise ValueError("recursion for rule %r" % name)
else:
self.calcfirst(label)
fset = self.first[label]
totalset.update(fset)
overlapcheck[label] = fset
else:
totalset[label] = 1
overlapcheck[label] = {label: 1}
inverse = {}
for label, itsfirst in overlapcheck.items():
for symbol in itsfirst:
if symbol in inverse:
raise ValueError("rule %s is ambiguous; %s is in the"
" first sets of %s as well as %s" %
(name, symbol, label, inverse[symbol]))
inverse[symbol] = label
self.first[name] = totalset
def parse(self):
dfas = {}
startsymbol = None
# MSTART: (NEWLINE | RULE)* ENDMARKER
while self.type != token.ENDMARKER:
while self.type == token.NEWLINE:
self.gettoken()
# RULE: NAME ':' RHS NEWLINE
name = self.expect(token.NAME)
self.expect(token.OP, ":")
a, z = self.parse_rhs()
self.expect(token.NEWLINE)
#self.dump_nfa(name, a, z)
dfa = self.make_dfa(a, z)
#self.dump_dfa(name, dfa)
oldlen = len(dfa)
self.simplify_dfa(dfa)
newlen = len(dfa)
dfas[name] = dfa
#print name, oldlen, newlen
if startsymbol is None:
startsymbol = name
return dfas, startsymbol
def make_dfa(self, start, finish):
# To turn an NFA into a DFA, we define the states of the DFA
# to correspond to *sets* of states of the NFA. Then do some
# state reduction. Let's represent sets as dicts with 1 for
# values.
assert isinstance(start, NFAState)
assert isinstance(finish, NFAState)
def closure(state):
base = {}
addclosure(state, base)
return base
def addclosure(state, base):
assert isinstance(state, NFAState)
if state in base:
return
base[state] = 1
for label, next in state.arcs:
if label is None:
addclosure(next, base)
states = [DFAState(closure(start), finish)]
for state in states: # NB states grows while we're iterating
arcs = {}
for nfastate in state.nfaset:
for label, next in nfastate.arcs:
if label is not None:
addclosure(next, arcs.setdefault(label, {}))
for label, nfaset in arcs.items():
for st in states:
if st.nfaset == nfaset:
break
else:
st = DFAState(nfaset, finish)
states.append(st)
state.addarc(st, label)
return states # List of DFAState instances; first one is start
def dump_nfa(self, name, start, finish):
print("Dump of NFA for", name)
todo = [start]
for i, state in enumerate(todo):
print(" State", i, state is finish and "(final)" or "")
for label, next in state.arcs:
if next in todo:
j = todo.index(next)
else:
j = len(todo)
todo.append(next)
if label is None:
print(" -> %d" % j)
else:
print(" %s -> %d" % (label, j))
def dump_dfa(self, name, dfa):
print("Dump of DFA for", name)
for i, state in enumerate(dfa):
print(" State", i, state.isfinal and "(final)" or "")
for label, next in state.arcs.items():
print(" %s -> %d" % (label, dfa.index(next)))
def simplify_dfa(self, dfa):
# This is not theoretically optimal, but works well enough.
# Algorithm: repeatedly look for two states that have the same
# set of arcs (same labels pointing to the same nodes) and
# unify them, until things stop changing.
# dfa is a list of DFAState instances
changes = True
while changes:
changes = False
for i, state_i in enumerate(dfa):
for j in range(i+1, len(dfa)):
state_j = dfa[j]
if state_i == state_j:
#print " unify", i, j
del dfa[j]
for state in dfa:
state.unifystate(state_j, state_i)
changes = True
break
def parse_rhs(self):
# RHS: ALT ('|' ALT)*
a, z = self.parse_alt()
if self.value != "|":
return a, z
else:
aa = NFAState()
zz = NFAState()
aa.addarc(a)
z.addarc(zz)
while self.value == "|":
self.gettoken()
a, z = self.parse_alt()
aa.addarc(a)
z.addarc(zz)
return aa, zz
def parse_alt(self):
# ALT: ITEM+
a, b = self.parse_item()
while (self.value in ("(", "[") or
self.type in (token.NAME, token.STRING)):
c, d = self.parse_item()
b.addarc(c)
b = d
return a, b
def parse_item(self):
# ITEM: '[' RHS ']' | ATOM ['+' | '*']
if self.value == "[":
self.gettoken()
a, z = self.parse_rhs()
self.expect(token.OP, "]")
a.addarc(z)
return a, z
else:
a, z = self.parse_atom()
value = self.value
if value not in ("+", "*"):
return a, z
self.gettoken()
z.addarc(a)
if value == "+":
return a, z
else:
return a, a
def parse_atom(self):
# ATOM: '(' RHS ')' | NAME | STRING
if self.value == "(":
self.gettoken()
a, z = self.parse_rhs()
self.expect(token.OP, ")")
return a, z
elif self.type in (token.NAME, token.STRING):
a = NFAState()
z = NFAState()
a.addarc(z, self.value)
self.gettoken()
return a, z
else:
self.raise_error("expected (...) or NAME or STRING, got %s/%s",
self.type, self.value)
def expect(self, type, value=None):
if self.type != type or (value is not None and self.value != value):
self.raise_error("expected %s/%s, got %s/%s",
type, value, self.type, self.value)
value = self.value
self.gettoken()
return value
def gettoken(self):
tup = next(self.generator)
while tup[0] in (tokenize.COMMENT, tokenize.NL):
tup = next(self.generator)
self.type, self.value, self.begin, self.end, self.line = tup
#print token.tok_name[self.type], repr(self.value)
def raise_error(self, msg, *args):
if args:
try:
msg = msg % args
except:
msg = " ".join([msg] + list(map(str, args)))
raise SyntaxError(msg, (self.filename, self.end[0],
self.end[1], self.line))
class NFAState(object):
def __init__(self):
self.arcs = [] # list of (label, NFAState) pairs
def addarc(self, next, label=None):
assert label is None or isinstance(label, str)
assert isinstance(next, NFAState)
self.arcs.append((label, next))
class DFAState(object):
def __init__(self, nfaset, final):
assert isinstance(nfaset, dict)
assert isinstance(next(iter(nfaset)), NFAState)
assert isinstance(final, NFAState)
self.nfaset = nfaset
self.isfinal = final in nfaset
self.arcs = {} # map from label to DFAState
def addarc(self, next, label):
assert isinstance(label, str)
assert label not in self.arcs
assert isinstance(next, DFAState)
self.arcs[label] = next
def unifystate(self, old, new):
for label, next in self.arcs.items():
if next is old:
self.arcs[label] = new
def __eq__(self, other):
# Equality test -- ignore the nfaset instance variable
assert isinstance(other, DFAState)
if self.isfinal != other.isfinal:
return False
# Can't just return self.arcs == other.arcs, because that
# would invoke this method recursively, with cycles...
if len(self.arcs) != len(other.arcs):
return False
for label, next in self.arcs.items():
if next is not other.arcs.get(label):
return False
return True
__hash__ = None # For Py3 compatibility.
def generate_grammar(filename="Grammar.txt"):
p = ParserGenerator(filename)
return p.make_grammar()
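# --- Illustrative usage sketch (not part of the original module) ---
# Builds grammar tables from a grammar file and pickles them; the file names
# are hypothetical and the grammar file must exist.
def _demo_pgen(grammar_file="Grammar.txt", pickle_file="Grammar.pickle"):
    g = generate_grammar(grammar_file)
    g.dump(pickle_file)
    return g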
| Python |
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""This module defines the data structures used to represent a grammar.
These are a bit arcane because they are derived from the data
structures used by Python's 'pgen' parser generator.
There's also a table here mapping operators to their names in the
token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.
"""
# Python imports
import pickle
# Local imports
from . import token, tokenize
class Grammar(object):
"""Pgen parsing tables tables conversion class.
Once initialized, this class supplies the grammar tables for the
parsing engine implemented by parse.py. The parsing engine
accesses the instance variables directly. The class here does not
provide initialization of the tables; several subclasses exist to
do this (see the conv and pgen modules).
The load() method reads the tables from a pickle file, which is
much faster than the other ways offered by subclasses. The pickle
file is written by calling dump() (after loading the grammar
tables using a subclass). The report() method prints a readable
representation of the tables to stdout, for debugging.
The instance variables are as follows:
symbol2number -- a dict mapping symbol names to numbers. Symbol
numbers are always 256 or higher, to distinguish
them from token numbers, which are between 0 and
255 (inclusive).
number2symbol -- a dict mapping numbers to symbol names;
these two are each other's inverse.
states -- a list of DFAs, where each DFA is a list of
                     states, each state is a list of arcs, and each
arc is a (i, j) pair where i is a label and j is
a state number. The DFA number is the index into
this list. (This name is slightly confusing.)
Final states are represented by a special arc of
the form (0, j) where j is its own state number.
dfas -- a dict mapping symbol numbers to (DFA, first)
pairs, where DFA is an item from the states list
above, and first is a set of tokens that can
begin this grammar rule (represented by a dict
whose values are always 1).
labels -- a list of (x, y) pairs where x is either a token
number or a symbol number, and y is either None
or a string; the strings are keywords. The label
number is the index in this list; label numbers
are used to mark state transitions (arcs) in the
DFAs.
start -- the number of the grammar's start symbol.
keywords -- a dict mapping keyword strings to arc labels.
tokens -- a dict mapping token numbers to arc labels.
"""
def __init__(self):
self.symbol2number = {}
self.number2symbol = {}
self.states = []
self.dfas = {}
self.labels = [(0, "EMPTY")]
self.keywords = {}
self.tokens = {}
self.symbol2label = {}
self.start = 256
def dump(self, filename):
"""Dump the grammar tables to a pickle file."""
f = open(filename, "wb")
pickle.dump(self.__dict__, f, 2)
f.close()
def load(self, filename):
"""Load the grammar tables from a pickle file."""
f = open(filename, "rb")
d = pickle.load(f)
f.close()
self.__dict__.update(d)
def copy(self):
"""
Copy the grammar.
"""
new = self.__class__()
for dict_attr in ("symbol2number", "number2symbol", "dfas", "keywords",
"tokens", "symbol2label"):
setattr(new, dict_attr, getattr(self, dict_attr).copy())
new.labels = self.labels[:]
new.states = self.states[:]
new.start = self.start
return new
def report(self):
"""Dump the grammar tables to standard output, for debugging."""
from pprint import pprint
print("s2n")
pprint(self.symbol2number)
print("n2s")
pprint(self.number2symbol)
print("states")
pprint(self.states)
print("dfas")
pprint(self.dfas)
print("labels")
pprint(self.labels)
print("start", self.start)
# Map from operator to number (since tokenize doesn't do this)
opmap_raw = """
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
"""
opmap = {}
for line in opmap_raw.splitlines():
if line:
op, name = line.split()
opmap[op] = getattr(token, name)
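# --- Illustrative usage sketch (not part of the original module) ---
# Round-trips an (empty) Grammar through a pickle file; the file name is
# hypothetical. Real tables come from the pgen or conv subclasses.
def _demo_grammar_roundtrip(path="grammar_demo.pickle"):
    g = Grammar()
    g.dump(path)
    g2 = Grammar()
    g2.load(path)
    return g2.symbol2number == g.symbol2number and g2.labels == g.labels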
| Python |
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""The pgen2 package."""
| Python |
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Parser driver.
This provides a high-level interface to parse a file into a syntax tree.
"""
__author__ = "Guido van Rossum <guido@python.org>"
__all__ = ["Driver", "load_grammar"]
# Python imports
import codecs
import os
import logging
import sys
# Pgen imports
from . import grammar, parse, token, tokenize, pgen
class Driver(object):
def __init__(self, grammar, convert=None, logger=None):
self.grammar = grammar
if logger is None:
logger = logging.getLogger()
self.logger = logger
self.convert = convert
def parse_tokens(self, tokens, debug=False):
"""Parse a series of tokens and return the syntax tree."""
# XXX Move the prefix computation into a wrapper around tokenize.
p = parse.Parser(self.grammar, self.convert)
p.setup()
lineno = 1
column = 0
type = value = start = end = line_text = None
prefix = ""
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if start != (lineno, column):
assert (lineno, column) <= start, ((lineno, column), start)
s_lineno, s_column = start
if lineno < s_lineno:
prefix += "\n" * (s_lineno - lineno)
lineno = s_lineno
column = 0
if column < s_column:
prefix += line_text[column:s_column]
column = s_column
if type in (tokenize.COMMENT, tokenize.NL):
prefix += value
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
continue
if type == token.OP:
type = grammar.opmap[value]
if debug:
self.logger.debug("%s %r (prefix=%r)",
token.tok_name[type], value, prefix)
if p.addtoken(type, value, (prefix, start)):
if debug:
self.logger.debug("Stop.")
break
prefix = ""
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
else:
# We never broke out -- EOF is too soon (how can this happen???)
raise parse.ParseError("incomplete input",
type, value, (prefix, start))
return p.rootnode
def parse_stream_raw(self, stream, debug=False):
"""Parse a stream and return the syntax tree."""
tokens = tokenize.generate_tokens(stream.readline)
return self.parse_tokens(tokens, debug)
def parse_stream(self, stream, debug=False):
"""Parse a stream and return the syntax tree."""
return self.parse_stream_raw(stream, debug)
def parse_file(self, filename, encoding=None, debug=False):
"""Parse a file and return the syntax tree."""
stream = codecs.open(filename, "r", encoding)
try:
return self.parse_stream(stream, debug)
finally:
stream.close()
def parse_string(self, text, debug=False):
"""Parse a string and return the syntax tree."""
tokens = tokenize.generate_tokens(generate_lines(text).__next__)
return self.parse_tokens(tokens, debug)
def generate_lines(text):
"""Generator that behaves like readline without using StringIO."""
for line in text.splitlines(True):
yield line
while True:
yield ""
def load_grammar(gt="Grammar.txt", gp=None,
save=True, force=False, logger=None):
"""Load the grammar (maybe from a pickle)."""
if logger is None:
logger = logging.getLogger()
if gp is None:
head, tail = os.path.splitext(gt)
if tail == ".txt":
tail = ""
gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
if force or not _newer(gp, gt):
logger.info("Generating grammar tables from %s", gt)
g = pgen.generate_grammar(gt)
if save:
logger.info("Writing grammar tables to %s", gp)
try:
g.dump(gp)
except IOError as e:
logger.info("Writing failed:"+str(e))
else:
g = grammar.Grammar()
g.load(gp)
return g
def _newer(a, b):
"""Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
if not os.path.exists(b):
return True
return os.path.getmtime(a) >= os.path.getmtime(b)
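# --- Illustrative usage sketch (not part of the original module) ---
# Parses a short, hypothetical source string with the Driver above, using
# lib2to3's bundled Python grammar and pytree.convert as the tree builder.
def _demo_driver():
    from lib2to3 import pygram, pytree
    drv = Driver(pygram.python_grammar, convert=pytree.convert)
    tree = drv.parse_string("x = 1\n")
    return str(tree)   # reproduces the input source exactly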
| Python |
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""
Python parse tree definitions.
This is a very concrete parse tree; we need to keep every token and
even the comments and whitespace between tokens.
There's also a pattern matching implementation here.
"""
__author__ = "Guido van Rossum <guido@python.org>"
import sys
import warnings
from io import StringIO
HUGE = 0x7FFFFFFF # maximum repeat count, default max
_type_reprs = {}
def type_repr(type_num):
global _type_reprs
if not _type_reprs:
from .pygram import python_symbols
# printing tokens is possible but not as useful
# from .pgen2 import token // token.__dict__.items():
for name, val in python_symbols.__dict__.items():
if type(val) == int: _type_reprs[val] = name
return _type_reprs.setdefault(type_num, type_num)
class Base(object):
"""
Abstract base class for Node and Leaf.
This provides some default functionality and boilerplate using the
template pattern.
A node may be a subnode of at most one parent.
"""
# Default values for instance variables
type = None # int: token number (< 256) or symbol number (>= 256)
parent = None # Parent node pointer, or None
children = () # Tuple of subnodes
was_changed = False
was_checked = False
def __new__(cls, *args, **kwds):
"""Constructor that prevents Base from being instantiated."""
assert cls is not Base, "Cannot instantiate Base"
return object.__new__(cls)
def __eq__(self, other):
"""
Compare two nodes for equality.
This calls the method _eq().
"""
if self.__class__ is not other.__class__:
return NotImplemented
return self._eq(other)
__hash__ = None # For Py3 compatibility.
def __ne__(self, other):
"""
Compare two nodes for inequality.
This calls the method _eq().
"""
if self.__class__ is not other.__class__:
return NotImplemented
return not self._eq(other)
def _eq(self, other):
"""
Compare two nodes for equality.
This is called by __eq__ and __ne__. It is only called if the two nodes
have the same type. This must be implemented by the concrete subclass.
Nodes should be considered equal if they have the same structure,
ignoring the prefix string and other context information.
"""
raise NotImplementedError
def clone(self):
"""
Return a cloned (deep) copy of self.
This must be implemented by the concrete subclass.
"""
raise NotImplementedError
def post_order(self):
"""
Return a post-order iterator for the tree.
This must be implemented by the concrete subclass.
"""
raise NotImplementedError
def pre_order(self):
"""
Return a pre-order iterator for the tree.
This must be implemented by the concrete subclass.
"""
raise NotImplementedError
def set_prefix(self, prefix):
"""
Set the prefix for the node (see Leaf class).
DEPRECATED; use the prefix property directly.
"""
warnings.warn("set_prefix() is deprecated; use the prefix property",
DeprecationWarning, stacklevel=2)
self.prefix = prefix
def get_prefix(self):
"""
Return the prefix for the node (see Leaf class).
DEPRECATED; use the prefix property directly.
"""
warnings.warn("get_prefix() is deprecated; use the prefix property",
DeprecationWarning, stacklevel=2)
return self.prefix
def replace(self, new):
"""Replace this node with a new one in the parent."""
assert self.parent is not None, str(self)
assert new is not None
if not isinstance(new, list):
new = [new]
l_children = []
found = False
for ch in self.parent.children:
if ch is self:
assert not found, (self.parent.children, self, new)
if new is not None:
l_children.extend(new)
found = True
else:
l_children.append(ch)
assert found, (self.children, self, new)
self.parent.changed()
self.parent.children = l_children
for x in new:
x.parent = self.parent
self.parent = None
def get_lineno(self):
"""Return the line number which generated the invocant node."""
node = self
while not isinstance(node, Leaf):
if not node.children:
return
node = node.children[0]
return node.lineno
def changed(self):
if self.parent:
self.parent.changed()
self.was_changed = True
def remove(self):
"""
Remove the node from the tree. Returns the position of the node in its
parent's children before it was removed.
"""
if self.parent:
for i, node in enumerate(self.parent.children):
if node is self:
self.parent.changed()
del self.parent.children[i]
self.parent = None
return i
@property
def next_sibling(self):
"""
The node immediately following the invocant in their parent's children
        list. If the invocant does not have a next sibling, it is None.
"""
if self.parent is None:
return None
# Can't use index(); we need to test by identity
for i, child in enumerate(self.parent.children):
if child is self:
try:
return self.parent.children[i+1]
except IndexError:
return None
@property
def prev_sibling(self):
"""
The node immediately preceding the invocant in their parent's children
list. If the invocant does not have a previous sibling, it is None.
"""
if self.parent is None:
return None
# Can't use index(); we need to test by identity
for i, child in enumerate(self.parent.children):
if child is self:
if i == 0:
return None
return self.parent.children[i-1]
def leaves(self):
for child in self.children:
for x in child.leaves():
yield x
if type(self) is Leaf:
yield self
def to_root(self):
yield self
if self.parent:
for p in self.parent.to_root():
yield p
def depth(self):
if self.parent is None:
return 0
return 1 + self.parent.depth()
def get_suffix(self):
"""
Return the string immediately following the invocant node. This is
effectively equivalent to node.next_sibling.prefix
"""
next_sib = self.next_sibling
if next_sib is None:
return ""
return next_sib.prefix
if sys.version_info < (3, 0):
def __str__(self):
return str(self).encode("ascii")
class Node(Base):
"""Concrete implementation for interior nodes."""
def __init__(self,type, children,
context=None,
prefix=None,
fixers_applied=[]):
"""
Initializer.
Takes a type constant (a symbol number >= 256), a sequence of
child nodes, and an optional context keyword argument.
As a side effect, the parent pointers of the children are updated.
"""
assert type >= 256, type
self.type = type
self.children = list(children)
for ch in self.children:
assert ch.parent is None, repr(ch)
ch.parent = self
if prefix is not None:
self.prefix = prefix
self.fixers_applied = fixers_applied[:]
def __hash__(self):
return self.node_id
def __repr__(self):
"""Return a canonical string representation."""
return "%s(%s, %r)" % (self.__class__.__name__,
type_repr(self.type),
self.children)
def __unicode__(self):
"""
Return a pretty string representation.
This reproduces the input source exactly.
"""
return "".join(map(str, self.children))
if sys.version_info > (3, 0):
__str__ = __unicode__
def _eq(self, other):
"""Compare two nodes for equality."""
return (self.type, self.children) == (other.type, other.children)
def clone(self):
"""Return a cloned (deep) copy of self."""
return Node(self.type, [ch.clone() for ch in self.children],
fixers_applied=self.fixers_applied)
def post_order(self):
"""Return a post-order iterator for the tree."""
for child in self.children:
for node in child.post_order():
yield node
yield self
def pre_order(self):
"""Return a pre-order iterator for the tree."""
yield self
for child in self.children:
for node in child.post_order():
yield node
def _prefix_getter(self):
"""
The whitespace and comments preceding this node in the input.
"""
if not self.children:
return ""
return self.children[0].prefix
def _prefix_setter(self, prefix):
if self.children:
self.children[0].prefix = prefix
prefix = property(_prefix_getter, _prefix_setter)
def set_child(self, i, child):
"""
Equivalent to 'node.children[i] = child'. This method also sets the
child's parent attribute appropriately.
"""
child.parent = self
self.children[i].parent = None
self.children[i] = child
self.changed()
def insert_child(self, i, child):
"""
Equivalent to 'node.children.insert(i, child)'. This method also sets
the child's parent attribute appropriately.
"""
child.parent = self
self.children.insert(i, child)
self.changed()
def append_child(self, child):
"""
Equivalent to 'node.children.append(child)'. This method also sets the
child's parent attribute appropriately.
"""
child.parent = self
self.children.append(child)
self.changed()
class Leaf(Base):
"""Concrete implementation for leaf nodes."""
# Default values for instance variables
_prefix = "" # Whitespace and comments preceding this token in the input
lineno = 0 # Line where this token starts in the input
    column = 0  # Column where this token starts in the input
def __init__(self, type, value,
context=None,
prefix=None,
fixers_applied=[]):
"""
Initializer.
Takes a type constant (a token number < 256), a string value, and an
optional context keyword argument.
"""
assert 0 <= type < 256, type
if context is not None:
self._prefix, (self.lineno, self.column) = context
self.type = type
self.value = value
if prefix is not None:
self._prefix = prefix
self.fixers_applied = fixers_applied[:]
def __hash__(self):
return self.node_id
def __repr__(self):
"""Return a canonical string representation."""
return "%s(%r, %r)" % (self.__class__.__name__,
self.type,
self.value)
def __unicode__(self):
"""
Return a pretty string representation.
This reproduces the input source exactly.
"""
return self.prefix + str(self.value)
if sys.version_info > (3, 0):
__str__ = __unicode__
def _eq(self, other):
"""Compare two nodes for equality."""
return (self.type, self.value) == (other.type, other.value)
def clone(self):
"""Return a cloned (deep) copy of self."""
return Leaf(self.type, self.value,
(self.prefix, (self.lineno, self.column)),
fixers_applied=self.fixers_applied)
def post_order(self):
"""Return a post-order iterator for the tree."""
yield self
def pre_order(self):
"""Return a pre-order iterator for the tree."""
yield self
def _prefix_getter(self):
"""
The whitespace and comments preceding this token in the input.
"""
return self._prefix
def _prefix_setter(self, prefix):
self.changed()
self._prefix = prefix
prefix = property(_prefix_getter, _prefix_setter)
def convert(gr, raw_node):
"""
Convert raw node information to a Node or Leaf instance.
This is passed to the parser driver which calls it whenever a reduction of a
    grammar rule produces a new complete node, so that the tree is built
strictly bottom-up.
"""
type, value, context, children = raw_node
if children or type in gr.number2symbol:
# If there's exactly one child, return that child instead of
# creating a new node.
if len(children) == 1:
return children[0]
return Node(type, children, context=context)
else:
return Leaf(type, value, context=context)
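# --- Illustrative usage sketch (not part of the original module) ---
# Builds a tiny tree by hand from Node and Leaf; the token numbers follow the
# token module (NAME=1, NUMBER=2, EQUAL=22) and the symbol number 257 is an
# arbitrary placeholder for a real grammar symbol.
def _demo_tree():
    lhs = Leaf(1, "x")
    eq = Leaf(22, "=", prefix=" ")
    rhs = Leaf(2, "1", prefix=" ")
    stmt = Node(257, [lhs, eq, rhs])
    assert str(stmt) == "x = 1"
    return stmt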
class BasePattern(object):
"""
A pattern is a tree matching pattern.
It looks for a specific node type (token or symbol), and
optionally for a specific content.
This is an abstract base class. There are three concrete
subclasses:
- LeafPattern matches a single leaf node;
- NodePattern matches a single node (usually non-leaf);
- WildcardPattern matches a sequence of nodes of variable length.
"""
# Defaults for instance variables
type = None # Node type (token if < 256, symbol if >= 256)
content = None # Optional content matching pattern
name = None # Optional name used to store match in results dict
def __new__(cls, *args, **kwds):
"""Constructor that prevents BasePattern from being instantiated."""
assert cls is not BasePattern, "Cannot instantiate BasePattern"
return object.__new__(cls)
def __repr__(self):
args = [type_repr(self.type), self.content, self.name]
while args and args[-1] is None:
del args[-1]
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args)))
def optimize(self):
"""
A subclass can define this as a hook for optimizations.
Returns either self or another node with the same effect.
"""
return self
def match(self, node, results=None):
"""
Does this pattern exactly match a node?
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
Default implementation for non-wildcard patterns.
"""
if self.type is not None and node.type != self.type:
return False
if self.content is not None:
r = None
if results is not None:
r = {}
if not self._submatch(node, r):
return False
if r:
results.update(r)
if results is not None and self.name:
results[self.name] = node
return True
def match_seq(self, nodes, results=None):
"""
Does this pattern exactly match a sequence of nodes?
Default implementation for non-wildcard patterns.
"""
if len(nodes) != 1:
return False
return self.match(nodes[0], results)
def generate_matches(self, nodes):
"""
Generator yielding all matches for this pattern.
Default implementation for non-wildcard patterns.
"""
r = {}
if nodes and self.match(nodes[0], r):
yield 1, r
class LeafPattern(BasePattern):
def __init__(self, type=None, content=None, name=None):
"""
Initializer. Takes optional type, content, and name.
        The type, if given, must be a token type (< 256). If not given,
this matches any *leaf* node; the content may still be required.
The content, if given, must be a string.
If a name is given, the matching node is stored in the results
dict under that key.
"""
if type is not None:
assert 0 <= type < 256, type
if content is not None:
assert isinstance(content, str), repr(content)
self.type = type
self.content = content
self.name = name
def match(self, node, results=None):
"""Override match() to insist on a leaf node."""
if not isinstance(node, Leaf):
return False
return BasePattern.match(self, node, results)
def _submatch(self, node, results=None):
"""
Match the pattern's content to the node's children.
This assumes the node type matches and self.content is not None.
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
When returning False, the results dict may still be updated.
"""
return self.content == node.value
class NodePattern(BasePattern):
wildcards = False
def __init__(self, type=None, content=None, name=None):
"""
Initializer. Takes optional type, content, and name.
The type, if given, must be a symbol type (>= 256). If the
type is None this matches *any* single node (leaf or not),
        except if content is not None, in which case it only matches
non-leaf nodes that also match the content pattern.
The content, if not None, must be a sequence of Patterns that
must match the node's children exactly. If the content is
given, the type must not be None.
If a name is given, the matching node is stored in the results
dict under that key.
"""
if type is not None:
assert type >= 256, type
if content is not None:
assert not isinstance(content, str), repr(content)
content = list(content)
for i, item in enumerate(content):
assert isinstance(item, BasePattern), (i, item)
if isinstance(item, WildcardPattern):
self.wildcards = True
self.type = type
self.content = content
self.name = name
def _submatch(self, node, results=None):
"""
Match the pattern's content to the node's children.
This assumes the node type matches and self.content is not None.
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
When returning False, the results dict may still be updated.
"""
if self.wildcards:
for c, r in generate_matches(self.content, node.children):
if c == len(node.children):
if results is not None:
results.update(r)
return True
return False
if len(self.content) != len(node.children):
return False
for subpattern, child in zip(self.content, node.children):
if not subpattern.match(child, results):
return False
return True
class WildcardPattern(BasePattern):
"""
A wildcard pattern can match zero or more nodes.
This has all the flexibility needed to implement patterns like:
.* .+ .? .{m,n}
(a b c | d e | f)
(...)* (...)+ (...)? (...){m,n}
except it always uses non-greedy matching.
"""
def __init__(self, content=None, min=0, max=HUGE, name=None):
"""
Initializer.
Args:
content: optional sequence of subsequences of patterns;
if absent, matches one node;
if present, each subsequence is an alternative [*]
            min: optional minimum number of times to match, default 0
            max: optional maximum number of times to match, default HUGE
name: optional name assigned to this match
[*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
equivalent to (a b c | d e | f g h); if content is None,
this is equivalent to '.' in regular expression terms.
The min and max parameters work as follows:
min=0, max=maxint: .*
min=1, max=maxint: .+
min=0, max=1: .?
min=1, max=1: .
If content is not None, replace the dot with the parenthesized
list of alternatives, e.g. (a b c | d e | f g h)*
"""
assert 0 <= min <= max <= HUGE, (min, max)
if content is not None:
content = tuple(map(tuple, content)) # Protect against alterations
# Check sanity of alternatives
assert len(content), repr(content) # Can't have zero alternatives
for alt in content:
                assert len(alt), repr(alt) # Can't have empty alternatives
self.content = content
self.min = min
self.max = max
self.name = name
def optimize(self):
"""Optimize certain stacked wildcard patterns."""
subpattern = None
if (self.content is not None and
len(self.content) == 1 and len(self.content[0]) == 1):
subpattern = self.content[0][0]
if self.min == 1 and self.max == 1:
if self.content is None:
return NodePattern(name=self.name)
if subpattern is not None and self.name == subpattern.name:
return subpattern.optimize()
if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
subpattern.min <= 1 and self.name == subpattern.name):
return WildcardPattern(subpattern.content,
self.min*subpattern.min,
self.max*subpattern.max,
subpattern.name)
return self
def match(self, node, results=None):
"""Does this pattern exactly match a node?"""
return self.match_seq([node], results)
def match_seq(self, nodes, results=None):
"""Does this pattern exactly match a sequence of nodes?"""
for c, r in self.generate_matches(nodes):
if c == len(nodes):
if results is not None:
results.update(r)
if self.name:
results[self.name] = list(nodes)
return True
return False
def generate_matches(self, nodes):
"""
Generator yielding matches for a sequence of nodes.
Args:
nodes: sequence of nodes
Yields:
(count, results) tuples where:
count: the match comprises nodes[:count];
results: dict containing named submatches.
"""
if self.content is None:
# Shortcut for special case (see __init__.__doc__)
for count in range(self.min, 1 + min(len(nodes), self.max)):
r = {}
if self.name:
r[self.name] = nodes[:count]
yield count, r
elif self.name == "bare_name":
yield self._bare_name_matches(nodes)
else:
# The reason for this is that hitting the recursion limit usually
# results in some ugly messages about how RuntimeErrors are being
# ignored.
save_stderr = sys.stderr
sys.stderr = StringIO()
try:
for count, r in self._recursive_matches(nodes, 0):
if self.name:
r[self.name] = nodes[:count]
yield count, r
except RuntimeError:
# We fall back to the iterative pattern matching scheme if the recursive
# scheme hits the recursion limit.
for count, r in self._iterative_matches(nodes):
if self.name:
r[self.name] = nodes[:count]
yield count, r
finally:
sys.stderr = save_stderr
def _iterative_matches(self, nodes):
"""Helper to iteratively yield the matches."""
nodelen = len(nodes)
if 0 >= self.min:
yield 0, {}
results = []
# generate matches that use just one alt from self.content
for alt in self.content:
for c, r in generate_matches(alt, nodes):
yield c, r
results.append((c, r))
# for each match, iterate down the nodes
while results:
new_results = []
for c0, r0 in results:
# stop if the entire set of nodes has been matched
if c0 < nodelen and c0 <= self.max:
for alt in self.content:
for c1, r1 in generate_matches(alt, nodes[c0:]):
if c1 > 0:
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r
new_results.append((c0 + c1, r))
results = new_results
def _bare_name_matches(self, nodes):
"""Special optimized matcher for bare_name."""
count = 0
r = {}
done = False
max = len(nodes)
while not done and count < max:
done = True
for leaf in self.content:
if leaf[0].match(nodes[count], r):
count += 1
done = False
break
r[self.name] = nodes[:count]
return count, r
def _recursive_matches(self, nodes, count):
"""Helper to recursively yield the matches."""
assert self.content is not None
if count >= self.min:
yield 0, {}
if count < self.max:
for alt in self.content:
for c0, r0 in generate_matches(alt, nodes):
for c1, r1 in self._recursive_matches(nodes[c0:], count+1):
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r
class NegatedPattern(BasePattern):
def __init__(self, content=None):
"""
Initializer.
The argument is either a pattern or None. If it is None, this
only matches an empty sequence (effectively '$' in regex
lingo). If it is not None, this matches whenever the argument
pattern doesn't have any matches.
"""
if content is not None:
assert isinstance(content, BasePattern), repr(content)
self.content = content
def match(self, node):
# We never match a node in its entirety
return False
def match_seq(self, nodes):
# We only match an empty sequence of nodes in its entirety
return len(nodes) == 0
def generate_matches(self, nodes):
if self.content is None:
# Return a match if there is an empty sequence
if len(nodes) == 0:
yield 0, {}
else:
# Return a match if the argument pattern has no matches
for c, r in self.content.generate_matches(nodes):
return
yield 0, {}
def generate_matches(patterns, nodes):
"""
Generator yielding matches for a sequence of patterns and nodes.
Args:
patterns: a sequence of patterns
nodes: a sequence of nodes
Yields:
(count, results) tuples where:
count: the entire sequence of patterns matches nodes[:count];
results: dict containing named submatches.
"""
if not patterns:
yield 0, {}
else:
p, rest = patterns[0], patterns[1:]
for c0, r0 in p.generate_matches(nodes):
if not rest:
yield c0, r0
else:
for c1, r1 in generate_matches(rest, nodes[c0:]):
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r
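# --- Minimal usage sketch (editorial addition, not part of the original module) ---
# Demonstrates the pattern classes above, assuming this is lib2to3's pytree
# module so Leaf is defined earlier in the file.  The token value 1 (NAME)
# is an assumption taken from pgen2's token numbering.
if __name__ == "__main__":
    NAME = 1  # assumed value of token.NAME
    nodes = [Leaf(NAME, "spam"), Leaf(NAME, "eggs")]
    # Match a single NAME leaf and record it under the key "n".
    pat = LeafPattern(NAME, name="n")
    results = {}
    print(pat.match(nodes[0], results), results["n"])
    # A wildcard matching one or more NAME leaves, collected under "all".
    wild = WildcardPattern([[LeafPattern(NAME)]], min=1, name="all")
    for count, r in wild.generate_matches(nodes):
        print(count, [leaf.value for leaf in r["all"]])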
| Python |
"""A bottom-up tree matching algorithm implementation meant to speed
up 2to3's matching process. After the tree patterns are reduced to
their rarest linear path, a linear Aho-Corasick automaton is
created. The linear automaton traverses the linear paths from the
leaves to the root of the AST and returns a set of nodes for further
matching. This significantly reduces the number of candidate nodes."""
__author__ = "George Boutsioukis <gboutsioukis@gmail.com>"
import logging
from .btm_utils import *
class BMNode(object):
"""Class for a node of the Aho-Corasick automaton used in matching"""
last_id = 0
def __init__(self):
self.transition_table = {}
self.fixers = []
self.id = BMNode.new_id()
self.content = ''
@classmethod
def new_id(cls):
new_id = cls.last_id
cls.last_id += 1
return new_id
class BottomMatcher(object):
"""The main matcher class. After instantiating the patterns should
be added using the add_fixer method"""
def __init__(self):
self.match = set()
self.root = BMNode()
self.nodes = [self.root]
self.fixers = []
self.logger = logging.getLogger("RefactoringTool")
def add_fixer(self, fixer):
"""Reduces a fixer's pattern tree to a linear path and adds it
        to the matcher (a common Aho-Corasick automaton). The fixer is
        appended to the matching states and called when they are
reached"""
self.fixers.append(fixer)
tree = reduce_tree(fixer.pattern_tree)
linear = tree.get_linear_subpattern()
match_nodes = self.add(linear, start=self.root)
for match_node in match_nodes:
match_node.fixers.append(fixer)
def add(self, pattern, start):
"Recursively adds a linear pattern to the AC automaton"
#print("adding pattern", pattern, "to", start)
if not pattern:
#print("empty pattern")
return [start]
if type(pattern[0]) is tuple:
#alternatives
#print("alternatives")
match_nodes = []
for alternative in pattern[0]:
#add all alternatives, and add the rest of the pattern
#to each end node
end_nodes = self.add(alternative, start=start)
for end in end_nodes:
match_nodes.extend(self.add(pattern[1:], end))
return match_nodes
else:
#single token
#not last
if pattern[0] not in start.transition_table.keys():
#transition did not exist, create new
next_node = BMNode()
start.transition_table[pattern[0]] = next_node
else:
#transition exists already, follow
next_node = start.transition_table[pattern[0]]
if pattern[1:]:
end_nodes = self.add(pattern[1:], start=next_node)
else:
end_nodes = [next_node]
return end_nodes
def run(self, leaves):
"""The main interface with the bottom matcher. The tree is
traversed from the bottom using the constructed
automaton. Nodes are only checked once as the tree is
retraversed. When the automaton fails, we give it one more
        shot (in case the above tree matches as a whole with the
        rejected leaf), then we break for the next leaf. There is the
        special case of multiple arguments (see code comments) where we
recheck the nodes
Args:
The leaves of the AST tree to be matched
Returns:
A dictionary of node matches with fixers as the keys
"""
current_ac_node = self.root
results = {}
for leaf in leaves:
current_ast_node = leaf
while(current_ast_node):
current_ast_node.was_checked = True
for child in current_ast_node.children:
# multiple statements, recheck
if hasattr(child, "value") and child.value==';':
current_ast_node.was_checked = False
break
if current_ast_node.type == 1:
#name
node_token = current_ast_node.value
else:
node_token = current_ast_node.type
if node_token in current_ac_node.transition_table.keys():
#token matches
current_ac_node = current_ac_node.transition_table[node_token]
for fixer in current_ac_node.fixers:
if not fixer in results.keys():
results[fixer] = []
results[fixer].append(current_ast_node)
else:
#matching failed, reset automaton
current_ac_node = self.root
if current_ast_node.parent is not None \
and current_ast_node.parent.was_checked:
#the rest of the tree upwards has been checked, next leaf
break
#recheck the rejected node once from the root
if node_token in current_ac_node.transition_table.keys():
#token matches
current_ac_node = current_ac_node.transition_table[node_token]
for fixer in current_ac_node.fixers:
if not fixer in results.keys():
results[fixer] = []
results[fixer].append(current_ast_node)
current_ast_node = current_ast_node.parent
return results
def print_ac(self):
"Prints a graphviz diagram of the BM automaton(for debugging)"
print("digraph g{")
def print_node(node):
for subnode_key in node.transition_table.keys():
subnode = node.transition_table[subnode_key]
print("%d -> %d [label=%s] //%s" %
(node.id, subnode.id, type_repr(subnode_key), str(subnode.fixers)))
if subnode_key == 1:
print(subnode.content)
print_node(subnode)
print_node(self.root)
print("}")
# taken from pytree.py for debugging; only used by print_ac
_type_reprs = {}
def type_repr(type_num):
global _type_reprs
if not _type_reprs:
from .pygram import python_symbols
# printing tokens is possible but not as useful
# from .pgen2 import token // token.__dict__.items():
for name, val in python_symbols.__dict__.items():
if type(val) == int: _type_reprs[val] = name
return _type_reprs.setdefault(type_num, type_num)
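# --- Minimal usage sketch (editorial addition, not part of the original module) ---
# Builds a tiny automaton by hand via add(), bypassing add_fixer() and the
# btm_utils reduction step, to show how a linear pattern becomes a chain of
# transitions.  Assumes the module is run inside the lib2to3 package (it uses
# a relative import above), e.g. "python -m lib2to3.btm_matcher".  The linear
# pattern below is made up purely for illustration.
if __name__ == "__main__":
    bm = BottomMatcher()
    # token type 1 (NAME) followed by the literal name "print"
    end_nodes = bm.add([1, "print"], start=bm.root)
    for end in end_nodes:
        print("end state id:", end.id)
    print("root transitions:", list(bm.root.transition_table.keys()))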
| Python |
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that turns <> into !=."""
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
class FixNe(fixer_base.BaseFix):
# This is so simple that we don't need the pattern compiler.
_accept_type = token.NOTEQUAL
def match(self, node):
# Override
return node.value == "<>"
def transform(self, node, results):
new = pytree.Leaf(token.NOTEQUAL, "!=", prefix=node.prefix)
return new
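# --- Minimal usage sketch (editorial addition, not part of the original fixer) ---
# Exercises this fixer through lib2to3's public refactoring API; assumes
# lib2to3 is importable and that its default grammar still accepts "<>".
if __name__ == "__main__":
    from lib2to3 import refactor
    tool = refactor.RefactoringTool(["lib2to3.fixes.fix_ne"])
    print(tool.refactor_string("if a <> b:\n    pass\n", "<example>"))
    # expected (roughly): if a != b: ...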
| Python |
"""Fixer for generator.throw(E, V, T).
g.throw(E) -> g.throw(E)
g.throw(E, V) -> g.throw(E(V))
g.throw(E, V, T) -> g.throw(E(V).with_traceback(T))
g.throw("foo"[, V[, T]]) will warn about string exceptions."""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ArgList, Attr, is_tuple
class FixThrow(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< any trailer< '.' 'throw' >
trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' >
>
|
power< any trailer< '.' 'throw' > trailer< '(' exc=any ')' > >
"""
def transform(self, node, results):
syms = self.syms
exc = results["exc"].clone()
if exc.type is token.STRING:
self.cannot_convert(node, "Python 3 does not support string exceptions")
return
# Leave "g.throw(E)" alone
val = results.get("val")
if val is None:
return
val = val.clone()
if is_tuple(val):
args = [c.clone() for c in val.children[1:-1]]
else:
val.prefix = ""
args = [val]
throw_args = results["args"]
if "tb" in results:
tb = results["tb"].clone()
tb.prefix = ""
e = Call(exc, args)
with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
throw_args.replace(pytree.Node(syms.power, with_tb))
else:
throw_args.replace(Call(exc, args))
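# --- Minimal usage sketch (editorial addition, not part of the original fixer) ---
# Shows the documented rewrites via the public refactoring API; assumes
# lib2to3 is importable.  The names g, E, V and T are placeholders.
if __name__ == "__main__":
    from lib2to3 import refactor
    tool = refactor.RefactoringTool(["lib2to3.fixes.fix_throw"])
    print(tool.refactor_string("g.throw(E, V)\n", "<example>"))     # -> g.throw(E(V))
    print(tool.refactor_string("g.throw(E, V, T)\n", "<example>"))  # -> g.throw(E(V).with_traceback(T))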
| Python |
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for apply().
This converts apply(func, v, k) into (func)(*v, **k)."""
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Call, Comma, parenthesize
class FixApply(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< 'apply'
trailer<
'('
arglist<
(not argument<NAME '=' any>) func=any ','
(not argument<NAME '=' any>) args=any [','
(not argument<NAME '=' any>) kwds=any] [',']
>
')'
>
>
"""
def transform(self, node, results):
syms = self.syms
assert results
func = results["func"]
args = results["args"]
kwds = results.get("kwds")
prefix = node.prefix
func = func.clone()
if (func.type not in (token.NAME, syms.atom) and
(func.type != syms.power or
func.children[-2].type == token.DOUBLESTAR)):
# Need to parenthesize
func = parenthesize(func)
func.prefix = ""
args = args.clone()
args.prefix = ""
if kwds is not None:
kwds = kwds.clone()
kwds.prefix = ""
l_newargs = [pytree.Leaf(token.STAR, "*"), args]
if kwds is not None:
l_newargs.extend([Comma(),
pytree.Leaf(token.DOUBLESTAR, "**"),
kwds])
l_newargs[-2].prefix = " " # that's the ** token
# XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t)
# can be translated into f(x, y, *t) instead of f(*(x, y) + t)
#new = pytree.Node(syms.power, (func, ArgList(l_newargs)))
return Call(func, l_newargs, prefix=prefix)
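# --- Minimal usage sketch (editorial addition, not part of the original fixer) ---
# Demonstrates the apply() rewrite via the public refactoring API; assumes
# lib2to3 is importable.  f, args and kwds are placeholder names.
if __name__ == "__main__":
    from lib2to3 import refactor
    tool = refactor.RefactoringTool(["lib2to3.fixes.fix_apply"])
    print(tool.refactor_string("apply(f, args, kwds)\n", "<example>"))
    # expected (roughly): f(*args, **kwds)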
| Python |
"""Fixer for import statements.
If spam is being imported from the local directory, this import:
from spam import eggs
Becomes:
from .spam import eggs
And this import:
import spam
Becomes:
from . import spam
"""
# Local imports
from .. import fixer_base
from os.path import dirname, join, exists, sep
from ..fixer_util import FromImport, syms, token
def traverse_imports(names):
"""
Walks over all the names imported in a dotted_as_names node.
"""
pending = [names]
while pending:
node = pending.pop()
if node.type == token.NAME:
yield node.value
elif node.type == syms.dotted_name:
yield "".join([ch.value for ch in node.children])
elif node.type == syms.dotted_as_name:
pending.append(node.children[0])
elif node.type == syms.dotted_as_names:
pending.extend(node.children[::-2])
else:
raise AssertionError("unkown node type")
class FixImport(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
import_from< 'from' imp=any 'import' ['('] any [')'] >
|
import_name< 'import' imp=any >
"""
def start_tree(self, tree, name):
super(FixImport, self).start_tree(tree, name)
self.skip = "absolute_import" in tree.future_features
def transform(self, node, results):
if self.skip:
return
imp = results['imp']
if node.type == syms.import_from:
# Some imps are top-level (eg: 'import ham')
# some are first level (eg: 'import ham.eggs')
# some are third level (eg: 'import ham.eggs as spam')
# Hence, the loop
while not hasattr(imp, 'value'):
imp = imp.children[0]
if self.probably_a_local_import(imp.value):
imp.value = "." + imp.value
imp.changed()
else:
have_local = False
have_absolute = False
for mod_name in traverse_imports(imp):
if self.probably_a_local_import(mod_name):
have_local = True
else:
have_absolute = True
if have_absolute:
if have_local:
# We won't handle both sibling and absolute imports in the
# same statement at the moment.
self.warning(node, "absolute and local imports together")
return
new = FromImport(".", [imp])
new.prefix = node.prefix
return new
def probably_a_local_import(self, imp_name):
if imp_name.startswith("."):
# Relative imports are certainly not local imports.
return False
imp_name = imp_name.split(".", 1)[0]
base_path = dirname(self.filename)
base_path = join(base_path, imp_name)
        # If there is no __init__.py next to the file it's not in a package
# so can't be a relative import.
if not exists(join(dirname(base_path), "__init__.py")):
return False
for ext in [".py", sep, ".pyc", ".so", ".sl", ".pyd"]:
if exists(base_path + ext):
return True
return False
| Python |
"""Fixer for 'raise E, V, T'
raise -> raise
raise E -> raise E
raise E, V -> raise E(V)
raise E, V, T -> raise E(V).with_traceback(T)
raise (((E, E'), E''), E'''), V -> raise E(V)
raise "foo", V, T -> warns about string exceptions
CAVEATS:
1) "raise E, V" will be incorrectly translated if V is an exception
instance. The correct Python 3 idiom is
raise E from V
but since we can't detect instance-hood by syntax alone and since
any client code would have to be changed as well, we don't automate
this.
"""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, Attr, ArgList, is_tuple
class FixRaise(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
"""
def transform(self, node, results):
syms = self.syms
exc = results["exc"].clone()
if exc.type is token.STRING:
self.cannot_convert(node, "Python 3 does not support string exceptions")
return
# Python 2 supports
# raise ((((E1, E2), E3), E4), E5), V
# as a synonym for
# raise E1, V
# Since Python 3 will not support this, we recurse down any tuple
# literals, always taking the first element.
if is_tuple(exc):
while is_tuple(exc):
# exc.children[1:-1] is the unparenthesized tuple
# exc.children[1].children[0] is the first element of the tuple
exc = exc.children[1].children[0].clone()
exc.prefix = " "
if "val" not in results:
# One-argument raise
new = pytree.Node(syms.raise_stmt, [Name("raise"), exc])
new.prefix = node.prefix
return new
val = results["val"].clone()
if is_tuple(val):
args = [c.clone() for c in val.children[1:-1]]
else:
val.prefix = ""
args = [val]
if "tb" in results:
tb = results["tb"].clone()
tb.prefix = ""
e = Call(exc, args)
with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
new = pytree.Node(syms.simple_stmt, [Name("raise")] + with_tb)
new.prefix = node.prefix
return new
else:
return pytree.Node(syms.raise_stmt,
[Name("raise"), Call(exc, args)],
prefix=node.prefix)
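# --- Minimal usage sketch (editorial addition, not part of the original fixer) ---
# Demonstrates the documented raise rewrite; assumes lib2to3 is importable
# and that the Python 2 form "raise E, V" parses under the default lib2to3
# grammar.
if __name__ == "__main__":
    from lib2to3 import refactor
    tool = refactor.RefactoringTool(["lib2to3.fixes.fix_raise"])
    print(tool.refactor_string("raise ValueError, 'bad value'\n", "<example>"))
    # expected (roughly): raise ValueError('bad value')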
| Python |
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for print.
Change:
'print' into 'print()'
'print ...' into 'print(...)'
'print ... ,' into 'print(..., end=" ")'
'print >>x, ...' into 'print(..., file=x)'
No changes are applied if print_function is imported from __future__
"""
# Local imports
from .. import patcomp
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, Comma, String, is_tuple
parend_expr = patcomp.compile_pattern(
"""atom< '(' [atom|STRING|NAME] ')' >"""
)
class FixPrint(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
simple_stmt< any* bare='print' any* > | print_stmt
"""
def transform(self, node, results):
assert results
bare_print = results.get("bare")
if bare_print:
# Special-case print all by itself
bare_print.replace(Call(Name("print"), [],
prefix=bare_print.prefix))
return
assert node.children[0] == Name("print")
args = node.children[1:]
if len(args) == 1 and parend_expr.match(args[0]):
# We don't want to keep sticking parens around an
# already-parenthesised expression.
return
sep = end = file = None
if args and args[-1] == Comma():
args = args[:-1]
end = " "
if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, ">>"):
assert len(args) >= 2
file = args[1].clone()
args = args[3:] # Strip a possible comma after the file expression
# Now synthesize a print(args, sep=..., end=..., file=...) node.
l_args = [arg.clone() for arg in args]
if l_args:
l_args[0].prefix = ""
if sep is not None or end is not None or file is not None:
if sep is not None:
self.add_kwarg(l_args, "sep", String(repr(sep)))
if end is not None:
self.add_kwarg(l_args, "end", String(repr(end)))
if file is not None:
self.add_kwarg(l_args, "file", file)
n_stmt = Call(Name("print"), l_args)
n_stmt.prefix = node.prefix
return n_stmt
def add_kwarg(self, l_nodes, s_kwd, n_expr):
# XXX All this prefix-setting may lose comments (though rarely)
n_expr.prefix = ""
n_argument = pytree.Node(self.syms.argument,
(Name(s_kwd),
pytree.Leaf(token.EQUAL, "="),
n_expr))
if l_nodes:
l_nodes.append(Comma())
n_argument.prefix = " "
l_nodes.append(n_argument)
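# --- Minimal usage sketch (editorial addition, not part of the original fixer) ---
# Demonstrates the print-statement rewrites; assumes lib2to3 is importable
# and that the default grammar still treats print as a statement.  The name
# log is a placeholder file object.
if __name__ == "__main__":
    from lib2to3 import refactor
    tool = refactor.RefactoringTool(["lib2to3.fixes.fix_print"])
    print(tool.refactor_string("print 'hello'\n", "<example>"))
    print(tool.refactor_string("print >>log, 'hello',\n", "<example>"))
    # expected (roughly): print('hello') and print('hello', end=' ', file=log)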
| Python |
"""Fixer that changes input(...) into eval(input(...))."""
# Author: Andre Roberge
# Local imports
from .. import fixer_base
from ..fixer_util import Call, Name
from .. import patcomp
context = patcomp.compile_pattern("power< 'eval' trailer< '(' any ')' > >")
class FixInput(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< 'input' args=trailer< '(' [any] ')' > >
"""
def transform(self, node, results):
# If we're already wrapped in a eval() call, we're done.
if context.match(node.parent.parent):
return
new = node.clone()
new.prefix = ""
return Call(Name("eval"), [new], prefix=node.prefix)
| Python |
"""Fixer that changes 'a ,b' into 'a, b'.
This also changes '{a :b}' into '{a: b}', but does not touch other
uses of colons. It does not touch other uses of whitespace.
"""
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
class FixWsComma(fixer_base.BaseFix):
    explicit = True # The user must ask for this fixer
PATTERN = """
any<(not(',') any)+ ',' ((not(',') any)+ ',')* [not(',') any]>
"""
COMMA = pytree.Leaf(token.COMMA, ",")
COLON = pytree.Leaf(token.COLON, ":")
SEPS = (COMMA, COLON)
def transform(self, node, results):
new = node.clone()
comma = False
for child in new.children:
if child in self.SEPS:
prefix = child.prefix
if prefix.isspace() and "\n" not in prefix:
child.prefix = ""
comma = True
else:
if comma:
prefix = child.prefix
if not prefix:
child.prefix = " "
comma = False
return new
| Python |
"""Adjust some old Python 2 idioms to their modern counterparts.
* Change some type comparisons to isinstance() calls:
type(x) == T -> isinstance(x, T)
type(x) is T -> isinstance(x, T)
type(x) != T -> not isinstance(x, T)
type(x) is not T -> not isinstance(x, T)
* Change "while 1:" into "while True:".
* Change both
v = list(EXPR)
v.sort()
foo(v)
and the more general
v = EXPR
v.sort()
foo(v)
into
v = sorted(EXPR)
foo(v)
"""
# Author: Jacques Frechet, Collin Winter
# Local imports
from .. import fixer_base
from ..fixer_util import Call, Comma, Name, Node, BlankLine, syms
CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)"
TYPE = "power< 'type' trailer< '(' x=any ')' > >"
class FixIdioms(fixer_base.BaseFix):
explicit = True # The user must ask for this fixer
PATTERN = r"""
isinstance=comparison< %s %s T=any >
|
isinstance=comparison< T=any %s %s >
|
while_stmt< 'while' while='1' ':' any+ >
|
sorted=any<
any*
simple_stmt<
expr_stmt< id1=any '='
power< list='list' trailer< '(' (not arglist<any+>) any ')' > >
>
'\n'
>
sort=
simple_stmt<
power< id2=any
trailer< '.' 'sort' > trailer< '(' ')' >
>
'\n'
>
next=any*
>
|
sorted=any<
any*
simple_stmt< expr_stmt< id1=any '=' expr=any > '\n' >
sort=
simple_stmt<
power< id2=any
trailer< '.' 'sort' > trailer< '(' ')' >
>
'\n'
>
next=any*
>
""" % (TYPE, CMP, CMP, TYPE)
def match(self, node):
r = super(FixIdioms, self).match(node)
# If we've matched one of the sort/sorted subpatterns above, we
# want to reject matches where the initial assignment and the
# subsequent .sort() call involve different identifiers.
if r and "sorted" in r:
if r["id1"] == r["id2"]:
return r
return None
return r
def transform(self, node, results):
if "isinstance" in results:
return self.transform_isinstance(node, results)
elif "while" in results:
return self.transform_while(node, results)
elif "sorted" in results:
return self.transform_sort(node, results)
else:
raise RuntimeError("Invalid match")
def transform_isinstance(self, node, results):
x = results["x"].clone() # The thing inside of type()
T = results["T"].clone() # The type being compared against
x.prefix = ""
T.prefix = " "
test = Call(Name("isinstance"), [x, Comma(), T])
if "n" in results:
test.prefix = " "
test = Node(syms.not_test, [Name("not"), test])
test.prefix = node.prefix
return test
def transform_while(self, node, results):
one = results["while"]
one.replace(Name("True", prefix=one.prefix))
def transform_sort(self, node, results):
sort_stmt = results["sort"]
next_stmt = results["next"]
list_call = results.get("list")
simple_expr = results.get("expr")
if list_call:
list_call.replace(Name("sorted", prefix=list_call.prefix))
elif simple_expr:
new = simple_expr.clone()
new.prefix = ""
simple_expr.replace(Call(Name("sorted"), [new],
prefix=simple_expr.prefix))
else:
raise RuntimeError("should not have reached here")
sort_stmt.remove()
btwn = sort_stmt.prefix
# Keep any prefix lines between the sort_stmt and the list_call and
# shove them right after the sorted() call.
if "\n" in btwn:
if next_stmt:
# The new prefix should be everything from the sort_stmt's
# prefix up to the last newline, then the old prefix after a new
# line.
prefix_lines = (btwn.rpartition("\n")[0], next_stmt[0].prefix)
next_stmt[0].prefix = "\n".join(prefix_lines)
else:
assert list_call.parent
assert list_call.next_sibling is None
# Put a blank line after list_call and set its prefix.
end_line = BlankLine()
list_call.parent.append_child(end_line)
assert list_call.next_sibling is end_line
# The new prefix should be everything up to the first new line
# of sort_stmt's prefix.
end_line.prefix = btwn.rpartition("\n")[0]
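# --- Minimal usage sketch (editorial addition, not part of the original fixer) ---
# Because this fixer is marked explicit, it has to be requested by name; the
# explicit= argument below reflects that assumption about the RefactoringTool
# API.  Assumes lib2to3 is importable.
if __name__ == "__main__":
    from lib2to3 import refactor
    name = "lib2to3.fixes.fix_idioms"
    tool = refactor.RefactoringTool([name], explicit=[name])
    src = "if type(x) == int:\n    pass\nwhile 1:\n    pass\n"
    print(tool.refactor_string(src, "<example>"))
    # expected (roughly): isinstance(x, int) and "while True:"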
| Python |
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for StandardError -> Exception."""
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixStandarderror(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
'StandardError'
"""
def transform(self, node, results):
return Name("Exception", prefix=node.prefix)
| Python |
"""Fix incompatible renames
Fixes:
* sys.maxint -> sys.maxsize
"""
# Author: Christian Heimes
# based on Collin Winter's fix_import
# Local imports
from .. import fixer_base
from ..fixer_util import Name, attr_chain
MAPPING = {"sys": {"maxint" : "maxsize"},
}
LOOKUP = {}
def alternates(members):
return "(" + "|".join(map(repr, members)) + ")"
def build_pattern():
#bare = set()
for module, replace in list(MAPPING.items()):
for old_attr, new_attr in list(replace.items()):
LOOKUP[(module, old_attr)] = new_attr
#bare.add(module)
#bare.add(old_attr)
#yield """
# import_name< 'import' (module=%r
# | dotted_as_names< any* module=%r any* >) >
# """ % (module, module)
yield """
import_from< 'from' module_name=%r 'import'
( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >
""" % (module, old_attr, old_attr)
yield """
power< module_name=%r trailer< '.' attr_name=%r > any* >
""" % (module, old_attr)
#yield """bare_name=%s""" % alternates(bare)
class FixRenames(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "|".join(build_pattern())
order = "pre" # Pre-order tree traversal
# Don't match the node if it's within another match
def match(self, node):
match = super(FixRenames, self).match
results = match(node)
if results:
if any(match(obj) for obj in attr_chain(node, "parent")):
return False
return results
return False
#def start_tree(self, tree, filename):
# super(FixRenames, self).start_tree(tree, filename)
# self.replace = {}
def transform(self, node, results):
mod_name = results.get("module_name")
attr_name = results.get("attr_name")
#bare_name = results.get("bare_name")
#import_mod = results.get("module")
if mod_name and attr_name:
new_attr = LOOKUP[(mod_name.value, attr_name.value)]
attr_name.replace(Name(new_attr, prefix=attr_name.prefix))
| Python |
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes xrange(...) into range(...)."""
# Local imports
from .. import fixer_base
from ..fixer_util import Name, Call, consuming_calls
from .. import patcomp
class FixXrange(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power<
(name='range'|name='xrange') trailer< '(' args=any ')' >
rest=any* >
"""
def start_tree(self, tree, filename):
super(FixXrange, self).start_tree(tree, filename)
self.transformed_xranges = set()
def finish_tree(self, tree, filename):
self.transformed_xranges = None
def transform(self, node, results):
name = results["name"]
if name.value == "xrange":
return self.transform_xrange(node, results)
elif name.value == "range":
return self.transform_range(node, results)
else:
raise ValueError(repr(name))
def transform_xrange(self, node, results):
name = results["name"]
name.replace(Name("range", prefix=name.prefix))
# This prevents the new range call from being wrapped in a list later.
self.transformed_xranges.add(id(node))
def transform_range(self, node, results):
if (id(node) not in self.transformed_xranges and
not self.in_special_context(node)):
range_call = Call(Name("range"), [results["args"].clone()])
# Encase the range call in list().
list_call = Call(Name("list"), [range_call],
prefix=node.prefix)
# Put things that were after the range() call after the list call.
for n in results["rest"]:
list_call.append_child(n)
return list_call
P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
p1 = patcomp.compile_pattern(P1)
P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
| comp_for< 'for' any 'in' node=any any* >
| comparison< any 'in' node=any any*>
"""
p2 = patcomp.compile_pattern(P2)
def in_special_context(self, node):
if node.parent is None:
return False
results = {}
if (node.parent.parent is not None and
self.p1.match(node.parent.parent, results) and
results["node"] is node):
# list(d.keys()) -> list(d.keys()), etc.
return results["func"].value in consuming_calls
# for ... in d.iterkeys() -> for ... in d.keys(), etc.
return self.p2.match(node.parent, results) and results["node"] is node
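# --- Minimal usage sketch (editorial addition, not part of the original fixer) ---
# Shows both branches: xrange() becomes range(), while a bare range() call is
# wrapped in list() unless it appears in a special context such as a for loop
# or a consuming call.  Assumes lib2to3 is importable.
if __name__ == "__main__":
    from lib2to3 import refactor
    tool = refactor.RefactoringTool(["lib2to3.fixes.fix_xrange"])
    src = "a = xrange(10)\nb = range(3)\nfor i in range(3):\n    pass\n"
    print(tool.refactor_string(src, "<example>"))
    # expected (roughly): a = range(10); b = list(range(3)); the for loop unchanged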
| Python |
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that turns 'long' into 'int' everywhere.
"""
# Local imports
from lib2to3 import fixer_base
from lib2to3.fixer_util import is_probably_builtin
class FixLong(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "'long'"
def transform(self, node, results):
if is_probably_builtin(node):
node.value = "int"
node.changed()
| Python |
# Copyright 2008 Armin Ronacher.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that cleans up a tuple argument to isinstance after the tokens
in it were fixed. This is mainly used to remove double occurrences of
tokens as a leftover of the long -> int / unicode -> str conversion.
eg. isinstance(x, (int, long)) -> isinstance(x, (int, int))
-> isinstance(x, int)
"""
from .. import fixer_base
from ..fixer_util import token
class FixIsinstance(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power<
'isinstance'
trailer< '(' arglist< any ',' atom< '('
args=testlist_gexp< any+ >
')' > > ')' >
>
"""
run_order = 6
def transform(self, node, results):
names_inserted = set()
testlist = results["args"]
args = testlist.children
new_args = []
iterator = enumerate(args)
for idx, arg in iterator:
if arg.type == token.NAME and arg.value in names_inserted:
if idx < len(args) - 1 and args[idx + 1].type == token.COMMA:
next(iterator)
continue
else:
new_args.append(arg)
if arg.type == token.NAME:
names_inserted.add(arg.value)
if new_args and new_args[-1].type == token.COMMA:
del new_args[-1]
if len(new_args) == 1:
atom = testlist.parent
new_args[0].prefix = atom.prefix
atom.replace(new_args[0])
else:
args[:] = new_args
node.changed()
| Python |
""" Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)
imports from itertools are fixed in fix_itertools_import.py
If itertools is imported as something else (e.g. import itertools as it;
it.izip(spam, eggs)), method calls will not get fixed.
"""
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixItertools(fixer_base.BaseFix):
BM_compatible = True
it_funcs = "('imap'|'ifilter'|'izip'|'ifilterfalse')"
PATTERN = """
power< it='itertools'
trailer<
dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >
|
power< func=%(it_funcs)s trailer< '(' [any] ')' > >
""" %(locals())
# Needs to be run after fix_(map|zip|filter)
run_order = 6
def transform(self, node, results):
prefix = None
func = results['func'][0]
if 'it' in results and func.value != 'ifilterfalse':
dot, it = (results['dot'], results['it'])
# Remove the 'itertools'
prefix = it.prefix
it.remove()
            # Replace the node which contains ('.', 'function') with the
            # function (to be consistent with the second part of the pattern)
dot.remove()
func.parent.replace(func)
prefix = prefix or func.prefix
func.replace(Name(func.value[1:], prefix=prefix))
| Python |
"""Fix changes imports of urllib which are now incompatible.
This is rather similar to fix_imports, but because of the more
complex nature of the fixing for urllib, it has its own fixer.
"""
# Author: Nick Edds
# Local imports
from .fix_imports import alternates, FixImports
from .. import fixer_base
from ..fixer_util import Name, Comma, FromImport, Newline, attr_chain
MAPPING = {'urllib': [
('urllib.request',
['URLOpener', 'FancyURLOpener', 'urlretrieve',
'_urlopener', 'urlopen', 'urlcleanup',
'pathname2url', 'url2pathname']),
('urllib.parse',
['quote', 'quote_plus', 'unquote', 'unquote_plus',
'urlencode', 'splitattr', 'splithost', 'splitnport',
'splitpasswd', 'splitport', 'splitquery', 'splittag',
'splittype', 'splituser', 'splitvalue', ]),
('urllib.error',
['ContentTooShortError'])],
'urllib2' : [
('urllib.request',
['urlopen', 'install_opener', 'build_opener',
'Request', 'OpenerDirector', 'BaseHandler',
'HTTPDefaultErrorHandler', 'HTTPRedirectHandler',
'HTTPCookieProcessor', 'ProxyHandler',
'HTTPPasswordMgr',
'HTTPPasswordMgrWithDefaultRealm',
'AbstractBasicAuthHandler',
'HTTPBasicAuthHandler', 'ProxyBasicAuthHandler',
'AbstractDigestAuthHandler',
'HTTPDigestAuthHandler', 'ProxyDigestAuthHandler',
'HTTPHandler', 'HTTPSHandler', 'FileHandler',
'FTPHandler', 'CacheFTPHandler',
'UnknownHandler']),
('urllib.error',
['URLError', 'HTTPError']),
]
}
# Duplicate the url parsing functions for urllib2.
MAPPING["urllib2"].append(MAPPING["urllib"][1])
def build_pattern():
bare = set()
for old_module, changes in MAPPING.items():
for change in changes:
new_module, members = change
members = alternates(members)
yield """import_name< 'import' (module=%r
| dotted_as_names< any* module=%r any* >) >
""" % (old_module, old_module)
yield """import_from< 'from' mod_member=%r 'import'
( member=%s | import_as_name< member=%s 'as' any > |
import_as_names< members=any* >) >
""" % (old_module, members, members)
yield """import_from< 'from' module_star=%r 'import' star='*' >
""" % old_module
yield """import_name< 'import'
dotted_as_name< module_as=%r 'as' any > >
""" % old_module
# bare_with_attr has a special significance for FixImports.match().
yield """power< bare_with_attr=%r trailer< '.' member=%s > any* >
""" % (old_module, members)
class FixUrllib(FixImports):
def build_pattern(self):
return "|".join(build_pattern())
def transform_import(self, node, results):
"""Transform for the basic import case. Replaces the old
import name with a comma separated list of its
replacements.
"""
import_mod = results.get('module')
pref = import_mod.prefix
names = []
# create a Node list of the replacement modules
for name in MAPPING[import_mod.value][:-1]:
names.extend([Name(name[0], prefix=pref), Comma()])
names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref))
import_mod.replace(names)
def transform_member(self, node, results):
"""Transform for imports of specific module elements. Replaces
the module to be imported from with the appropriate new
module.
"""
mod_member = results.get('mod_member')
pref = mod_member.prefix
member = results.get('member')
# Simple case with only a single member being imported
if member:
# this may be a list of length one, or just a node
if isinstance(member, list):
member = member[0]
new_name = None
for change in MAPPING[mod_member.value]:
if member.value in change[1]:
new_name = change[0]
break
if new_name:
mod_member.replace(Name(new_name, prefix=pref))
else:
self.cannot_convert(node,
'This is an invalid module element')
# Multiple members being imported
else:
# a dictionary for replacements, order matters
modules = []
mod_dict = {}
members = results.get('members')
for member in members:
member = member.value
# we only care about the actual members
if member != ',':
for change in MAPPING[mod_member.value]:
if member in change[1]:
if change[0] in mod_dict:
mod_dict[change[0]].append(member)
else:
mod_dict[change[0]] = [member]
modules.append(change[0])
new_nodes = []
for module in modules:
elts = mod_dict[module]
names = []
for elt in elts[:-1]:
names.extend([Name(elt, prefix=pref), Comma()])
names.append(Name(elts[-1], prefix=pref))
new_nodes.append(FromImport(module, names))
if new_nodes:
nodes = []
for new_node in new_nodes[:-1]:
nodes.extend([new_node, Newline()])
nodes.append(new_nodes[-1])
node.replace(nodes)
else:
self.cannot_convert(node, 'All module elements are invalid')
def transform_dot(self, node, results):
"""Transform for calls to module members in code."""
module_dot = results.get('bare_with_attr')
member = results.get('member')
new_name = None
if isinstance(member, list):
member = member[0]
for change in MAPPING[module_dot.value]:
if member.value in change[1]:
new_name = change[0]
break
if new_name:
module_dot.replace(Name(new_name,
prefix=module_dot.prefix))
else:
self.cannot_convert(node, 'This is an invalid module element')
def transform(self, node, results):
if results.get('module'):
self.transform_import(node, results)
elif results.get('mod_member'):
self.transform_member(node, results)
elif results.get('bare_with_attr'):
self.transform_dot(node, results)
# Renaming and star imports are not supported for these modules.
elif results.get('module_star'):
self.cannot_convert(node, 'Cannot handle star imports.')
elif results.get('module_as'):
self.cannot_convert(node, 'This module is now multiple modules')
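# --- Minimal usage sketch (editorial addition, not part of the original fixer) ---
# Demonstrates the urllib/urllib2 remapping on two common forms; assumes
# lib2to3 is importable.
if __name__ == "__main__":
    from lib2to3 import refactor
    tool = refactor.RefactoringTool(["lib2to3.fixes.fix_urllib"])
    print(tool.refactor_string("from urllib2 import urlopen\n", "<example>"))
    # expected (roughly): from urllib.request import urlopen
    print(tool.refactor_string("import urllib2\n", "<example>"))
    # expected (roughly): import urllib.request, urllib.error, urllib.parse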
| Python |
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes buffer(...) into memoryview(...)."""
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixBuffer(fixer_base.BaseFix):
BM_compatible = True
explicit = True # The user must ask for this fixer
PATTERN = """
power< name='buffer' trailer< '(' [any] ')' > any* >
"""
def transform(self, node, results):
name = results["name"]
name.replace(Name("memoryview", prefix=name.prefix))
| Python |
"""Fixer for __metaclass__ = X -> (metaclass=X) methods.
The various forms of classdef (inherits nothing, inherits once, inherits
many) don't parse the same in the CST so we look at ALL classes for
a __metaclass__ and if we find one normalize the inherits to all be
an arglist.
For one-liner classes ('class X: pass') there is no indent/dedent so
we normalize those into having a suite.
Moving the __metaclass__ into the classdef can also cause the class
body to be empty so there is some special casing for that as well.
This fixer also tries very hard to keep original indenting and spacing
in all those corner cases.
"""
# Author: Jack Diederich
# Local imports
from .. import fixer_base
from ..pygram import token
from ..fixer_util import Name, syms, Node, Leaf
def has_metaclass(parent):
""" we have to check the cls_node without changing it.
    There are two possibilities:
1) clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
2) clsdef => simple_stmt => expr_stmt => Leaf('__meta')
"""
for node in parent.children:
if node.type == syms.suite:
return has_metaclass(node)
elif node.type == syms.simple_stmt and node.children:
expr_node = node.children[0]
if expr_node.type == syms.expr_stmt and expr_node.children:
left_side = expr_node.children[0]
if isinstance(left_side, Leaf) and \
left_side.value == '__metaclass__':
return True
return False
def fixup_parse_tree(cls_node):
""" one-line classes don't get a suite in the parse tree so we add
one to normalize the tree
"""
for node in cls_node.children:
if node.type == syms.suite:
            # already in the preferred format, do nothing
return
# !%@#! oneliners have no suite node, we have to fake one up
for i, node in enumerate(cls_node.children):
if node.type == token.COLON:
break
else:
raise ValueError("No class suite and no ':'!")
# move everything into a suite node
suite = Node(syms.suite, [])
while cls_node.children[i+1:]:
move_node = cls_node.children[i+1]
suite.append_child(move_node.clone())
move_node.remove()
cls_node.append_child(suite)
node = suite
def fixup_simple_stmt(parent, i, stmt_node):
""" if there is a semi-colon all the parts count as part of the same
simple_stmt. We just want the __metaclass__ part so we move
        everything after the semi-colon into its own simple_stmt node
"""
for semi_ind, node in enumerate(stmt_node.children):
if node.type == token.SEMI: # *sigh*
break
else:
return
node.remove() # kill the semicolon
new_expr = Node(syms.expr_stmt, [])
new_stmt = Node(syms.simple_stmt, [new_expr])
while stmt_node.children[semi_ind:]:
move_node = stmt_node.children[semi_ind]
new_expr.append_child(move_node.clone())
move_node.remove()
parent.insert_child(i, new_stmt)
new_leaf1 = new_stmt.children[0].children[0]
old_leaf1 = stmt_node.children[0].children[0]
new_leaf1.prefix = old_leaf1.prefix
def remove_trailing_newline(node):
if node.children and node.children[-1].type == token.NEWLINE:
node.children[-1].remove()
def find_metas(cls_node):
# find the suite node (Mmm, sweet nodes)
for node in cls_node.children:
if node.type == syms.suite:
break
else:
raise ValueError("No class suite!")
# look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
for i, simple_node in list(enumerate(node.children)):
if simple_node.type == syms.simple_stmt and simple_node.children:
expr_node = simple_node.children[0]
if expr_node.type == syms.expr_stmt and expr_node.children:
# Check if the expr_node is a simple assignment.
left_node = expr_node.children[0]
if isinstance(left_node, Leaf) and \
left_node.value == '__metaclass__':
                    # We found an assignment to __metaclass__.
fixup_simple_stmt(node, i, simple_node)
remove_trailing_newline(simple_node)
yield (node, i, simple_node)
def fixup_indent(suite):
""" If an INDENT is followed by a thing with a prefix then nuke the prefix
Otherwise we get in trouble when removing __metaclass__ at suite start
"""
kids = suite.children[::-1]
# find the first indent
while kids:
node = kids.pop()
if node.type == token.INDENT:
break
# find the first Leaf
while kids:
node = kids.pop()
if isinstance(node, Leaf) and node.type != token.DEDENT:
if node.prefix:
node.prefix = ''
return
else:
kids.extend(node.children[::-1])
class FixMetaclass(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
classdef<any*>
"""
def transform(self, node, results):
if not has_metaclass(node):
return
fixup_parse_tree(node)
# find metaclasses, keep the last one
last_metaclass = None
for suite, i, stmt in find_metas(node):
last_metaclass = stmt
stmt.remove()
text_type = node.children[0].type # always Leaf(nnn, 'class')
# figure out what kind of classdef we have
if len(node.children) == 7:
# Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
# 0 1 2 3 4 5 6
if node.children[3].type == syms.arglist:
arglist = node.children[3]
# Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
else:
parent = node.children[3].clone()
arglist = Node(syms.arglist, [parent])
node.set_child(3, arglist)
elif len(node.children) == 6:
# Node(classdef, ['class', 'name', '(', ')', ':', suite])
# 0 1 2 3 4 5
arglist = Node(syms.arglist, [])
node.insert_child(3, arglist)
elif len(node.children) == 4:
# Node(classdef, ['class', 'name', ':', suite])
# 0 1 2 3
arglist = Node(syms.arglist, [])
node.insert_child(2, Leaf(token.RPAR, ')'))
node.insert_child(2, arglist)
node.insert_child(2, Leaf(token.LPAR, '('))
else:
raise ValueError("Unexpected class definition")
# now stick the metaclass in the arglist
meta_txt = last_metaclass.children[0].children[0]
meta_txt.value = 'metaclass'
orig_meta_prefix = meta_txt.prefix
if arglist.children:
arglist.append_child(Leaf(token.COMMA, ','))
meta_txt.prefix = ' '
else:
meta_txt.prefix = ''
# compact the expression "metaclass = Meta" -> "metaclass=Meta"
expr_stmt = last_metaclass.children[0]
assert expr_stmt.type == syms.expr_stmt
expr_stmt.children[1].prefix = ''
expr_stmt.children[2].prefix = ''
arglist.append_child(last_metaclass)
fixup_indent(suite)
# check for empty suite
if not suite.children:
            # one-liner that was just __metaclass__
suite.remove()
pass_leaf = Leaf(text_type, 'pass')
pass_leaf.prefix = orig_meta_prefix
node.append_child(pass_leaf)
node.append_child(Leaf(token.NEWLINE, '\n'))
elif len(suite.children) > 1 and \
(suite.children[-2].type == token.INDENT and
suite.children[-1].type == token.DEDENT):
# there was only one line in the class body and it was __metaclass__
pass_leaf = Leaf(text_type, 'pass')
suite.insert_child(-1, pass_leaf)
suite.insert_child(-1, Leaf(token.NEWLINE, '\n'))
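# --- Minimal usage sketch (editorial addition, not part of the original fixer) ---
# Shows the __metaclass__ attribute being folded into the class header;
# assumes lib2to3 is importable.  Base and Meta are placeholder names.
if __name__ == "__main__":
    from lib2to3 import refactor
    tool = refactor.RefactoringTool(["lib2to3.fixes.fix_metaclass"])
    src = (
        "class C(Base):\n"
        "    __metaclass__ = Meta\n"
        "    attr = 1\n"
    )
    print(tool.refactor_string(src, "<example>"))
    # expected (roughly): class C(Base, metaclass=Meta): with attr = 1 kept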
| Python |
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for has_key().
Calls to .has_key() methods are expressed in terms of the 'in'
operator:
d.has_key(k) -> k in d
CAVEATS:
1) While the primary target of this fixer is dict.has_key(), the
fixer will change any has_key() method call, regardless of its
class.
2) Cases like this will not be converted:
m = d.has_key
if m(k):
...
Only *calls* to has_key() are converted. While it is possible to
convert the above to something like
m = d.__contains__
if m(k):
...
this is currently not done.
"""
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, parenthesize
class FixHasKey(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
anchor=power<
before=any+
trailer< '.' 'has_key' >
trailer<
'('
( not(arglist | argument<any '=' any>) arg=any
| arglist<(not argument<any '=' any>) arg=any ','>
)
')'
>
after=any*
>
|
negation=not_test<
'not'
anchor=power<
before=any+
trailer< '.' 'has_key' >
trailer<
'('
( not(arglist | argument<any '=' any>) arg=any
| arglist<(not argument<any '=' any>) arg=any ','>
)
')'
>
>
>
"""
def transform(self, node, results):
assert results
syms = self.syms
if (node.parent.type == syms.not_test and
self.pattern.match(node.parent)):
# Don't transform a node matching the first alternative of the
# pattern when its parent matches the second alternative
return None
negation = results.get("negation")
anchor = results["anchor"]
prefix = node.prefix
before = [n.clone() for n in results["before"]]
arg = results["arg"].clone()
after = results.get("after")
if after:
after = [n.clone() for n in after]
if arg.type in (syms.comparison, syms.not_test, syms.and_test,
syms.or_test, syms.test, syms.lambdef, syms.argument):
arg = parenthesize(arg)
if len(before) == 1:
before = before[0]
else:
before = pytree.Node(syms.power, before)
before.prefix = " "
n_op = Name("in", prefix=" ")
if negation:
n_not = Name("not", prefix=" ")
n_op = pytree.Node(syms.comp_op, (n_not, n_op))
new = pytree.Node(syms.comparison, (arg, n_op, before))
if after:
new = parenthesize(new)
new = pytree.Node(syms.power, (new,) + tuple(after))
if node.parent.type in (syms.comparison, syms.expr, syms.xor_expr,
syms.and_expr, syms.shift_expr,
syms.arith_expr, syms.term,
syms.factor, syms.power):
new = parenthesize(new)
new.prefix = prefix
return new
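# --- Minimal usage sketch (editorial addition, not part of the original fixer) ---
# Demonstrates both the plain and the negated form; assumes lib2to3 is
# importable.  d and k are placeholder names.
if __name__ == "__main__":
    from lib2to3 import refactor
    tool = refactor.RefactoringTool(["lib2to3.fixes.fix_has_key"])
    print(tool.refactor_string("if d.has_key(k):\n    pass\n", "<example>"))
    print(tool.refactor_string("if not d.has_key(k):\n    pass\n", "<example>"))
    # expected (roughly): "k in d" and "k not in d"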
| Python |
"""
Fixer that changes os.getcwdu() to os.getcwd().
"""
# Author: Victor Stinner
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixGetcwdu(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< 'os' trailer< dot='.' name='getcwdu' > any* >
"""
def transform(self, node, results):
name = results["name"]
name.replace(Name("getcwd", prefix=name.prefix))
| Python |
# Copyright 2006 Georg Brandl.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for intern().
intern(s) -> sys.intern(s)"""
# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Name, Attr, touch_import
class FixIntern(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< 'intern'
trailer< lpar='('
( not(arglist | argument<any '=' any>) obj=any
| obj=arglist<(not argument<any '=' any>) any ','> )
rpar=')' >
after=any*
>
"""
def transform(self, node, results):
syms = self.syms
obj = results["obj"].clone()
if obj.type == syms.arglist:
newarglist = obj.clone()
else:
newarglist = pytree.Node(syms.arglist, [obj.clone()])
after = results["after"]
if after:
after = [n.clone() for n in after]
new = pytree.Node(syms.power,
Attr(Name("sys"), Name("intern")) +
[pytree.Node(syms.trailer,
[results["lpar"].clone(),
newarglist,
results["rpar"].clone()])] + after)
new.prefix = node.prefix
touch_import(None, 'sys', node)
return new
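# --- Minimal usage sketch (editorial addition, not part of the original fixer) ---
# Shows intern() being rewritten to sys.intern() with the import added by
# touch_import(); assumes lib2to3 is importable.  s is a placeholder name.
if __name__ == "__main__":
    from lib2to3 import refactor
    tool = refactor.RefactoringTool(["lib2to3.fixes.fix_intern"])
    print(tool.refactor_string("x = intern(s)\n", "<example>"))
    # expected (roughly): "import sys" added at the top and x = sys.intern(s)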
| Python |
"""Fix incompatible imports and module references."""
# Authors: Collin Winter, Nick Edds
# Local imports
from .. import fixer_base
from ..fixer_util import Name, attr_chain
MAPPING = {'StringIO': 'io',
'cStringIO': 'io',
'cPickle': 'pickle',
'__builtin__' : 'builtins',
'copy_reg': 'copyreg',
'Queue': 'queue',
'SocketServer': 'socketserver',
'ConfigParser': 'configparser',
'repr': 'reprlib',
'FileDialog': 'tkinter.filedialog',
'tkFileDialog': 'tkinter.filedialog',
'SimpleDialog': 'tkinter.simpledialog',
'tkSimpleDialog': 'tkinter.simpledialog',
'tkColorChooser': 'tkinter.colorchooser',
'tkCommonDialog': 'tkinter.commondialog',
'Dialog': 'tkinter.dialog',
'Tkdnd': 'tkinter.dnd',
'tkFont': 'tkinter.font',
'tkMessageBox': 'tkinter.messagebox',
'ScrolledText': 'tkinter.scrolledtext',
'Tkconstants': 'tkinter.constants',
'Tix': 'tkinter.tix',
'ttk': 'tkinter.ttk',
'Tkinter': 'tkinter',
'markupbase': '_markupbase',
'_winreg': 'winreg',
'thread': '_thread',
'dummy_thread': '_dummy_thread',
# anydbm and whichdb are handled by fix_imports2
'dbhash': 'dbm.bsd',
'dumbdbm': 'dbm.dumb',
'dbm': 'dbm.ndbm',
'gdbm': 'dbm.gnu',
'xmlrpclib': 'xmlrpc.client',
'DocXMLRPCServer': 'xmlrpc.server',
'SimpleXMLRPCServer': 'xmlrpc.server',
'httplib': 'http.client',
'htmlentitydefs' : 'html.entities',
'HTMLParser' : 'html.parser',
'Cookie': 'http.cookies',
'cookielib': 'http.cookiejar',
'BaseHTTPServer': 'http.server',
'SimpleHTTPServer': 'http.server',
'CGIHTTPServer': 'http.server',
#'test.test_support': 'test.support',
'commands': 'subprocess',
'UserString' : 'collections',
'UserList' : 'collections',
'urlparse' : 'urllib.parse',
'robotparser' : 'urllib.robotparser',
}
def alternates(members):
return "(" + "|".join(map(repr, members)) + ")"
def build_pattern(mapping=MAPPING):
mod_list = ' | '.join(["module_name='%s'" % key for key in mapping])
bare_names = alternates(mapping.keys())
yield """name_import=import_name< 'import' ((%s) |
multiple_imports=dotted_as_names< any* (%s) any* >) >
""" % (mod_list, mod_list)
yield """import_from< 'from' (%s) 'import' ['(']
( any | import_as_name< any 'as' any > |
import_as_names< any* >) [')'] >
""" % mod_list
yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > |
multiple_imports=dotted_as_names<
any* dotted_as_name< (%s) 'as' any > any* >) >
""" % (mod_list, mod_list)
# Find usages of module members in code e.g. thread.foo(bar)
yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names
class FixImports(fixer_base.BaseFix):
BM_compatible = True
# This is overridden in fix_imports2.
mapping = MAPPING
# We want to run this fixer late, so fix_import doesn't try to make stdlib
# renames into relative imports.
run_order = 6
keep_line_order = True
def build_pattern(self):
return "|".join(build_pattern(self.mapping))
def compile_pattern(self):
        # We override this, so MAPPING can be programmatically altered and the
# changes will be reflected in PATTERN.
self.PATTERN = self.build_pattern()
super(FixImports, self).compile_pattern()
# Don't match the node if it's within another match.
def match(self, node):
match = super(FixImports, self).match
results = match(node)
if results:
# Module usage could be in the trailer of an attribute lookup, so we
# might have nested matches when "bare_with_attr" is present.
if "bare_with_attr" not in results and \
any(match(obj) for obj in attr_chain(node, "parent")):
return False
return results
return False
def start_tree(self, tree, filename):
super(FixImports, self).start_tree(tree, filename)
self.replace = {}
def transform(self, node, results):
import_mod = results.get("module_name")
if import_mod:
mod_name = import_mod.value
new_name = self.mapping[mod_name]
import_mod.replace(Name(new_name, prefix=import_mod.prefix))
if "name_import" in results:
# If it's not a "from x import x, y" or "import x as y" import,
                # mark its usage to be replaced.
self.replace[mod_name] = new_name
if "multiple_imports" in results:
# This is a nasty hack to fix multiple imports on a line (e.g.,
# "import StringIO, urlparse"). The problem is that I can't
# figure out an easy way to make a pattern recognize the keys of
# MAPPING randomly sprinkled in an import statement.
results = self.match(node)
if results:
self.transform(node, results)
else:
# Replace usage of the module.
bare_name = results["bare_with_attr"][0]
new_name = self.replace.get(bare_name.value)
if new_name:
bare_name.replace(Name(new_name, prefix=bare_name.prefix))
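# --- Minimal usage sketch (an editorial addition, not part of the original
# fixer). It assumes this file is installed as lib2to3.fixes.fix_imports and
# that the interpreter still ships lib2to3 (it is gone from very recent
# CPython releases). Because of the relative imports above, run it as a
# module: python -m lib2to3.fixes.fix_imports
if __name__ == "__main__":
    from lib2to3.refactor import RefactoringTool
    tool = RefactoringTool(["lib2to3.fixes.fix_imports"])
    # refactor_string() wants source ending in a newline and returns the
    # refactored parse tree; str() of the tree is the rewritten source.
    source = "import StringIO\nbuf = StringIO.StringIO()\n"
    print(tool.refactor_string(source, "<fix_imports demo>"), end="")
    # Expected output (roughly): "import io" / "buf = io.StringIO()"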
| Python |
# Copyright 2008 Armin Ronacher.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for reduce().
Makes sure reduce() is imported from the functools module if reduce is
used in that module.
"""
from lib2to3 import fixer_base
from lib2to3.fixer_util import touch_import
class FixReduce(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< 'reduce'
trailer< '('
arglist< (
(not(argument<any '=' any>) any ','
not(argument<any '=' any>) any) |
(not(argument<any '=' any>) any ','
not(argument<any '=' any>) any ','
not(argument<any '=' any>) any)
) >
')' >
>
"""
def transform(self, node, results):
touch_import('functools', 'reduce', node)
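# Hedged demo (added here, not part of the stdlib file): assuming the fixer is
# importable as lib2to3.fixes.fix_reduce, a bare reduce() call gains the
# functools import added by touch_import() above.
if __name__ == "__main__":
    from lib2to3.refactor import RefactoringTool
    tool = RefactoringTool(["lib2to3.fixes.fix_reduce"])
    src = "total = reduce(lambda a, b: a + b, numbers)\n"
    print(tool.refactor_string(src, "<fix_reduce demo>"), end="")
    # Roughly:
    #   from functools import reduce
    #   total = reduce(lambda a, b: a + b, numbers)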
| Python |
"""Fixer for basestring -> str."""
# Author: Christian Heimes
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixBasestring(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "'basestring'"
def transform(self, node, results):
return Name("str", prefix=node.prefix)
| Python |
"""
Optional fixer to transform set() calls to set literals.
"""
# Author: Benjamin Peterson
from lib2to3 import fixer_base, pytree
from lib2to3.fixer_util import token, syms
class FixSetLiteral(fixer_base.BaseFix):
BM_compatible = True
explicit = True
PATTERN = """power< 'set' trailer< '('
(atom=atom< '[' (items=listmaker< any ((',' any)* [',']) >
|
single=any) ']' >
|
atom< '(' items=testlist_gexp< any ((',' any)* [',']) > ')' >
)
')' > >
"""
def transform(self, node, results):
single = results.get("single")
if single:
# Make a fake listmaker
fake = pytree.Node(syms.listmaker, [single.clone()])
single.replace(fake)
items = fake
else:
items = results["items"]
# Build the contents of the literal
literal = [pytree.Leaf(token.LBRACE, "{")]
literal.extend(n.clone() for n in items.children)
literal.append(pytree.Leaf(token.RBRACE, "}"))
# Set the prefix of the right brace to that of the ')' or ']'
literal[-1].prefix = items.next_sibling.prefix
maker = pytree.Node(syms.dictsetmaker, literal)
maker.prefix = node.prefix
# If the original was a one tuple, we need to remove the extra comma.
if len(maker.children) == 4:
n = maker.children[2]
n.remove()
maker.children[-1].prefix = n.prefix
# Finally, replace the set call with our shiny new literal.
return maker
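# Illustrative, non-authoritative demo (an editorial addition). Because this
# fixer is marked explicit above, a RefactoringTool skips it unless it is also
# named in the `explicit` argument; the module path below is assumed to be the
# stdlib one.
if __name__ == "__main__":
    from lib2to3.refactor import RefactoringTool
    name = "lib2to3.fixes.fix_set_literal"
    tool = RefactoringTool([name], explicit=[name])
    print(tool.refactor_string("s = set([1, 2, 3])\n", "<demo>"), end="")
    # Roughly: s = {1, 2, 3}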
| Python |
"""Fixer for function definitions with tuple parameters.
def func(((a, b), c), d):
...
->
def func(x, d):
((a, b), c) = x
...
It will also support lambdas:
lambda (x, y): x + y -> lambda t: t[0] + t[1]
# The parens are a syntax error in Python 3
lambda (x): x + y -> lambda x: x + y
"""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Assign, Name, Newline, Number, Subscript, syms
def is_docstring(stmt):
return isinstance(stmt, pytree.Node) and \
stmt.children[0].type == token.STRING
class FixTupleParams(fixer_base.BaseFix):
run_order = 4 # run this fixer first, lambda is often included in
# other patterns
BM_compatible = True
PATTERN = """
funcdef< 'def' any parameters< '(' args=any ')' >
['->' any] ':' suite=any+ >
|
lambda=
lambdef< 'lambda' args=vfpdef< '(' inner=any ')' >
':' body=any
>
"""
def transform(self, node, results):
if "lambda" in results:
return self.transform_lambda(node, results)
new_lines = []
suite = results["suite"]
args = results["args"]
# This crap is so "def foo(...): x = 5; y = 7" is handled correctly.
# TODO(cwinter): suite-cleanup
if suite[0].children[1].type == token.INDENT:
start = 2
indent = suite[0].children[1].value
end = Newline()
else:
start = 0
indent = "; "
end = pytree.Leaf(token.INDENT, "")
# We need access to self for new_name(), and making this a method
# doesn't feel right. Closing over self and new_lines makes the
# code below cleaner.
def handle_tuple(tuple_arg, add_prefix=False):
n = Name(self.new_name())
arg = tuple_arg.clone()
arg.prefix = ""
stmt = Assign(arg, n.clone())
if add_prefix:
n.prefix = " "
tuple_arg.replace(n)
new_lines.append(pytree.Node(syms.simple_stmt,
[stmt, end.clone()]))
if args.type == syms.tfpdef:
handle_tuple(args)
elif args.type == syms.typedargslist:
for i, arg in enumerate(args.children):
if arg.type == syms.tfpdef:
# Without add_prefix, the emitted code is correct,
# just ugly.
handle_tuple(arg, add_prefix=(i > 0))
if not new_lines:
return
# This isn't strictly necessary, but it plays nicely with other fixers.
# TODO(cwinter) get rid of this when children becomes a smart list
for line in new_lines:
line.parent = suite[0]
# TODO(cwinter) suite-cleanup
after = start
if start == 0:
new_lines[0].prefix = " "
elif is_docstring(suite[0].children[start]):
new_lines[0].prefix = indent
after = start + 1
for line in new_lines:
line.parent = suite[0]
suite[0].children[after:after] = new_lines
for i in range(after+1, after+len(new_lines)+1):
suite[0].children[i].prefix = indent
suite[0].changed()
def transform_lambda(self, node, results):
args = results["args"]
body = results["body"]
inner = simplify_args(results["inner"])
# Replace lambda ((((x)))): x with lambda x: x
if inner.type == token.NAME:
inner = inner.clone()
inner.prefix = " "
args.replace(inner)
return
params = find_params(args)
to_index = map_to_index(params)
tup_name = self.new_name(tuple_name(params))
new_param = Name(tup_name, prefix=" ")
args.replace(new_param.clone())
for n in body.post_order():
if n.type == token.NAME and n.value in to_index:
subscripts = [c.clone() for c in to_index[n.value]]
new = pytree.Node(syms.power,
[new_param.clone()] + subscripts)
new.prefix = n.prefix
n.replace(new)
### Helper functions for transform_lambda()
def simplify_args(node):
if node.type in (syms.vfplist, token.NAME):
return node
elif node.type == syms.vfpdef:
# These look like vfpdef< '(' x ')' > where x is NAME
# or another vfpdef instance (leading to recursion).
while node.type == syms.vfpdef:
node = node.children[1]
return node
raise RuntimeError("Received unexpected node %s" % node)
def find_params(node):
if node.type == syms.vfpdef:
return find_params(node.children[1])
elif node.type == token.NAME:
return node.value
return [find_params(c) for c in node.children if c.type != token.COMMA]
def map_to_index(param_list, prefix=[], d=None):
if d is None:
d = {}
for i, obj in enumerate(param_list):
trailer = [Subscript(Number(str(i)))]
if isinstance(obj, list):
map_to_index(obj, trailer, d=d)
else:
d[obj] = prefix + trailer
return d
def tuple_name(param_list):
l = []
for obj in param_list:
if isinstance(obj, list):
l.append(tuple_name(obj))
else:
l.append(obj)
return "_".join(l)
| Python |
"""
Convert use of sys.exitfunc to use the atexit module.
"""
# Author: Benjamin Peterson
from lib2to3 import pytree, fixer_base
from lib2to3.fixer_util import Name, Attr, Call, Comma, Newline, syms
class FixExitfunc(fixer_base.BaseFix):
keep_line_order = True
BM_compatible = True
PATTERN = """
(
sys_import=import_name<'import'
('sys'
|
dotted_as_names< (any ',')* 'sys' (',' any)* >
)
>
|
expr_stmt<
power< 'sys' trailer< '.' 'exitfunc' > >
'=' func=any >
)
"""
def __init__(self, *args):
super(FixExitfunc, self).__init__(*args)
def start_tree(self, tree, filename):
super(FixExitfunc, self).start_tree(tree, filename)
self.sys_import = None
def transform(self, node, results):
        # First, find the sys import. We'll just hope it's at global scope.
if "sys_import" in results:
if self.sys_import is None:
self.sys_import = results["sys_import"]
return
func = results["func"].clone()
func.prefix = ""
register = pytree.Node(syms.power,
Attr(Name("atexit"), Name("register"))
)
call = Call(register, [func], node.prefix)
node.replace(call)
if self.sys_import is None:
# That's interesting.
self.warning(node, "Can't find sys import; Please add an atexit "
"import at the top of your file.")
return
# Now add an atexit import after the sys import.
names = self.sys_import.children[1]
if names.type == syms.dotted_as_names:
names.append_child(Comma())
names.append_child(Name("atexit", " "))
else:
containing_stmt = self.sys_import.parent
position = containing_stmt.children.index(self.sys_import)
stmt_container = containing_stmt.parent
new_import = pytree.Node(syms.import_name,
[Name("import"), Name("atexit", " ")]
)
new = pytree.Node(syms.simple_stmt, [new_import])
containing_stmt.insert_child(position + 1, Newline())
containing_stmt.insert_child(position + 2, new)
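# A small, non-authoritative demo (added here), assuming the module is
# importable as lib2to3.fixes.fix_exitfunc; this file uses absolute imports,
# so it can also be run directly.
if __name__ == "__main__":
    from lib2to3.refactor import RefactoringTool
    tool = RefactoringTool(["lib2to3.fixes.fix_exitfunc"])
    src = "import sys\ndef cleanup(): pass\nsys.exitfunc = cleanup\n"
    print(tool.refactor_string(src, "<demo>"), end="")
    # Roughly: the assignment becomes "atexit.register(cleanup)" and an
    # "import atexit" is inserted right after the sys import.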
| Python |
"""Fixer that turns 1L into 1, 0755 into 0o755.
"""
# Copyright 2007 Georg Brandl.
# Licensed to PSF under a Contributor Agreement.
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Number
class FixNumliterals(fixer_base.BaseFix):
# This is so simple that we don't need the pattern compiler.
_accept_type = token.NUMBER
def match(self, node):
# Override
if type(node.value) is int:
return False
return (node.value.startswith("0") or node.value[-1] in "Ll")
def transform(self, node, results):
val = node.value
if val[-1] in 'Ll':
val = val[:-1]
elif val.startswith('0') and val.isdigit() and len(set(val)) > 1:
val = "0o" + val[1:]
return Number(val, prefix=node.prefix)
| Python |
"""Fix "for x in f.xreadlines()" -> "for x in f".
This fixer will also convert g(f.xreadlines) into g(f.__iter__)."""
# Author: Collin Winter
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixXreadlines(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< call=any+ trailer< '.' 'xreadlines' > trailer< '(' ')' > >
|
power< any+ trailer< '.' no_call='xreadlines' > >
"""
def transform(self, node, results):
no_call = results.get("no_call")
if no_call:
no_call.replace(Name("__iter__", prefix=no_call.prefix))
else:
node.replace([x.clone() for x in results["call"]])
| Python |
# Dummy file to make this directory a package.
| Python |
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for execfile.
This converts usages of the execfile function into calls to the built-in
exec() function.
"""
from .. import fixer_base
from ..fixer_util import (Comma, Name, Call, LParen, RParen, Dot, Node,
ArgList, String, syms)
class FixExecfile(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< 'execfile' trailer< '(' arglist< filename=any [',' globals=any [',' locals=any ] ] > ')' > >
|
power< 'execfile' trailer< '(' filename=any ')' > >
"""
def transform(self, node, results):
assert results
filename = results["filename"]
globals = results.get("globals")
locals = results.get("locals")
# Copy over the prefix from the right parentheses end of the execfile
# call.
execfile_paren = node.children[-1].children[-1].clone()
# Construct open().read().
open_args = ArgList([filename.clone()], rparen=execfile_paren)
open_call = Node(syms.power, [Name("open"), open_args])
read = [Node(syms.trailer, [Dot(), Name('read')]),
Node(syms.trailer, [LParen(), RParen()])]
open_expr = [open_call] + read
# Wrap the open call in a compile call. This is so the filename will be
# preserved in the execed code.
filename_arg = filename.clone()
filename_arg.prefix = " "
exec_str = String("'exec'", " ")
compile_args = open_expr + [Comma(), filename_arg, Comma(), exec_str]
compile_call = Call(Name("compile"), compile_args, "")
# Finally, replace the execfile call with an exec call.
args = [compile_call]
if globals is not None:
args.extend([Comma(), globals.clone()])
if locals is not None:
args.extend([Comma(), locals.clone()])
return Call(Name("exec"), args, prefix=node.prefix)
| Python |
"""Fixer that changes unicode to str, unichr to chr, and u"..." into "...".
"""
import re
from ..pgen2 import token
from .. import fixer_base
_mapping = {"unichr" : "chr", "unicode" : "str"}
_literal_re = re.compile(r"[uU][rR]?[\'\"]")
class FixUnicode(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "STRING | 'unicode' | 'unichr'"
def transform(self, node, results):
if node.type == token.NAME:
new = node.clone()
new.value = _mapping[node.value]
return new
elif node.type == token.STRING:
if _literal_re.match(node.value):
new = node.clone()
new.value = new.value[1:]
return new
| Python |
""" Fixer for imports of itertools.(imap|ifilter|izip|ifilterfalse) """
# Local imports
from lib2to3 import fixer_base
from lib2to3.fixer_util import BlankLine, syms, token
class FixItertoolsImports(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
import_from< 'from' 'itertools' 'import' imports=any >
""" %(locals())
def transform(self, node, results):
imports = results['imports']
if imports.type == syms.import_as_name or not imports.children:
children = [imports]
else:
children = imports.children
for child in children[::2]:
if child.type == token.NAME:
member = child.value
name_node = child
elif child.type == token.STAR:
# Just leave the import as is.
return
else:
assert child.type == syms.import_as_name
name_node = child.children[0]
member_name = name_node.value
if member_name in ('imap', 'izip', 'ifilter'):
child.value = None
child.remove()
elif member_name == 'ifilterfalse':
node.changed()
name_node.value = 'filterfalse'
# Make sure the import statement is still sane
children = imports.children[:] or [imports]
remove_comma = True
for child in children:
if remove_comma and child.type == token.COMMA:
child.remove()
else:
remove_comma ^= True
if children[-1].type == token.COMMA:
children[-1].remove()
# If there are no imports left, just get rid of the entire statement
if (not (imports.children or getattr(imports, 'value', None)) or
imports.parent is None):
p = node.prefix
node = BlankLine()
node.prefix = p
return node
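# Quick sketch (an editorial addition, not part of the original file), assuming
# the stdlib module path below. It shows both the dropped names and the
# ifilterfalse rename handled above.
if __name__ == "__main__":
    from lib2to3.refactor import RefactoringTool
    tool = RefactoringTool(["lib2to3.fixes.fix_itertools_imports"])
    for src in ("from itertools import imap, izip, count\n",
                "from itertools import ifilterfalse\n"):
        print(tool.refactor_string(src, "<demo>"), end="")
    # Roughly: "from itertools import count" and
    #          "from itertools import filterfalse"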
| Python |
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for dict methods.
d.keys() -> list(d.keys())
d.items() -> list(d.items())
d.values() -> list(d.values())
d.iterkeys() -> iter(d.keys())
d.iteritems() -> iter(d.items())
d.itervalues() -> iter(d.values())
d.viewkeys() -> d.keys()
d.viewitems() -> d.items()
d.viewvalues() -> d.values()
Except in certain very specific contexts: the iter() can be dropped
when the context is list(), sorted(), iter() or for...in; the list()
can be dropped when the context is list() or sorted() (but not iter()
or for...in!). Special contexts that apply to both: list(), sorted(), tuple()
set(), any(), all(), sum().
Note: iter(d.keys()) could be written as iter(d) but since the
original d.iterkeys() was also redundant we don't fix this. And there
are (rare) contexts where it makes a difference (e.g. when passing it
as an argument to a function that introspects the argument).
"""
# Local imports
from .. import pytree
from .. import patcomp
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, LParen, RParen, ArgList, Dot
from .. import fixer_util
iter_exempt = fixer_util.consuming_calls | set(["iter"])
class FixDict(fixer_base.BaseFix):
PATTERN = """
power< head=any+
trailer< '.' method=('keys'|'items'|'values'|
'iterkeys'|'iteritems'|'itervalues'|
'viewkeys'|'viewitems'|'viewvalues') >
parens=trailer< '(' ')' >
tail=any*
>
"""
BM_compatible = True
def transform(self, node, results):
head = results["head"]
method = results["method"][0] # Extract node for method name
tail = results["tail"]
syms = self.syms
method_name = method.value
isiter = method_name.startswith("iter")
isview = method_name.startswith("view")
if isiter or isview:
method_name = method_name[4:]
assert method_name in ("keys", "items", "values"), repr(method)
head = [n.clone() for n in head]
tail = [n.clone() for n in tail]
special = not tail and self.in_special_context(node, isiter)
args = head + [pytree.Node(syms.trailer,
[Dot(),
Name(method_name,
prefix=method.prefix)]),
results["parens"].clone()]
new = pytree.Node(syms.power, args)
if not (special or isview):
new.prefix = ""
new = Call(Name("iter" if isiter else "list"), [new])
if tail:
new = pytree.Node(syms.power, [new] + tail)
new.prefix = node.prefix
return new
P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
p1 = patcomp.compile_pattern(P1)
P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
| comp_for< 'for' any 'in' node=any any* >
"""
p2 = patcomp.compile_pattern(P2)
def in_special_context(self, node, isiter):
if node.parent is None:
return False
results = {}
if (node.parent.parent is not None and
self.p1.match(node.parent.parent, results) and
results["node"] is node):
if isiter:
# iter(d.iterkeys()) -> iter(d.keys()), etc.
return results["func"].value in iter_exempt
else:
# list(d.keys()) -> list(d.keys()), etc.
return results["func"].value in fixer_util.consuming_calls
if not isiter:
return False
# for ... in d.iterkeys() -> for ... in d.keys(), etc.
return self.p2.match(node.parent, results) and results["node"] is node
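# Hedged demo (an editorial addition), assuming the stdlib module path below;
# run as a module so the relative imports above resolve. It exercises the
# special contexts described in the docstring: a bare call gains list()/iter(),
# while a consuming context such as sorted() is left alone.
if __name__ == "__main__":
    from lib2to3.refactor import RefactoringTool
    tool = RefactoringTool(["lib2to3.fixes.fix_dict"])
    for src in ("ks = d.keys()\n",
                "it = d.iteritems()\n",
                "for k in sorted(d.keys()): pass\n"):
        print(tool.refactor_string(src, "<demo>"), end="")
    # Roughly: "ks = list(d.keys())", "it = iter(d.items())", and the
    # sorted() line unchanged.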
| Python |
"""Fixer for __nonzero__ -> __bool__ methods."""
# Author: Collin Winter
# Local imports
from .. import fixer_base
from ..fixer_util import Name, syms
class FixNonzero(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
classdef< 'class' any+ ':'
suite< any*
funcdef< 'def' name='__nonzero__'
parameters< '(' NAME ')' > any+ >
any* > >
"""
def transform(self, node, results):
name = results["name"]
new = Name("__bool__", prefix=name.prefix)
name.replace(new)
| Python |
"""Fixer for except statements with named exceptions.
The following cases will be converted:
- "except E, T:" where T is a name:
except E as T:
- "except E, T:" where T is not a name, tuple or list:
except E as t:
T = t
This is done because the target of an "except" clause must be a
name.
- "except E, T:" where T is a tuple or list literal:
except E as t:
T = t.args
"""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Assign, Attr, Name, is_tuple, is_list, syms
def find_excepts(nodes):
for i, n in enumerate(nodes):
if n.type == syms.except_clause:
if n.children[0].value == 'except':
yield (n, nodes[i+2])
class FixExcept(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
try_stmt< 'try' ':' (simple_stmt | suite)
cleanup=(except_clause ':' (simple_stmt | suite))+
tail=(['except' ':' (simple_stmt | suite)]
['else' ':' (simple_stmt | suite)]
['finally' ':' (simple_stmt | suite)]) >
"""
def transform(self, node, results):
syms = self.syms
tail = [n.clone() for n in results["tail"]]
try_cleanup = [ch.clone() for ch in results["cleanup"]]
for except_clause, e_suite in find_excepts(try_cleanup):
if len(except_clause.children) == 4:
(E, comma, N) = except_clause.children[1:4]
comma.replace(Name("as", prefix=" "))
if N.type != token.NAME:
# Generate a new N for the except clause
new_N = Name(self.new_name(), prefix=" ")
target = N.clone()
target.prefix = ""
N.replace(new_N)
new_N = new_N.clone()
# Insert "old_N = new_N" as the first statement in
# the except body. This loop skips leading whitespace
# and indents
#TODO(cwinter) suite-cleanup
suite_stmts = e_suite.children
for i, stmt in enumerate(suite_stmts):
if isinstance(stmt, pytree.Node):
break
# The assignment is different if old_N is a tuple or list
# In that case, the assignment is old_N = new_N.args
if is_tuple(N) or is_list(N):
assign = Assign(target, Attr(new_N, Name('args')))
else:
assign = Assign(target, new_N)
#TODO(cwinter) stopgap until children becomes a smart list
for child in reversed(suite_stmts[:i]):
e_suite.insert_child(0, child)
e_suite.insert_child(i, assign)
elif N.prefix == "":
# No space after a comma is legal; no space after "as",
# not so much.
N.prefix = " "
#TODO(cwinter) fix this when children becomes a smart list
children = [c.clone() for c in node.children[:3]] + try_cleanup + tail
return pytree.Node(node.type, children)
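# Hedged demo (an editorial addition), assuming the stdlib module path below;
# run as a module so the relative imports above resolve. It covers two
# docstring cases: a NAME target and a tuple target.
if __name__ == "__main__":
    from lib2to3.refactor import RefactoringTool
    tool = RefactoringTool(["lib2to3.fixes.fix_except"])
    src = ("try:\n"
           "    pass\n"
           "except ValueError, e:\n"
           "    pass\n"
           "except OSError, (errno, msg):\n"
           "    pass\n")
    print(tool.refactor_string(src, "<demo>"), end="")
    # Roughly: "except ValueError as e:" and "except OSError as
    # xxx_todo_changeme:" followed by "(errno, msg) = xxx_todo_changeme.args".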
| Python |
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes filter(F, X) into list(filter(F, X)).
We avoid the transformation if the filter() call is directly contained
in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or
for V in <>:.
NOTE: This is still not correct if the original code was depending on
filter(F, X) to return a string if X is a string and a tuple if X is a
tuple. That would require type inference, which we don't do. Let
Python 2.6 figure it out.
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ListComp, in_special_context
class FixFilter(fixer_base.ConditionalFix):
BM_compatible = True
PATTERN = """
filter_lambda=power<
'filter'
trailer<
'('
arglist<
lambdef< 'lambda'
(fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
>
','
it=any
>
')'
>
>
|
power<
'filter'
trailer< '(' arglist< none='None' ',' seq=any > ')' >
>
|
power<
'filter'
args=trailer< '(' [any] ')' >
>
"""
skip_on = "future_builtins.filter"
def transform(self, node, results):
if self.should_skip(node):
return
if "filter_lambda" in results:
new = ListComp(results.get("fp").clone(),
results.get("fp").clone(),
results.get("it").clone(),
results.get("xp").clone())
elif "none" in results:
new = ListComp(Name("_f"),
Name("_f"),
results["seq"].clone(),
Name("_f"))
else:
if in_special_context(node):
return None
new = node.clone()
new.prefix = ""
new = Call(Name("list"), [new])
new.prefix = node.prefix
return new
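# Non-authoritative demo (an editorial addition), assuming the stdlib module
# path below; run as a module so the relative imports above resolve. The three
# inputs hit the three pattern branches: an inline lambda, filter(None, ...),
# and the generic form.
if __name__ == "__main__":
    from lib2to3.refactor import RefactoringTool
    tool = RefactoringTool(["lib2to3.fixes.fix_filter"])
    for src in ("filter(lambda x: x > 0, nums)\n",
                "filter(None, nums)\n",
                "result = filter(f, nums)\n"):
        print(tool.refactor_string(src, "<demo>"), end="")
    # Roughly: "[x for x in nums if x > 0]", "[_f for _f in nums if _f]",
    # and "result = list(filter(f, nums))".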
| Python |
"""
Fixer that changes zip(seq0, seq1, ...) into list(zip(seq0, seq1, ...))
unless there exists a 'from future_builtins import zip' statement in the
top-level namespace.
We avoid the transformation if the zip() call is directly contained in
iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
"""
# Local imports
from .. import fixer_base
from ..fixer_util import Name, Call, in_special_context
class FixZip(fixer_base.ConditionalFix):
BM_compatible = True
PATTERN = """
power< 'zip' args=trailer< '(' [any] ')' >
>
"""
skip_on = "future_builtins.zip"
def transform(self, node, results):
if self.should_skip(node):
return
if in_special_context(node):
return None
new = node.clone()
new.prefix = ""
new = Call(Name("list"), [new])
new.prefix = node.prefix
return new
| Python |
"""Fix incompatible imports and module references that must be fixed after
fix_imports."""
from . import fix_imports
MAPPING = {
'whichdb': 'dbm',
'anydbm': 'dbm',
}
class FixImports2(fix_imports.FixImports):
run_order = 7
mapping = MAPPING
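# Brief hedged demo (an editorial addition), assuming the stdlib module path
# below; run as a module so the relative import above resolves. FixImports2
# reuses the FixImports machinery with this later-running mapping, so
# anydbm/whichdb references are rewritten to dbm.
if __name__ == "__main__":
    from lib2to3.refactor import RefactoringTool
    tool = RefactoringTool(["lib2to3.fixes.fix_imports2"])
    src = "import anydbm\ndb = anydbm.open('spam')\n"
    print(tool.refactor_string(src, "<demo>"), end="")
    # Roughly: "import dbm" / "db = dbm.open('spam')"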
| Python |