code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
def main():
    """Generate bitmap_data.cpp: rank/file slider-attack lookup tables.

    line_infos[idx][occupancy] packs, for a piece at position idx (0-9) on a
    10-square line with the given 10-bit occupancy mask, the nearest and
    second-nearest occupied positions on each side:
    p1 | p2<<4 | n1<<8 | n2<<12.  Scanning down wraps to 0xF when it falls
    off the line; scanning up stops in the 0xFC00 guard bits (values >= 10).
    """
    line_infos = [[0] * 1024 for i in range(10)]
    for idx in range(10):
        for flag in range(1024):
            # Guard bits above position 9 so the upward scans always terminate.
            flag |= 0xFC00
            # p1/p2: first and second occupied positions below idx
            # (the `x and [x-1] or [0xF]` idiom is a Py2-era conditional
            # expression: x-1 if x else 0xF).
            p1 = (idx and [idx - 1] or [0xF])[0]
            while not flag & (1 << p1):
                p1 = (p1 and [p1 - 1] or [0xF])[0]
            p2 = (p1 and [p1 - 1] or [0xF])[0]
            while not flag & (1 << p2):
                p2 = (p2 and [p2 - 1] or [0xF])[0]
            # n1/n2: first and second occupied positions above idx.
            n1 = idx + 1
            while not flag & (1 << n1):
                n1 += 1
            n2 = n1 + 1
            while not flag & (1 << n2):
                n2 += 1
            line_infos[idx][flag & 1023] = p1 | (p2 << 4) | (n1 << 8) | (n2 << 12)
    bits = [1 << i for i in range(10)]
    # Popcount of every 10-bit mask.
    bit_counts = [len([j for j in bits if i & j]) for i in range(1024)]
    # distance_infos[src][dst] = (line index << 10) | mask of squares strictly
    # between src and dst; files use indices 10-18, ranks 0-9.  Default entry
    # (19<<10 | 1023) marks "not on a shared line".
    distance_infos = [[(19 << 10 | 1023)] * 128 for i in range(91)]
    for src in range(90):
        sy, sx = divmod(src, 9)
        for dst in range(90):
            if dst == src:
                continue
            dy, dx = divmod(dst, 9)
            if dx == sx:
                idx = dx + 10
                # Fix: the original `range(a,b)+range(c,d)` only works on
                # Python 2; min/max covers exactly the squares between.
                mask = sum(bits[i] for i in range(min(sy, dy) + 1, max(sy, dy)))
                distance_infos[src][dst] = (idx << 10) | mask
            elif dy == sy:
                idx = dy
                mask = sum(bits[i] for i in range(min(sx, dx) + 1, max(sx, dx)))
                distance_infos[src][dst] = (idx << 10) | mask
    subst = {}  # renamed from `dict` to avoid shadowing the builtin
    subst['infos'] = d2a_str(line_infos, u32)
    subst['counts'] = d1a_str(bit_counts, u32)
    subst['distance'] = d2a_str(distance_infos, u32)
    template = open(os.path.join(template_path, 'bitmap_data.cpp.tmpl'), 'rb').read()
    template = string.Template(template)
    path = os.path.join(folium_path, 'bitmap_data.cpp')
    open(path, 'wb').write(str(template.safe_substitute(subst)))

if __name__ == "__main__":
    main()
| Python |
# Driver script: regenerate every generated C++ source file.  Each module
# computes its tables at import time (module-level code) and main() renders
# its template to the output directory; import and call are kept adjacent so
# a failure is attributed to the module that caused it.  xq_data must come
# first -- the later modules import tables from it.
import xq_data
xq_data.main()
import xq_position_data
xq_position_data.main()
import history_data
history_data.main()
import bitmap_data
bitmap_data.main()
| Python |
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
from xq_data import *
def capture_scores():
    """Build the 32x32 capture-score table (MVV/LVA style).

    scores[dst_piece][src_piece] is nonzero only for captures between
    opposite colors: (victim_weight - attacker_weight + 1) << 17.
    Piece indices follow the PT/PC tables from xq_data.
    """
    # Kept from the original: one extra always-zero row (33 rows for 32
    # piece indices) -- presumably a sentinel; do not shrink without checking
    # the C++ consumer.
    scores = [[0] * 32 for i in range(33)]
    e = [10000, 1041, 1040, 2000, 1088, 1096, 1020]  # victim weight per type
    m = [1000, 41, 40, 200, 88, 96, 20]              # attacker weight per type
    def color(piece):
        return PC[piece]
    def piece_type(piece):  # renamed from `type` (shadowed the builtin)
        # Fold black types (7-13) onto the red range (0-6).
        t = PT[piece]
        if t >= 7:
            t -= 7
        return t
    # Removed dead helper `level()` -- it referenced an undefined global
    # `levels` and was never called.
    for src_piece in range(32):
        for dst_piece in range(32):
            if color(src_piece) != color(dst_piece):
                src_type = piece_type(src_piece)
                dst_type = piece_type(dst_piece)
                # Explicit parentheses; the original `e - m + 1 << 17`
                # already shifted the whole sum (+ binds tighter than <<).
                scores[dst_piece][src_piece] = (e[dst_type] - m[src_type] + 1) << 17
    return scores
capture_scores = capture_scores()
def main():
    """Render history_data.cpp from its template with the capture-score table."""
    subst = {'scores': d2a_str(capture_scores, u32)}
    tmpl = string.Template(
        open(os.path.join(template_path, 'history_data.cpp.tmpl'), 'rb').read())
    out_path = os.path.join(folium_path, 'history_data.cpp')
    open(out_path, 'wb').write(str(tmpl.safe_substitute(subst)))

if __name__ == "__main__":
    main()
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
from xq_data import *
def capture_scores():
    """Build the 32x32 capture-score table (MVV/LVA style).

    scores[dst_piece][src_piece] is nonzero only for captures between
    opposite colors: (victim_weight - attacker_weight + 1) << 17.
    Piece indices follow the PT/PC tables from xq_data.
    """
    # Kept from the original: one extra always-zero row (33 rows for 32
    # piece indices) -- presumably a sentinel; do not shrink without checking
    # the C++ consumer.
    scores = [[0] * 32 for i in range(33)]
    e = [10000, 1041, 1040, 2000, 1088, 1096, 1020]  # victim weight per type
    m = [1000, 41, 40, 200, 88, 96, 20]              # attacker weight per type
    def color(piece):
        return PC[piece]
    def piece_type(piece):  # renamed from `type` (shadowed the builtin)
        # Fold black types (7-13) onto the red range (0-6).
        t = PT[piece]
        if t >= 7:
            t -= 7
        return t
    # Removed dead helper `level()` -- it referenced an undefined global
    # `levels` and was never called.
    for src_piece in range(32):
        for dst_piece in range(32):
            if color(src_piece) != color(dst_piece):
                src_type = piece_type(src_piece)
                dst_type = piece_type(dst_piece)
                # Explicit parentheses; the original `e - m + 1 << 17`
                # already shifted the whole sum (+ binds tighter than <<).
                scores[dst_piece][src_piece] = (e[dst_type] - m[src_type] + 1) << 17
    return scores
capture_scores = capture_scores()
def main():
    """Render history_data.cpp from its template with the capture-score table."""
    subst = {'scores': d2a_str(capture_scores, u32)}
    tmpl = string.Template(
        open(os.path.join(template_path, 'history_data.cpp.tmpl'), 'rb').read())
    out_path = os.path.join(folium_path, 'history_data.cpp')
    open(out_path, 'wb').write(str(tmpl.safe_substitute(subst)))

if __name__ == "__main__":
    main()
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
def main():
    """Generate bitmap_data.cpp: rank/file slider-attack lookup tables.

    line_infos[idx][occupancy] packs, for a piece at position idx (0-9) on a
    10-square line with the given 10-bit occupancy mask, the nearest and
    second-nearest occupied positions on each side:
    p1 | p2<<4 | n1<<8 | n2<<12.  Scanning down wraps to 0xF when it falls
    off the line; scanning up stops in the 0xFC00 guard bits (values >= 10).
    """
    line_infos = [[0] * 1024 for i in range(10)]
    for idx in range(10):
        for flag in range(1024):
            # Guard bits above position 9 so the upward scans always terminate.
            flag |= 0xFC00
            # p1/p2: first and second occupied positions below idx
            # (the `x and [x-1] or [0xF]` idiom is a Py2-era conditional
            # expression: x-1 if x else 0xF).
            p1 = (idx and [idx - 1] or [0xF])[0]
            while not flag & (1 << p1):
                p1 = (p1 and [p1 - 1] or [0xF])[0]
            p2 = (p1 and [p1 - 1] or [0xF])[0]
            while not flag & (1 << p2):
                p2 = (p2 and [p2 - 1] or [0xF])[0]
            # n1/n2: first and second occupied positions above idx.
            n1 = idx + 1
            while not flag & (1 << n1):
                n1 += 1
            n2 = n1 + 1
            while not flag & (1 << n2):
                n2 += 1
            line_infos[idx][flag & 1023] = p1 | (p2 << 4) | (n1 << 8) | (n2 << 12)
    bits = [1 << i for i in range(10)]
    # Popcount of every 10-bit mask.
    bit_counts = [len([j for j in bits if i & j]) for i in range(1024)]
    # distance_infos[src][dst] = (line index << 10) | mask of squares strictly
    # between src and dst; files use indices 10-18, ranks 0-9.  Default entry
    # (19<<10 | 1023) marks "not on a shared line".
    distance_infos = [[(19 << 10 | 1023)] * 128 for i in range(91)]
    for src in range(90):
        sy, sx = divmod(src, 9)
        for dst in range(90):
            if dst == src:
                continue
            dy, dx = divmod(dst, 9)
            if dx == sx:
                idx = dx + 10
                # Fix: the original `range(a,b)+range(c,d)` only works on
                # Python 2; min/max covers exactly the squares between.
                mask = sum(bits[i] for i in range(min(sy, dy) + 1, max(sy, dy)))
                distance_infos[src][dst] = (idx << 10) | mask
            elif dy == sy:
                idx = dy
                mask = sum(bits[i] for i in range(min(sx, dx) + 1, max(sx, dx)))
                distance_infos[src][dst] = (idx << 10) | mask
    subst = {}  # renamed from `dict` to avoid shadowing the builtin
    subst['infos'] = d2a_str(line_infos, u32)
    subst['counts'] = d1a_str(bit_counts, u32)
    subst['distance'] = d2a_str(distance_infos, u32)
    template = open(os.path.join(template_path, 'bitmap_data.cpp.tmpl'), 'rb').read()
    template = string.Template(template)
    path = os.path.join(folium_path, 'bitmap_data.cpp')
    open(path, 'wb').write(str(template.safe_substitute(subst)))

if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
from xq_data import *
# Per-square static piece values (index 90 is the off-board sentinel and
# stays 0).  Filled in by value() below; each black table is the point-mirror
# (square 89 - sq) of its red counterpart with the sign negated.  Advisors
# and bishops share one table -- their legal zones never overlap -- as do
# kings and pawns.
RedKingPawnValues = [0]*91
BlackKingPawnValues = [0]*91
AdvisorBishopValues = [0]*91
RedRookValues = [0]*91
BlackRookValues = [0]*91
RedKnightValues = [0]*91
BlackKnightValues = [0]*91
RedCannonValues = [0]*91
BlackCannonValues = [0]*91
def value():
    """Fill the per-square value tables: base value + positional bonus.

    Tables are written from red's point of view (square 0 = red back rank);
    the black value for square sq is minus the red value at 89 - sq.
    """
    KingBaseValue = 5000
    AdvisorBaseValue = 40
    BishopBaseValue = 40
    RookBaseValue = 200
    KnightBaseValue = 88
    CannonBaseValue = 96
    PawnBaseValue = 9
    # Positional bonuses, 10 rows x 9 columns, red side at the top rows.
    RedKingPawnPositionValues = [
        0, 0, 0, 1, 5, 1, 0, 0, 0,
        0, 0, 0, -8, -8, -8, 0, 0, 0,
        0, 0, 0, -9, -9, -9, 0, 0, 0,
        -2, 0, -2, 0, 6, 0, -2, 0, -2,
        3, 0, 4, 0, 7, 0, 4, 0, 3,
        10, 18, 22, 35, 40, 35, 22, 18, 10,
        20, 27, 30, 40, 42, 40, 35, 27, 20,
        20, 30, 45, 55, 55, 55, 45, 30, 20,
        20, 30, 50, 65, 70, 65, 50, 30, 20,
        0, 0, 0, 2, 4, 2, 0, 0, 0,]
    RedAdvisorBishopPositionValues = [
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 3, 0, 0, 0, 0,
        -2, 0, 0, 0, 3, 0, 0, 0, -2,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,]
    RedRookPositionValues = [
        -6, 6, 4, 12, 0, 12, 4, 6, -6,
        5, 8, 6, 12, 0, 12, 6, 8, 5,
        -2, 8, 4, 12, 12, 12, 4, 8, -2,
        4, 9, 4, 12, 14, 12, 4, 9, 4,
        8, 12, 12, 14, 15, 14, 12, 12, 8,
        8, 11, 11, 14, 15, 14, 11, 11, 8,
        6, 13, 13, 16, 16, 16, 13, 13, 6,
        6, 8, 7, 14, 16, 14, 7, 8, 6,
        6, 12, 9, 16, 33, 16, 9, 12, 6,
        6, 8, 7, 13, 14, 13, 7, 8, 6,]
    RedKnightPositionValues = [
        0, -3, 2, 0, 2, 0, 2, -3, 0,
        -3, 2, 4, 5, -10, 5, 4, 2, -3,
        5, 4, 6, 7, 4, 7, 6, 4, 5,
        4, 6, 10, 7, 10, 7, 10, 6, 4,
        2, 10, 13, 14, 15, 14, 13, 10, 2,
        2, 12, 11, 15, 16, 15, 11, 12, 2,
        5, 20, 12, 19, 12, 19, 12, 20, 5,
        4, 10, 11, 15, 11, 15, 11, 10, 4,
        2, 8, 15, 9, 6, 9, 15, 8, 2,
        2, 2, 2, 8, 2, 8, 2, 2, 2,]
    RedCannonPositionValues = [
        0, 0, 1, 3, 3, 3, 1, 0, 0,
        0, 1, 2, 2, 2, 2, 2, 1, 0,
        1, 0, 4, 3, 5, 3, 4, 0, 1,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        -1, 0, 3, 0, 4, 0, 3, 0, -1,
        0, 0, 0, 0, 4, 0, 0, 0, 0,
        0, 3, 3, 2, 4, 2, 3, 3, 0,
        1, 1, 0, -5, -4, -5, 0, 1, 1,
        2, 2, 0, -4, -7, -4, 0, 2, 2,
        4, 4, 0, -5, -6, -5, 0, 4, 4,]
    for sq in range(90):
        flag = CoordinateFlags[sq]
        # King/pawn and advisor/bishop entries exist only on their legal
        # squares; independent `if`s because the zones can overlap.
        if flag & RedKingFlag:
            RedKingPawnValues[sq] = KingBaseValue + RedKingPawnPositionValues[sq]
            BlackKingPawnValues[89 - sq] = -RedKingPawnValues[sq]
        if flag & RedAdvisorFlag:
            AdvisorBishopValues[sq] = AdvisorBaseValue + RedAdvisorBishopPositionValues[sq]
            AdvisorBishopValues[89 - sq] = -AdvisorBishopValues[sq]
        if flag & RedBishopFlag:
            AdvisorBishopValues[sq] = BishopBaseValue + RedAdvisorBishopPositionValues[sq]
            AdvisorBishopValues[89 - sq] = -AdvisorBishopValues[sq]
        # Rooks, knights and cannons may stand anywhere, so these run for
        # every square.
        RedRookValues[sq] = RookBaseValue + RedRookPositionValues[sq]
        BlackRookValues[89 - sq] = -RedRookValues[sq]
        RedKnightValues[sq] = KnightBaseValue + RedKnightPositionValues[sq]
        BlackKnightValues[89 - sq] = -RedKnightValues[sq]
        RedCannonValues[sq] = CannonBaseValue + RedCannonPositionValues[sq]
        BlackCannonValues[89 - sq] = -RedCannonValues[sq]
        if flag & RedPawnFlag:
            RedKingPawnValues[sq] = PawnBaseValue + RedKingPawnPositionValues[sq]
            BlackKingPawnValues[89 - sq] = -RedKingPawnValues[sq]
value()
# Zobrist hashing tables: 64-bit "locks" and 32-bit "keys" per square
# (index 90 = off-board sentinel, stays 0).  Populated by hash() below.
RedKingPawnLocks = [0]*91
BlackKingPawnLocks = [0]*91
AdvisorBishopLocks = [0]*91
RedRookLocks = [0]*91
BlackRookLocks = [0]*91
RedKnightLocks = [0]*91
BlackKnightLocks = [0]*91
RedCannonLocks = [0]*91
BlackCannonLocks = [0]*91
RedKingPawnKeys = [0]*91
BlackKingPawnKeys = [0]*91
AdvisorBishopKeys = [0]*91
RedRookKeys = [0]*91
BlackRookKeys = [0]*91
RedKnightKeys = [0]*91
BlackKnightKeys = [0]*91
RedCannonKeys = [0]*91
BlackCannonKeys = [0]*91
def hash():
    """Populate the Zobrist lock/key tables.

    The seeded randint pass generates values per legal square, but every
    entry it writes is then fully overwritten by the canonical values read
    from 'hash.data' (90 entries per table), so the random pass only matters
    if that file is ever removed.  NOTE(review): the upper bound of
    randint(0, 0x10000000000000000) is inclusive (65-bit) -- harmless here
    because the values are discarded, kept to preserve behavior.
    NOTE(review): this function shadows the builtin `hash`.
    """
    from random import randint, seed
    seed(51)
    for sq in range(90):
        flag = CoordinateFlags[sq]
        if flag & RedKingPawnFlag:
            RedKingPawnLocks[sq] = randint(0, 0x10000000000000000)
            RedKingPawnKeys[sq] = randint(0, 0x100000000)
            BlackKingPawnLocks[89 - sq] = randint(0, 0x10000000000000000)
            BlackKingPawnKeys[89 - sq] = randint(0, 0x100000000)
        if flag & AdvisorBishopFlag:
            AdvisorBishopLocks[sq] = randint(0, 0x10000000000000000)
            AdvisorBishopKeys[sq] = randint(0, 0x100000000)
        # Rooks, knights and cannons may stand on any square.
        RedRookLocks[sq] = randint(0, 0x10000000000000000)
        RedRookKeys[sq] = randint(0, 0x100000000)
        BlackRookLocks[sq] = randint(0, 0x10000000000000000)
        BlackRookKeys[sq] = randint(0, 0x100000000)
        RedKnightLocks[sq] = randint(0, 0x10000000000000000)
        RedKnightKeys[sq] = randint(0, 0x100000000)
        BlackKnightLocks[sq] = randint(0, 0x10000000000000000)
        BlackKnightKeys[sq] = randint(0, 0x100000000)
        RedCannonLocks[sq] = randint(0, 0x10000000000000000)
        RedCannonKeys[sq] = randint(0, 0x100000000)
        BlackCannonLocks[sq] = randint(0, 0x10000000000000000)
        BlackCannonKeys[sq] = randint(0, 0x100000000)
    # Overwrite with the shipped values: for each lock table, 90 entries as
    # two 32-bit halves (high then low), then for each key table 90 entries,
    # one integer per line.  Fix: close the file deterministically.
    with open('hash.data') as data_file:
        for seq in [RedKingPawnLocks, BlackKingPawnLocks, AdvisorBishopLocks,
                    RedRookLocks, BlackRookLocks, RedKnightLocks,
                    BlackKnightLocks, RedCannonLocks, BlackCannonLocks]:
            for i in range(90):
                i1 = int(data_file.readline())
                i2 = int(data_file.readline())
                seq[i] = (i1 << 32) | i2
        for seq in [RedKingPawnKeys, BlackKingPawnKeys, AdvisorBishopKeys,
                    RedRookKeys, BlackRookKeys, RedKnightKeys,
                    BlackKnightKeys, RedCannonKeys, BlackCannonKeys]:
            for i in range(90):
                seq[i] = int(data_file.readline())
hash()
def main():
    """Render xq_position_data.cpp from its template.

    Substitutes every lock/key/value table plus name-indirection tables that
    map each of the 32 pieces (P*) and each of the 14 piece types (T*) to the
    C++ array that stores its data.
    """
    dict = {}
    # Per-table substitutions: *l = 64-bit locks, *k = 32-bit keys,
    # *v = signed 32-bit values.
    dict['rkpl'] = d1a_str(RedKingPawnLocks, u64)
    dict['rkpk'] = d1a_str(RedKingPawnKeys, u32)
    dict['rkpv'] = d1a_str(RedKingPawnValues, s32)
    dict['bkpl'] = d1a_str(BlackKingPawnLocks, u64)
    dict['bkpk'] = d1a_str(BlackKingPawnKeys, u32)
    dict['bkpv'] = d1a_str(BlackKingPawnValues, s32)
    dict['abl'] = d1a_str(AdvisorBishopLocks, u64)
    dict['abk'] = d1a_str(AdvisorBishopKeys, u32)
    dict['abv'] = d1a_str(AdvisorBishopValues, s32)
    dict['rrl'] = d1a_str(RedRookLocks, u64)
    dict['rrk'] = d1a_str(RedRookKeys, u32)
    dict['rrv'] = d1a_str(RedRookValues, s32)
    dict['brl'] = d1a_str(BlackRookLocks, u64)
    dict['brk'] = d1a_str(BlackRookKeys, u32)
    dict['brv'] = d1a_str(BlackRookValues, s32)
    dict['rnl'] = d1a_str(RedKnightLocks, u64)
    dict['rnk'] = d1a_str(RedKnightKeys, u32)
    dict['rnv'] = d1a_str(RedKnightValues, s32)
    dict['bnl'] = d1a_str(BlackKnightLocks, u64)
    dict['bnk'] = d1a_str(BlackKnightKeys, u32)
    dict['bnv'] = d1a_str(BlackKnightValues, s32)
    dict['rcl'] = d1a_str(RedCannonLocks, u64)
    dict['rck'] = d1a_str(RedCannonKeys, u32)
    dict['rcv'] = d1a_str(RedCannonValues, s32)
    dict['bcl'] = d1a_str(BlackCannonLocks, u64)
    dict['bck'] = d1a_str(BlackCannonKeys, u32)
    dict['bcv'] = d1a_str(BlackCannonValues, s32)
    # Piece-indexed tables (1 king, 2 advisors, 2 bishops, 2 rooks,
    # 2 knights, 2 cannons, 5 pawns per side, plus empty/invalid for PF/PT).
    PF = [RedKingFlag]+[RedAdvisorFlag]*2+[RedBishopFlag]*2\
        +[RedRookFlag]*2+[RedKnightFlag]*2+[RedCannonFlag]*2+[RedPawnFlag]*5\
        +[BlackKingFlag]+[BlackAdvisorFlag]*2+[BlackBishopFlag]*2\
        +[BlackRookFlag]*2+[BlackKnightFlag]*2+[BlackCannonFlag]*2+[BlackPawnFlag]*5\
        +[EmptyFlag, InvaildFlag]
    PT = [RedKing]+[RedAdvisor]*2+[RedBishop]*2+[RedRook]*2+[RedKnight]*2+[RedCannon]*2+[RedPawn]*5\
        +[BlackKing]+[BlackAdvisor]*2+[BlackBishop]*2+[BlackRook]*2+[BlackKnight]*2+[BlackCannon]*2+[BlackPawn]*5\
        +[EmptyType]+[InvalidType]
    PC = [0]*16 + [1]*16 + [2] + [3]
    # PL/PK/PV: C++ array name per piece index (kings and pawns share one
    # table, as do advisors and bishops).
    PL = ['s_red_king_pawn_locks']+['s_advisor_bishop_locks']*4+['s_red_rook_locks']*2+['s_red_knight_locks']*2\
        +['s_red_cannon_locks']*2+['s_red_king_pawn_locks']*5\
        +['s_black_king_pawn_locks']+['s_advisor_bishop_locks']*4+['s_black_rook_locks']*2+['s_black_knight_locks']*2\
        +['s_black_cannon_locks']*2+['s_black_king_pawn_locks']*5
    PK = ['s_red_king_pawn_keys']+['s_advisor_bishop_keys']*4+['s_red_rook_keys']*2+['s_red_knight_keys']*2\
        +['s_red_cannon_keys']*2+['s_red_king_pawn_keys']*5\
        +['s_black_king_pawn_keys']+['s_advisor_bishop_keys']*4+['s_black_rook_keys']*2+['s_black_knight_keys']*2\
        +['s_black_cannon_keys']*2+['s_black_king_pawn_keys']*5
    PV = ['s_red_king_pawn_values']+['s_advisor_bishop_values']*4+['s_red_rook_values']*2+['s_red_knight_values']*2\
        +['s_red_cannon_values']*2+['s_red_king_pawn_values']*5\
        +['s_black_king_pawn_values']+['s_advisor_bishop_values']*4+['s_black_rook_values']*2+['s_black_knight_values']*2\
        +['s_black_cannon_values']*2+['s_black_king_pawn_values']*5
    # The identity lambda keeps the raw C++ identifier strings unquoted.
    dict['plocks'] = d1a_str(PL, lambda x: x)
    dict['pkeys'] = d1a_str(PK, lambda x: x)
    dict['pvalues'] = d1a_str(PV, lambda x: x)
    # TL/TK/TV: C++ array name per piece *type* (7 per side).
    TL = ['s_red_king_pawn_locks']+['s_advisor_bishop_locks']*2\
        +['s_red_rook_locks','s_red_knight_locks','s_red_cannon_locks','s_red_king_pawn_locks']\
        +['s_black_king_pawn_locks']+['s_advisor_bishop_locks']*2\
        +['s_black_rook_locks','s_black_knight_locks','s_black_cannon_locks','s_black_king_pawn_locks']
    TK = ['s_red_king_pawn_keys']+['s_advisor_bishop_keys']*2\
        +['s_red_rook_keys','s_red_knight_keys','s_red_cannon_keys','s_red_king_pawn_keys']\
        +['s_black_king_pawn_keys']+['s_advisor_bishop_keys']*2\
        +['s_black_rook_keys','s_black_knight_keys','s_black_cannon_keys','s_black_king_pawn_keys']
    TV = ['s_red_king_pawn_values']+['s_advisor_bishop_values']*2\
        +['s_red_rook_values','s_red_knight_values','s_red_cannon_values','s_red_king_pawn_values']\
        +['s_black_king_pawn_values']+['s_advisor_bishop_values']*2\
        +['s_black_rook_values','s_black_knight_values','s_black_cannon_values','s_black_king_pawn_values']
    dict['tlocks'] = d1a_str(TL, lambda x: x)
    dict['tkeys'] = d1a_str(TK, lambda x: x)
    dict['tvalues'] = d1a_str(TV, lambda x: x)
    template = open(os.path.join(template_path, 'xq_position_data.cpp.tmpl'), 'rb').read()
    template = string.Template(template)
    path = os.path.join(folium_path, 'xq_position_data.cpp')
    open(path, 'wb').write(str(template.safe_substitute(dict)))
if __name__ == "__main__":
    main()
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
# Directional step helpers over the neighbour tables below.
# NOTE(review): u() reads SquareDowns and d() reads SquareUps -- presumably
# intentional (the board's y axis runs opposite to "up"); confirm before
# renaming anything.
def u(x): return SquareDowns[x]
def d(x): return SquareUps[x]
def l(x): return SquareLefts[x]
def r(x): return SquareRights[x]
# Neighbour/coordinate tables, filled by info(); index 90 is the off-board
# sentinel square.
SquareDowns = [0]*91
SquareUps = [0]*91
SquareLefts = [0]*91
SquareRights = [0]*91
Xs = [9]*91  # file (0-8) per square; the sentinel gets 9
Ys = [10]*91  # rank (0-9) per square; the sentinel gets 10
XYs = [[90]*16 for i in range(16)]  # (y, x) -> square, default sentinel 90
KnightLegs = [[90]*128 for i in range(91)]  # [src][dst] -> the blocking "leg" square
BishopEyes = [[90]*128 for i in range(91)]  # NOTE(review): declared but never filled here -- verify it is used elsewhere
def info():
    """Fill the coordinate and orthogonal-neighbour tables for all 90 squares."""
    def to_sq(x, y):
        # Off-board coordinates collapse to the sentinel square 90.
        if x < 0 or x > 8 or y < 0 or y > 9:
            return 90
        return x + 9 * y
    for sq in range(90):
        y, x = divmod(sq, 9)
        SquareDowns[sq] = to_sq(x, y - 1)
        SquareUps[sq] = to_sq(x, y + 1)
        SquareLefts[sq] = to_sq(x - 1, y)
        SquareRights[sq] = to_sq(x + 1, y)
        Xs[sq] = x
        Ys[sq] = y
        XYs[y][x] = sq
    # Stepping off the sentinel stays on the sentinel.
    for table in (SquareDowns, SquareUps, SquareLefts, SquareRights):
        table[90] = 90
info()
def leg():
    """Record, for every knight move src -> dst, the intervening "leg" square
    that must be empty for the move to be legal (into KnightLegs)."""
    up = lambda s: SquareDowns[s]
    down = lambda s: SquareUps[s]
    left = lambda s: SquareLefts[s]
    right = lambda s: SquareRights[s]
    # Each knight jump goes two steps in one direction then one sideways;
    # the leg is the square one step out in the primary direction.
    plan = ((up, (left, right)),
            (down, (left, right)),
            (left, (up, down)),
            (right, (up, down)))
    for src in range(90):
        for step, sides in plan:
            knee = step(src)
            for side in sides:
                dst = side(step(knee))
                if dst != 90:
                    KnightLegs[src][dst] = knee
leg()
# Piece-indexed lookups over the 34 piece codes (32 board pieces + empty +
# invalid): PF = legal-zone flag, PT = piece type, PC = color
# (0 = red, 1 = black, 2 = empty, 3 = invalid).
PF = [RedKingFlag]+[RedAdvisorFlag]*2+[RedBishopFlag]*2\
    +[RedRookFlag]*2+[RedKnightFlag]*2+[RedCannonFlag]*2+[RedPawnFlag]*5\
    +[BlackKingFlag]+[BlackAdvisorFlag]*2+[BlackBishopFlag]*2\
    +[BlackRookFlag]*2+[BlackKnightFlag]*2+[BlackCannonFlag]*2+[BlackPawnFlag]*5\
    +[EmptyFlag, InvaildFlag]
PT = [RedKing]+[RedAdvisor]*2+[RedBishop]*2+[RedRook]*2+[RedKnight]*2+[RedCannon]*2+[RedPawn]*5\
    +[BlackKing]+[BlackAdvisor]*2+[BlackBishop]*2+[BlackRook]*2+[BlackKnight]*2+[BlackCannon]*2+[BlackPawn]*5\
    +[EmptyType]+[InvalidType]
PC = [0]*16 + [1]*16 + [2] + [3]
def MoveFlags():
    """Build MoveFlags[dst][src]: a bitmask of which piece kinds could move
    from src to dst by pure geometry (sliders ignore blockers here).

    Fix: `src/9 == dst/9` relied on Python 2 integer division; `//` keeps the
    same behavior and stays correct under Python 3.
    """
    u = lambda s: SquareDowns[s]
    d = lambda s: SquareUps[s]
    l = lambda s: SquareLefts[s]
    r = lambda s: SquareRights[s]
    MoveFlags = [[0] * 128 for i in range(91)]
    for src in range(90):
        sf = CoordinateFlags[src]
        # red king
        if sf & RedKingFlag:
            for dst in [u(src), d(src), l(src), r(src)]:
                if CoordinateFlags[dst] & RedKingFlag:
                    # The pawn flag is added too so a king step can later be
                    # told apart from the facing-kings case; same below.
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedKingFlag | RedPawnFlag
        # black king
        elif sf & BlackKingFlag:
            for dst in [u(src), d(src), l(src), r(src)]:
                if CoordinateFlags[dst] & BlackKingFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | BlackKingFlag | BlackPawnFlag
        # red advisor (fresh `if`: palace squares carry both king and
        # advisor flags, so this must not be chained onto the king test)
        if sf & RedAdvisorFlag:
            for dst in [l(u(src)), l(d(src)), r(u(src)), r(d(src))]:
                if CoordinateFlags[dst] & RedAdvisorFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedAdvisorFlag
        # black advisor
        elif sf & BlackAdvisorFlag:
            for dst in [l(u(src)), l(d(src)), r(u(src)), r(d(src))]:
                if CoordinateFlags[dst] & BlackAdvisorFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | BlackAdvisorFlag
        # red bishop
        elif sf & RedBishopFlag:
            for dst in [l(l(u(u(src)))), l(l(d(d(src)))), r(r(u(u(src)))), r(r(d(d(src))))]:
                if CoordinateFlags[dst] & RedBishopFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedBishopFlag
        # black bishop
        elif sf & BlackBishopFlag:
            for dst in [l(l(u(u(src)))), l(l(d(d(src)))), r(r(u(u(src)))), r(r(d(d(src))))]:
                if CoordinateFlags[dst] & BlackBishopFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | BlackBishopFlag
        # knight (any square; color-independent)
        for dst in [l(u(u(src))), l(d(d(src))), r(u(u(src))), r(d(d(src))),
                    l(l(u(src))), l(l(d(src))), r(r(u(src))), r(r(d(src)))]:
            if dst != 90:  # was `dst in range(90)`; steps only yield 0-90
                MoveFlags[dst][src] = MoveFlags[dst][src] | RedKnightFlag | BlackKnightFlag
        # red pawn
        if sf & RedPawnFlag:
            for dst in [l(src), r(src), d(src)]:
                if CoordinateFlags[dst] & RedPawnFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedPawnFlag
        # black pawn (fresh `if`: the pawn zones of both sides overlap)
        if sf & BlackPawnFlag:
            for dst in [l(src), r(src), u(src)]:
                if CoordinateFlags[dst] & BlackPawnFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | BlackPawnFlag
        for dst in range(90):
            df = CoordinateFlags[dst]
            # facing kings on the same file
            if sf & RedKingFlag and df & BlackKingFlag and src % 9 == dst % 9:
                MoveFlags[dst][src] = MoveFlags[dst][src] | RedKingFlag
            elif sf & BlackKingFlag and df & RedKingFlag and src % 9 == dst % 9:
                MoveFlags[dst][src] = MoveFlags[dst][src] | BlackKingFlag
            # rook/cannon: any two distinct squares on one file or one rank
            if src != dst:
                if src % 9 == dst % 9 or src // 9 == dst // 9:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedRookFlag | RedCannonFlag | BlackRookFlag | BlackCannonFlag
    return MoveFlags
MoveFlags = MoveFlags()
def KnightMoves():
    """Build KnightMoves[sq]: up to 8 packed entries (leg << 8) | dst per
    square, padded with the sentinel 23130 (0x5A5A)."""
    SENTINEL = 23130
    table = [[SENTINEL] * 16 for i in range(91)]
    # Two perpendicular landings per primary direction, in the original
    # fill order (up, down, left, right).
    plan = ((u, (l, r)), (d, (l, r)), (l, (u, d)), (r, (u, d)))
    for sq in range(90):
        entries = table[sq]
        for step, sides in plan:
            knee = step(sq)
            for side in sides:
                dst = side(step(knee))
                if dst != 90:
                    entries[entries.index(SENTINEL)] = (knee << 8) | dst
    return table
KnightMoves = KnightMoves()
def RedKingPawnMoves():
    """Build the shared red king/pawn move table: up to 8 destination squares
    per source, padded with the sentinel 90."""
    table = [[90] * 8 for i in range(91)]
    for sq in range(90):
        moves = table[sq]
        flag = CoordinateFlags[sq]
        if flag & RedKingFlag:
            candidates, keep = [u(sq), d(sq), l(sq), r(sq)], RedKingFlag
        elif flag & RedPawnFlag:
            # Red pawns advance via d() and, once across, step sideways.
            candidates, keep = [d(sq), l(sq), r(sq)], RedPawnFlag
        else:
            candidates, keep = [], 0
        for dst in candidates:
            if CoordinateFlags[dst] & keep:
                moves[moves.index(90)] = dst
    return table
RedKingPawnMoves = RedKingPawnMoves()
def BlackKingPawnMoves():
    """Build the shared black king/pawn move table: up to 8 destination
    squares per source, padded with the sentinel 90."""
    table = [[90] * 8 for i in range(91)]
    for sq in range(90):
        moves = table[sq]
        flag = CoordinateFlags[sq]
        if flag & BlackKingFlag:
            candidates, keep = [u(sq), d(sq), l(sq), r(sq)], BlackKingFlag
        elif flag & BlackPawnFlag:
            # Black pawns advance via u() and, once across, step sideways.
            candidates, keep = [u(sq), l(sq), r(sq)], BlackPawnFlag
        else:
            candidates, keep = [], 0
        for dst in candidates:
            if CoordinateFlags[dst] & keep:
                moves[moves.index(90)] = dst
    return table
BlackKingPawnMoves = BlackKingPawnMoves()
def AdvisorBishopMoves():
    """Build the shared advisor/bishop move table (their zones never overlap):
    up to 8 destination squares per source, padded with the sentinel 90."""
    table = [[90] * 8 for i in range(91)]
    for sq in range(90):
        moves = table[sq]
        flag = CoordinateFlags[sq]
        if flag & BishopFlag:
            candidates = [u(u(r(r(sq)))), u(u(l(l(sq)))), d(d(r(r(sq)))), d(d(l(l(sq))))]
            keep = BishopFlag
        elif flag & AdvisorFlag:
            candidates = [u(l(sq)), u(r(sq)), d(l(sq)), d(r(sq))]
            keep = AdvisorFlag
        else:
            candidates, keep = [], 0
        for dst in candidates:
            if CoordinateFlags[dst] & keep:
                moves[moves.index(90)] = dst
    return table
AdvisorBishopMoves = AdvisorBishopMoves()
def main():
    """Render xq_data.cpp from its template with every geometry table."""
    subst = {
        'xs': d1a_str(Xs, u32),
        'ys': d1a_str(Ys, u32),
        'xys': d2a_str(XYs, u32),
        'downs': d1a_str(SquareDowns, u32),
        'ups': d1a_str(SquareUps, u32),
        'lefts': d1a_str(SquareLefts, u32),
        'rights': d1a_str(SquareRights, u32),
        'coordinate_flags': d1a_str(CoordinateFlags, u32),
        'ptypes': d1a_str(PT, u32),
        'pflags': d1a_str(PF, u32),
        'pcolors': d1a_str(PC, u32),
        'mf': d2a_str(MoveFlags, u32),
        'kl': d2a_str(KnightLegs, u32),
        'nm': d2a_str(KnightMoves, u32),
        'rkpm': d2a_str(RedKingPawnMoves, u32),
        'bkpm': d2a_str(BlackKingPawnMoves, u32),
        'abm': d2a_str(AdvisorBishopMoves, u32),
    }
    tmpl = string.Template(
        open(os.path.join(template_path, 'xq_data.cpp.tmpl'), 'rb').read())
    out_path = os.path.join(folium_path, 'xq_data.cpp')
    open(out_path, 'wb').write(str(tmpl.safe_substitute(subst)))

if __name__ == "__main__":
    main()
| Python |
# Piece type codes: red 0-6, black 7-13, plus empty/invalid markers.
RedKing = 0
RedAdvisor = 1
RedBishop = 2
RedRook = 3
RedKnight = 4
RedCannon = 5
RedPawn = 6
BlackKing = 7
BlackAdvisor = 8
BlackBishop = 9
BlackRook = 10
BlackKnight = 11
BlackCannon = 12
BlackPawn = 13
EmptyType = 14
InvalidType = 15

# One-hot flags; the bit position equals the type code.
RedKingFlag = 1 << RedKing
RedAdvisorFlag = 1 << RedAdvisor
RedBishopFlag = 1 << RedBishop
RedRookFlag = 1 << RedRook
RedKnightFlag = 1 << RedKnight
RedCannonFlag = 1 << RedCannon
RedPawnFlag = 1 << RedPawn
BlackKingFlag = 1 << BlackKing
BlackAdvisorFlag = 1 << BlackAdvisor
BlackBishopFlag = 1 << BlackBishop
BlackRookFlag = 1 << BlackRook
BlackKnightFlag = 1 << BlackKnight
BlackCannonFlag = 1 << BlackCannon
BlackPawnFlag = 1 << BlackPawn
EmptyFlag = 1 << EmptyType
InvaildFlag = 1 << InvalidType  # [sic] misspelling kept: other modules use this name

# Combined convenience masks.
AdvisorFlag = RedAdvisorFlag | BlackAdvisorFlag
BishopFlag = RedBishopFlag | BlackBishopFlag
RedKingPawnFlag = RedKingFlag | RedPawnFlag
AdvisorBishopFlag = RedAdvisorFlag | RedBishopFlag | BlackAdvisorFlag | BlackBishopFlag
def _(x, y):
    """Map a (file, rank) pair to its square number (rank-major, 9 files)."""
    return x + y * 9

# Squares each red piece kind may legally occupy; the black zones are the
# point-mirror (square 89 - sq) of the red ones.
RedKingCoordinates = [_(fx, fy) for fx in [3, 4, 5] for fy in [0, 1, 2]]
RedAdvisorCoordinates = [_(4, 1)] + [_(fx, fy) for fx in [3, 5] for fy in [0, 2]]
RedBishopCoordinates = [_(fx, fy) for fx in [0, 4, 8] for fy in [2]] + [_(fx, fy) for fx in [2, 6] for fy in [0, 4]]
# Rooks, knights and cannons can reach the whole board.
RedRookCoordinates = range(90)
RedKnightCoordinates = range(90)
RedCannonCoordinates = range(90)
# Unpromoted pawn squares (even files, ranks 3-4) plus the entire far half.
RedPawnCoordinates = [_(fx, fy) for fx in [0, 2, 4, 6, 8] for fy in [3, 4]]
RedPawnCoordinates.extend(range(45, 90))
BlackKingCoordinates = [89 - sq for sq in RedKingCoordinates]
BlackAdvisorCoordinates = [89 - sq for sq in RedAdvisorCoordinates]
BlackBishopCoordinates = [89 - sq for sq in RedBishopCoordinates]
BlackRookCoordinates = [89 - sq for sq in RedRookCoordinates]
BlackKnightCoordinates = [89 - sq for sq in RedKnightCoordinates]
BlackCannonCoordinates = [89 - sq for sq in RedCannonCoordinates]
BlackPawnCoordinates = [89 - sq for sq in RedPawnCoordinates]
def CoordinateFlags():
    """Build the per-square bitmask of piece kinds that may stand on each
    square; index 90 is the off-board sentinel (invalid only)."""
    flags = [0] * 91
    zones = (
        (RedKingCoordinates, RedKingFlag, BlackKingFlag),
        (RedAdvisorCoordinates, RedAdvisorFlag, BlackAdvisorFlag),
        (RedBishopCoordinates, RedBishopFlag, BlackBishopFlag),
        (RedPawnCoordinates, RedPawnFlag, BlackPawnFlag),
    )
    for squares, red_flag, black_flag in zones:
        for sq in squares:
            flags[sq] |= red_flag
            flags[89 - sq] |= black_flag
    # Rooks, knights, cannons and "empty" apply to every on-board square.
    everywhere = (RedRookFlag | RedKnightFlag | RedCannonFlag |
                  BlackRookFlag | BlackKnightFlag | BlackCannonFlag | EmptyFlag)
    for sq in range(90):
        flags[sq] |= everywhere
    flags[90] |= InvaildFlag
    return flags
CoordinateFlags = CoordinateFlags()
def u64(i):
    """Format *i* as an unsigned 64-bit C literal."""
    return '%sULL' % i

def u32(i):
    """Format *i* as an unsigned 32-bit C literal."""
    return '%sUL' % i

def s32(i):
    """Format *i* as a signed 32-bit C literal."""
    return '%sL' % i

def d1a_str(array_1d, func):
    """Render a 1-D array as a comma-separated row, formatting via *func*."""
    return ', '.join(func(item) for item in array_1d)

def d2a_str(array_2d, func):
    """Render a 2-D array as brace-wrapped rows, one row per line."""
    return ',\n'.join('{%s}' % d1a_str(row, func) for row in array_2d)
import os

# Layout: this script lives in <work>/<scripts>/, templates in
# <scripts>/template/, and generated C++ goes to <work>/xq/.
script_path = os.path.abspath(os.path.dirname(__file__))
work_path = os.path.dirname(script_path)
folium_path = os.path.join(work_path, 'xq')
template_path = os.path.join(script_path, 'template')
if not os.path.exists(template_path):
    os.mkdir(template_path)
if __name__ == "__main__":
    # Fix: print-function syntax; identical output on Python 2 for a single
    # argument and valid on Python 3 (the old statements were Py2-only).
    print(work_path)
    print(script_path)
    print(folium_path)
| Python |
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
# Directional step helpers over the neighbour tables below.
# NOTE(review): u() reads SquareDowns and d() reads SquareUps -- presumably
# intentional (the board's y axis runs opposite to "up"); confirm before
# renaming anything.
def u(x): return SquareDowns[x]
def d(x): return SquareUps[x]
def l(x): return SquareLefts[x]
def r(x): return SquareRights[x]
# Neighbour/coordinate tables, filled by info(); index 90 is the off-board
# sentinel square.
SquareDowns = [0]*91
SquareUps = [0]*91
SquareLefts = [0]*91
SquareRights = [0]*91
Xs = [9]*91  # file (0-8) per square; the sentinel gets 9
Ys = [10]*91  # rank (0-9) per square; the sentinel gets 10
XYs = [[90]*16 for i in range(16)]  # (y, x) -> square, default sentinel 90
KnightLegs = [[90]*128 for i in range(91)]  # [src][dst] -> the blocking "leg" square
BishopEyes = [[90]*128 for i in range(91)]  # NOTE(review): declared but never filled here -- verify it is used elsewhere
def info():
    """Fill the coordinate and orthogonal-neighbour tables for all 90 squares."""
    def to_sq(x, y):
        # Off-board coordinates collapse to the sentinel square 90.
        if x < 0 or x > 8 or y < 0 or y > 9:
            return 90
        return x + 9 * y
    for sq in range(90):
        y, x = divmod(sq, 9)
        SquareDowns[sq] = to_sq(x, y - 1)
        SquareUps[sq] = to_sq(x, y + 1)
        SquareLefts[sq] = to_sq(x - 1, y)
        SquareRights[sq] = to_sq(x + 1, y)
        Xs[sq] = x
        Ys[sq] = y
        XYs[y][x] = sq
    # Stepping off the sentinel stays on the sentinel.
    for table in (SquareDowns, SquareUps, SquareLefts, SquareRights):
        table[90] = 90
info()
def leg():
    """Record, for every knight move src -> dst, the intervening "leg" square
    that must be empty for the move to be legal (into KnightLegs)."""
    up = lambda s: SquareDowns[s]
    down = lambda s: SquareUps[s]
    left = lambda s: SquareLefts[s]
    right = lambda s: SquareRights[s]
    # Each knight jump goes two steps in one direction then one sideways;
    # the leg is the square one step out in the primary direction.
    plan = ((up, (left, right)),
            (down, (left, right)),
            (left, (up, down)),
            (right, (up, down)))
    for src in range(90):
        for step, sides in plan:
            knee = step(src)
            for side in sides:
                dst = side(step(knee))
                if dst != 90:
                    KnightLegs[src][dst] = knee
leg()
# Piece-indexed lookups over the 34 piece codes (32 board pieces + empty +
# invalid): PF = legal-zone flag, PT = piece type, PC = color
# (0 = red, 1 = black, 2 = empty, 3 = invalid).
PF = [RedKingFlag]+[RedAdvisorFlag]*2+[RedBishopFlag]*2\
    +[RedRookFlag]*2+[RedKnightFlag]*2+[RedCannonFlag]*2+[RedPawnFlag]*5\
    +[BlackKingFlag]+[BlackAdvisorFlag]*2+[BlackBishopFlag]*2\
    +[BlackRookFlag]*2+[BlackKnightFlag]*2+[BlackCannonFlag]*2+[BlackPawnFlag]*5\
    +[EmptyFlag, InvaildFlag]
PT = [RedKing]+[RedAdvisor]*2+[RedBishop]*2+[RedRook]*2+[RedKnight]*2+[RedCannon]*2+[RedPawn]*5\
    +[BlackKing]+[BlackAdvisor]*2+[BlackBishop]*2+[BlackRook]*2+[BlackKnight]*2+[BlackCannon]*2+[BlackPawn]*5\
    +[EmptyType]+[InvalidType]
PC = [0]*16 + [1]*16 + [2] + [3]
def MoveFlags():
    """Build MoveFlags[dst][src]: a bitmask of which piece kinds could move
    from src to dst by pure geometry (sliders ignore blockers here).

    Fix: `src/9 == dst/9` relied on Python 2 integer division; `//` keeps the
    same behavior and stays correct under Python 3.
    """
    u = lambda s: SquareDowns[s]
    d = lambda s: SquareUps[s]
    l = lambda s: SquareLefts[s]
    r = lambda s: SquareRights[s]
    MoveFlags = [[0] * 128 for i in range(91)]
    for src in range(90):
        sf = CoordinateFlags[src]
        # red king
        if sf & RedKingFlag:
            for dst in [u(src), d(src), l(src), r(src)]:
                if CoordinateFlags[dst] & RedKingFlag:
                    # The pawn flag is added too so a king step can later be
                    # told apart from the facing-kings case; same below.
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedKingFlag | RedPawnFlag
        # black king
        elif sf & BlackKingFlag:
            for dst in [u(src), d(src), l(src), r(src)]:
                if CoordinateFlags[dst] & BlackKingFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | BlackKingFlag | BlackPawnFlag
        # red advisor (fresh `if`: palace squares carry both king and
        # advisor flags, so this must not be chained onto the king test)
        if sf & RedAdvisorFlag:
            for dst in [l(u(src)), l(d(src)), r(u(src)), r(d(src))]:
                if CoordinateFlags[dst] & RedAdvisorFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedAdvisorFlag
        # black advisor
        elif sf & BlackAdvisorFlag:
            for dst in [l(u(src)), l(d(src)), r(u(src)), r(d(src))]:
                if CoordinateFlags[dst] & BlackAdvisorFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | BlackAdvisorFlag
        # red bishop
        elif sf & RedBishopFlag:
            for dst in [l(l(u(u(src)))), l(l(d(d(src)))), r(r(u(u(src)))), r(r(d(d(src))))]:
                if CoordinateFlags[dst] & RedBishopFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedBishopFlag
        # black bishop
        elif sf & BlackBishopFlag:
            for dst in [l(l(u(u(src)))), l(l(d(d(src)))), r(r(u(u(src)))), r(r(d(d(src))))]:
                if CoordinateFlags[dst] & BlackBishopFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | BlackBishopFlag
        # knight (any square; color-independent)
        for dst in [l(u(u(src))), l(d(d(src))), r(u(u(src))), r(d(d(src))),
                    l(l(u(src))), l(l(d(src))), r(r(u(src))), r(r(d(src)))]:
            if dst != 90:  # was `dst in range(90)`; steps only yield 0-90
                MoveFlags[dst][src] = MoveFlags[dst][src] | RedKnightFlag | BlackKnightFlag
        # red pawn
        if sf & RedPawnFlag:
            for dst in [l(src), r(src), d(src)]:
                if CoordinateFlags[dst] & RedPawnFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedPawnFlag
        # black pawn (fresh `if`: the pawn zones of both sides overlap)
        if sf & BlackPawnFlag:
            for dst in [l(src), r(src), u(src)]:
                if CoordinateFlags[dst] & BlackPawnFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | BlackPawnFlag
        for dst in range(90):
            df = CoordinateFlags[dst]
            # facing kings on the same file
            if sf & RedKingFlag and df & BlackKingFlag and src % 9 == dst % 9:
                MoveFlags[dst][src] = MoveFlags[dst][src] | RedKingFlag
            elif sf & BlackKingFlag and df & RedKingFlag and src % 9 == dst % 9:
                MoveFlags[dst][src] = MoveFlags[dst][src] | BlackKingFlag
            # rook/cannon: any two distinct squares on one file or one rank
            if src != dst:
                if src % 9 == dst % 9 or src // 9 == dst // 9:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedRookFlag | RedCannonFlag | BlackRookFlag | BlackCannonFlag
    return MoveFlags
MoveFlags = MoveFlags()
def KnightMoves():
    """Build KnightMoves[sq]: up to 8 packed entries (leg << 8) | dst per
    square, padded with the sentinel 23130 (0x5A5A)."""
    SENTINEL = 23130
    table = [[SENTINEL] * 16 for i in range(91)]
    # Two perpendicular landings per primary direction, in the original
    # fill order (up, down, left, right).
    plan = ((u, (l, r)), (d, (l, r)), (l, (u, d)), (r, (u, d)))
    for sq in range(90):
        entries = table[sq]
        for step, sides in plan:
            knee = step(sq)
            for side in sides:
                dst = side(step(knee))
                if dst != 90:
                    entries[entries.index(SENTINEL)] = (knee << 8) | dst
    return table
KnightMoves = KnightMoves()
def RedKingPawnMoves():
    """Build the shared red king/pawn move table: up to 8 destination squares
    per source, padded with the sentinel 90."""
    table = [[90] * 8 for i in range(91)]
    for sq in range(90):
        moves = table[sq]
        flag = CoordinateFlags[sq]
        if flag & RedKingFlag:
            candidates, keep = [u(sq), d(sq), l(sq), r(sq)], RedKingFlag
        elif flag & RedPawnFlag:
            # Red pawns advance via d() and, once across, step sideways.
            candidates, keep = [d(sq), l(sq), r(sq)], RedPawnFlag
        else:
            candidates, keep = [], 0
        for dst in candidates:
            if CoordinateFlags[dst] & keep:
                moves[moves.index(90)] = dst
    return table
RedKingPawnMoves = RedKingPawnMoves()
def BlackKingPawnMoves():
    """Build the shared black king/pawn move table: up to 8 destination
    squares per source, padded with the sentinel 90."""
    table = [[90] * 8 for i in range(91)]
    for sq in range(90):
        moves = table[sq]
        flag = CoordinateFlags[sq]
        if flag & BlackKingFlag:
            candidates, keep = [u(sq), d(sq), l(sq), r(sq)], BlackKingFlag
        elif flag & BlackPawnFlag:
            # Black pawns advance via u() and, once across, step sideways.
            candidates, keep = [u(sq), l(sq), r(sq)], BlackPawnFlag
        else:
            candidates, keep = [], 0
        for dst in candidates:
            if CoordinateFlags[dst] & keep:
                moves[moves.index(90)] = dst
    return table
BlackKingPawnMoves = BlackKingPawnMoves()
def AdvisorBishopMoves():
    """91x8 table of advisor/bishop destinations per square.

    Bishops move two diagonal steps, advisors one; unused slots keep the
    sentinel 90.
    """
    table = [[90] * 8 for _ in range(91)]
    for sq in range(90):
        flag = CoordinateFlags[sq]
        if flag & BishopFlag:
            candidates = (u(u(r(r(sq)))), u(u(l(l(sq)))),
                          d(d(r(r(sq)))), d(d(l(l(sq)))))
            keep = BishopFlag
        elif flag & AdvisorFlag:
            candidates = (u(l(sq)), u(r(sq)), d(l(sq)), d(r(sq)))
            keep = AdvisorFlag
        else:
            continue
        row = table[sq]
        for dst in candidates:
            if CoordinateFlags[dst] & keep:
                row[row.index(90)] = dst
    return table
AdvisorBishopMoves = AdvisorBishopMoves()
def main():
    """Render src/xq_data.cpp from its template with the generated tables."""
    subs = {
        'xs': d1a_str(Xs, u32),
        'ys': d1a_str(Ys, u32),
        'xys': d2a_str(XYs, u32),
        'downs': d1a_str(SquareDowns, u32),
        'ups': d1a_str(SquareUps, u32),
        'lefts': d1a_str(SquareLefts, u32),
        'rights': d1a_str(SquareRights, u32),
        'coordinate_flags': d1a_str(CoordinateFlags, u32),
        'ptypes': d1a_str(PT, u32),
        'pflags': d1a_str(PF, u32),
        'pcolors': d1a_str(PC, u32),
        'mf': d2a_str(MoveFlags, u32),
        'kl': d2a_str(KnightLegs, u32),
        'nm': d2a_str(KnightMoves, u32),
        'rkpm': d2a_str(RedKingPawnMoves, u32),
        'bkpm': d2a_str(BlackKingPawnMoves, u32),
        'abm': d2a_str(AdvisorBishopMoves, u32),
    }
    tmpl = string.Template(open(os.path.join(template_path, 'xq_data.cpp.tmpl'), 'rb').read())
    target = os.path.join(folium_path, 'xq_data.cpp')
    open(target, 'wb').write(str(tmpl.safe_substitute(subs)))
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
from xq_data import *
# Piece-square value tables, one slot per square plus index 90 for the
# off-board sentinel (left at 0).  Populated by value() below.
RedKingPawnValues = [0]*91
BlackKingPawnValues = [0]*91
AdvisorBishopValues = [0]*91
RedRookValues = [0]*91
BlackRookValues = [0]*91
RedKnightValues = [0]*91
BlackKnightValues = [0]*91
RedCannonValues = [0]*91
BlackCannonValues = [0]*91
def value():
    """Populate the piece-square value tables declared above.

    Each red entry is the piece's base value plus a positional bonus from
    the 9x10 tables below (indexed by square x + 9*y); the matching black
    entry mirrors it through square 89 - sq with the sign negated.
    """
    # Base material values per piece kind.
    KingBaseValue = 5000
    AdvisorBaseValue = 40
    BishopBaseValue = 40
    RookBaseValue = 200
    KnightBaseValue = 88
    CannonBaseValue = 96
    PawnBaseValue = 9
    # Positional bonuses, one row of 9 columns per rank.
    RedKingPawnPositionValues = [
    0, 0, 0, 1, 5, 1, 0, 0, 0,
    0, 0, 0, -8, -8, -8, 0, 0, 0,
    0, 0, 0, -9, -9, -9, 0, 0, 0,
    -2, 0, -2, 0, 6, 0, -2, 0, -2,
    3, 0, 4, 0, 7, 0, 4, 0, 3,
    10, 18, 22, 35, 40, 35, 22, 18, 10,
    20, 27, 30, 40, 42, 40, 35, 27, 20,
    20, 30, 45, 55, 55, 55, 45, 30, 20,
    20, 30, 50, 65, 70, 65, 50, 30, 20,
    0, 0, 0, 2, 4, 2, 0, 0, 0,]
    RedAdvisorBishopPositionValues = [
    0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 3, 0, 0, 0, 0,
    -2, 0, 0, 0, 3, 0, 0, 0, -2,
    0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0,]
    RedRookPositionValues = [
    -6, 6, 4, 12, 0, 12, 4, 6, -6,
    5, 8, 6, 12, 0, 12, 6, 8, 5,
    -2, 8, 4, 12, 12, 12, 4, 8, -2,
    4, 9, 4, 12, 14, 12, 4, 9, 4,
    8, 12, 12, 14, 15, 14, 12, 12, 8,
    8, 11, 11, 14, 15, 14, 11, 11, 8,
    6, 13, 13, 16, 16, 16, 13, 13, 6,
    6, 8, 7, 14, 16, 14, 7, 8, 6,
    6, 12, 9, 16, 33, 16, 9, 12, 6,
    6, 8, 7, 13, 14, 13, 7, 8, 6,]
    RedKnightPositionValues = [
    0, -3, 2, 0, 2, 0, 2, -3, 0,
    -3, 2, 4, 5, -10, 5, 4, 2, -3,
    5, 4, 6, 7, 4, 7, 6, 4, 5,
    4, 6, 10, 7, 10, 7, 10, 6, 4,
    2, 10, 13, 14, 15, 14, 13, 10, 2,
    2, 12, 11, 15, 16, 15, 11, 12, 2,
    5, 20, 12, 19, 12, 19, 12, 20, 5,
    4, 10, 11, 15, 11, 15, 11, 10, 4,
    2, 8, 15, 9, 6, 9, 15, 8, 2,
    2, 2, 2, 8, 2, 8, 2, 2, 2,]
    RedCannonPositionValues = [
    0, 0, 1, 3, 3, 3, 1, 0, 0,
    0, 1, 2, 2, 2, 2, 2, 1, 0,
    1, 0, 4, 3, 5, 3, 4, 0, 1,
    0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, 0, 3, 0, 4, 0, 3, 0, -1,
    0, 0, 0, 0, 4, 0, 0, 0, 0,
    0, 3, 3, 2, 4, 2, 3, 3, 0,
    1, 1, 0, -5, -4, -5, 0, 1, 1,
    2, 2, 0, -4, -7, -4, 0, 2, 2,
    4, 4, 0, -5, -6, -5, 0, 4, 4,]
    for sq in range(90):
        flag = CoordinateFlags[sq]
        # King/pawn and advisor/bishop entries only on their legal squares.
        if flag & RedKingFlag:
            RedKingPawnValues[sq] = KingBaseValue + RedKingPawnPositionValues[sq]
            BlackKingPawnValues[89 - sq] = -RedKingPawnValues[sq]
        if flag & RedAdvisorFlag:
            AdvisorBishopValues[sq] = AdvisorBaseValue + RedAdvisorBishopPositionValues[sq]
            AdvisorBishopValues[89 - sq] = -AdvisorBishopValues[sq]
        if flag & RedBishopFlag:
            AdvisorBishopValues[sq] = BishopBaseValue + RedAdvisorBishopPositionValues[sq]
            AdvisorBishopValues[89 - sq] = -AdvisorBishopValues[sq]
        # Rook/knight/cannon entries are filled for every board square
        # (no flag check).
        RedRookValues[sq] = RookBaseValue + RedRookPositionValues[sq]
        BlackRookValues[89 - sq] = -RedRookValues[sq]
        RedKnightValues[sq] = KnightBaseValue + RedKnightPositionValues[sq]
        BlackKnightValues[89 - sq] = -RedKnightValues[sq]
        RedCannonValues[sq] = CannonBaseValue + RedCannonPositionValues[sq]
        BlackCannonValues[89 - sq] = -RedCannonValues[sq]
        if flag & RedPawnFlag:
            RedKingPawnValues[sq] = PawnBaseValue + RedKingPawnPositionValues[sq]
            BlackKingPawnValues[89 - sq] = -RedKingPawnValues[sq]
value()
# Zobrist hashing tables: 64-bit "locks" and 32-bit "keys", one slot per
# square plus the off-board sentinel 90.  Populated by hash() below.
RedKingPawnLocks = [0]*91
BlackKingPawnLocks = [0]*91
AdvisorBishopLocks = [0]*91
RedRookLocks = [0]*91
BlackRookLocks = [0]*91
RedKnightLocks = [0]*91
BlackKnightLocks = [0]*91
RedCannonLocks = [0]*91
BlackCannonLocks = [0]*91
RedKingPawnKeys = [0]*91
BlackKingPawnKeys = [0]*91
AdvisorBishopKeys = [0]*91
RedRookKeys = [0]*91
BlackRookKeys = [0]*91
RedKnightKeys = [0]*91
BlackKnightKeys = [0]*91
RedCannonKeys = [0]*91
BlackCannonKeys = [0]*91
def hash():
    """Fill the Zobrist lock (64-bit) and key (32-bit) tables.

    A seeded RNG first fills the tables, but every entry for squares
    0..89 is then overwritten from 'hash.data', which is the
    authoritative source (two lines — high 32 bits, low 32 bits — per
    lock entry, one line per key entry).
    """
    from random import randint, seed
    seed(51)
    for sq in range(90):
        flag = CoordinateFlags[sq]
        # randint is inclusive at both ends; the old bounds of 2**64 /
        # 2**32 could (rarely) produce values too wide for the emitted
        # u64/u32 literals, so clamp to 2**64-1 / 2**32-1.
        if flag & RedKingPawnFlag:
            RedKingPawnLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
            RedKingPawnKeys[sq] = randint(0, 0xFFFFFFFF)
            BlackKingPawnLocks[89 - sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
            BlackKingPawnKeys[89 - sq] = randint(0, 0xFFFFFFFF)
        if flag & AdvisorBishopFlag:
            AdvisorBishopLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
            AdvisorBishopKeys[sq] = randint(0, 0xFFFFFFFF)
        RedRookLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
        RedRookKeys[sq] = randint(0, 0xFFFFFFFF)
        BlackRookLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
        BlackRookKeys[sq] = randint(0, 0xFFFFFFFF)
        RedKnightLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
        RedKnightKeys[sq] = randint(0, 0xFFFFFFFF)
        BlackKnightLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
        BlackKnightKeys[sq] = randint(0, 0xFFFFFFFF)
        RedCannonLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
        RedCannonKeys[sq] = randint(0, 0xFFFFFFFF)
        BlackCannonLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
        BlackCannonKeys[sq] = randint(0, 0xFFFFFFFF)
    # Overwrite from hash.data; close the handle instead of leaking it
    # (the old code never closed the file, and named it `file`, shadowing
    # the Python 2 builtin).
    data = open('hash.data')
    try:
        for seq in [RedKingPawnLocks,BlackKingPawnLocks,AdvisorBishopLocks,RedRookLocks,BlackRookLocks,RedKnightLocks,BlackKnightLocks,RedCannonLocks,BlackCannonLocks]:
            for i in range(90):
                hi = int(data.readline())
                lo = int(data.readline())
                seq[i] = (hi << 32) | lo
        for seq in [RedKingPawnKeys,BlackKingPawnKeys,AdvisorBishopKeys,RedRookKeys,BlackRookKeys,RedKnightKeys,BlackKnightKeys,RedCannonKeys,BlackCannonKeys]:
            for i in range(90):
                seq[i] = int(data.readline())
    finally:
        data.close()
hash()
def main():
    """Render src/xq_position_data.cpp: Zobrist locks/keys, piece-square
    values, and the per-piece / per-type table-name lookups."""
    subs = {}
    # Per-square tables: <x>l = 64-bit locks, <x>k = 32-bit keys,
    # <x>v = signed piece-square values.
    subs['rkpl'] = d1a_str(RedKingPawnLocks, u64)
    subs['rkpk'] = d1a_str(RedKingPawnKeys, u32)
    subs['rkpv'] = d1a_str(RedKingPawnValues, s32)
    subs['bkpl'] = d1a_str(BlackKingPawnLocks, u64)
    subs['bkpk'] = d1a_str(BlackKingPawnKeys, u32)
    subs['bkpv'] = d1a_str(BlackKingPawnValues, s32)
    subs['abl'] = d1a_str(AdvisorBishopLocks, u64)
    subs['abk'] = d1a_str(AdvisorBishopKeys, u32)
    subs['abv'] = d1a_str(AdvisorBishopValues, s32)
    subs['rrl'] = d1a_str(RedRookLocks, u64)
    subs['rrk'] = d1a_str(RedRookKeys, u32)
    subs['rrv'] = d1a_str(RedRookValues, s32)
    subs['brl'] = d1a_str(BlackRookLocks, u64)
    subs['brk'] = d1a_str(BlackRookKeys, u32)
    subs['brv'] = d1a_str(BlackRookValues, s32)
    subs['rnl'] = d1a_str(RedKnightLocks, u64)
    subs['rnk'] = d1a_str(RedKnightKeys, u32)
    subs['rnv'] = d1a_str(RedKnightValues, s32)
    subs['bnl'] = d1a_str(BlackKnightLocks, u64)
    subs['bnk'] = d1a_str(BlackKnightKeys, u32)
    subs['bnv'] = d1a_str(BlackKnightValues, s32)
    subs['rcl'] = d1a_str(RedCannonLocks, u64)
    subs['rck'] = d1a_str(RedCannonKeys, u32)
    subs['rcv'] = d1a_str(RedCannonValues, s32)
    subs['bcl'] = d1a_str(BlackCannonLocks, u64)
    subs['bck'] = d1a_str(BlackCannonKeys, u32)
    subs['bcv'] = d1a_str(BlackCannonValues, s32)
    # NOTE(review): PF/PT/PC lists used to be built here but were never
    # referenced by this template — removed as dead code.
    # Table-name lookups per piece id (32 entries: 1 king, 2 advisors,
    # 2 bishops, 2 rooks, 2 knights, 2 cannons, 5 pawns per side).
    PL = ['s_red_king_pawn_locks']+['s_advisor_bishop_locks']*4+['s_red_rook_locks']*2+['s_red_knight_locks']*2\
        +['s_red_cannon_locks']*2+['s_red_king_pawn_locks']*5\
        +['s_black_king_pawn_locks']+['s_advisor_bishop_locks']*4+['s_black_rook_locks']*2+['s_black_knight_locks']*2\
        +['s_black_cannon_locks']*2+['s_black_king_pawn_locks']*5
    PK = ['s_red_king_pawn_keys']+['s_advisor_bishop_keys']*4+['s_red_rook_keys']*2+['s_red_knight_keys']*2\
        +['s_red_cannon_keys']*2+['s_red_king_pawn_keys']*5\
        +['s_black_king_pawn_keys']+['s_advisor_bishop_keys']*4+['s_black_rook_keys']*2+['s_black_knight_keys']*2\
        +['s_black_cannon_keys']*2+['s_black_king_pawn_keys']*5
    PV = ['s_red_king_pawn_values']+['s_advisor_bishop_values']*4+['s_red_rook_values']*2+['s_red_knight_values']*2\
        +['s_red_cannon_values']*2+['s_red_king_pawn_values']*5\
        +['s_black_king_pawn_values']+['s_advisor_bishop_values']*4+['s_black_rook_values']*2+['s_black_knight_values']*2\
        +['s_black_cannon_values']*2+['s_black_king_pawn_values']*5
    subs['plocks'] = d1a_str(PL, lambda x: x)
    subs['pkeys'] = d1a_str(PK, lambda x: x)
    subs['pvalues'] = d1a_str(PV, lambda x: x)
    # Table-name lookups per piece type (14 entries, 7 per side).
    TL = ['s_red_king_pawn_locks']+['s_advisor_bishop_locks']*2\
        +['s_red_rook_locks','s_red_knight_locks','s_red_cannon_locks','s_red_king_pawn_locks']\
        +['s_black_king_pawn_locks']+['s_advisor_bishop_locks']*2\
        +['s_black_rook_locks','s_black_knight_locks','s_black_cannon_locks','s_black_king_pawn_locks']
    TK = ['s_red_king_pawn_keys']+['s_advisor_bishop_keys']*2\
        +['s_red_rook_keys','s_red_knight_keys','s_red_cannon_keys','s_red_king_pawn_keys']\
        +['s_black_king_pawn_keys']+['s_advisor_bishop_keys']*2\
        +['s_black_rook_keys','s_black_knight_keys','s_black_cannon_keys','s_black_king_pawn_keys']
    TV = ['s_red_king_pawn_values']+['s_advisor_bishop_values']*2\
        +['s_red_rook_values','s_red_knight_values','s_red_cannon_values','s_red_king_pawn_values']\
        +['s_black_king_pawn_values']+['s_advisor_bishop_values']*2\
        +['s_black_rook_values','s_black_knight_values','s_black_cannon_values','s_black_king_pawn_values']
    subs['tlocks'] = d1a_str(TL, lambda x: x)
    subs['tkeys'] = d1a_str(TK, lambda x: x)
    subs['tvalues'] = d1a_str(TV, lambda x: x)
    template = string.Template(open(os.path.join(template_path, 'xq_position_data.cpp.tmpl'), 'rb').read())
    path = os.path.join(folium_path, 'xq_position_data.cpp')
    open(path, 'wb').write(str(template.safe_substitute(subs)))
if __name__ == "__main__":
    main()
#!/usr/bin/env python
#coding=utf-8
def main():
    """Start the snake protocol engine when run with no CLI arguments."""
    import sys
    if not sys.argv[1:]:
        import snake.protocol
        snake.protocol.Engine().run()
if __name__ == "__main__":
    main()
import xq_data
xq_data.main()
import engine_data
engine_data.main()
import history_data
history_data.main()
import bitlines_data
bitlines_data.main()
| Python |
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
from xq_data import *
# Piece-square value tables, one slot per square plus index 90 for the
# off-board sentinel (left at 0).  Populated by value() below.
RedKingPawnValues = [0]*91
BlackKingPawnValues = [0]*91
AdvisorBishopValues = [0]*91
RedRookValues = [0]*91
BlackRookValues = [0]*91
RedKnightValues = [0]*91
BlackKnightValues = [0]*91
RedCannonValues = [0]*91
BlackCannonValues = [0]*91
def value():
    """Populate the piece-square value tables declared above.

    Each red entry is the piece's base value plus a positional bonus from
    the 9x10 tables below (indexed by square x + 9*y); the matching black
    entry mirrors it through square 89 - sq with the sign negated.
    """
    # Base material values per piece kind.
    KingBaseValue = 5000
    AdvisorBaseValue = 40
    BishopBaseValue = 40
    RookBaseValue = 200
    KnightBaseValue = 88
    CannonBaseValue = 96
    PawnBaseValue = 9
    # Positional bonuses, one row of 9 columns per rank.
    RedKingPawnPositionValues = [
    0, 0, 0, 1, 5, 1, 0, 0, 0,
    0, 0, 0, -8, -8, -8, 0, 0, 0,
    0, 0, 0, -9, -9, -9, 0, 0, 0,
    -2, 0, -2, 0, 6, 0, -2, 0, -2,
    3, 0, 4, 0, 7, 0, 4, 0, 3,
    10, 18, 22, 35, 40, 35, 22, 18, 10,
    20, 27, 30, 40, 42, 40, 35, 27, 20,
    20, 30, 45, 55, 55, 55, 45, 30, 20,
    20, 30, 50, 65, 70, 65, 50, 30, 20,
    0, 0, 0, 2, 4, 2, 0, 0, 0,]
    RedAdvisorBishopPositionValues = [
    0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 3, 0, 0, 0, 0,
    -2, 0, 0, 0, 3, 0, 0, 0, -2,
    0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0,]
    RedRookPositionValues = [
    -6, 6, 4, 12, 0, 12, 4, 6, -6,
    5, 8, 6, 12, 0, 12, 6, 8, 5,
    -2, 8, 4, 12, 12, 12, 4, 8, -2,
    4, 9, 4, 12, 14, 12, 4, 9, 4,
    8, 12, 12, 14, 15, 14, 12, 12, 8,
    8, 11, 11, 14, 15, 14, 11, 11, 8,
    6, 13, 13, 16, 16, 16, 13, 13, 6,
    6, 8, 7, 14, 16, 14, 7, 8, 6,
    6, 12, 9, 16, 33, 16, 9, 12, 6,
    6, 8, 7, 13, 14, 13, 7, 8, 6,]
    RedKnightPositionValues = [
    0, -3, 2, 0, 2, 0, 2, -3, 0,
    -3, 2, 4, 5, -10, 5, 4, 2, -3,
    5, 4, 6, 7, 4, 7, 6, 4, 5,
    4, 6, 10, 7, 10, 7, 10, 6, 4,
    2, 10, 13, 14, 15, 14, 13, 10, 2,
    2, 12, 11, 15, 16, 15, 11, 12, 2,
    5, 20, 12, 19, 12, 19, 12, 20, 5,
    4, 10, 11, 15, 11, 15, 11, 10, 4,
    2, 8, 15, 9, 6, 9, 15, 8, 2,
    2, 2, 2, 8, 2, 8, 2, 2, 2,]
    RedCannonPositionValues = [
    0, 0, 1, 3, 3, 3, 1, 0, 0,
    0, 1, 2, 2, 2, 2, 2, 1, 0,
    1, 0, 4, 3, 5, 3, 4, 0, 1,
    0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, 0, 3, 0, 4, 0, 3, 0, -1,
    0, 0, 0, 0, 4, 0, 0, 0, 0,
    0, 3, 3, 2, 4, 2, 3, 3, 0,
    1, 1, 0, -5, -4, -5, 0, 1, 1,
    2, 2, 0, -4, -7, -4, 0, 2, 2,
    4, 4, 0, -5, -6, -5, 0, 4, 4,]
    for sq in range(90):
        flag = SquareFlags[sq]
        # King/pawn and advisor/bishop entries only on their legal squares.
        if flag & RedKingFlag:
            RedKingPawnValues[sq] = KingBaseValue + RedKingPawnPositionValues[sq]
            BlackKingPawnValues[89 - sq] = -RedKingPawnValues[sq]
        if flag & RedAdvisorFlag:
            AdvisorBishopValues[sq] = AdvisorBaseValue + RedAdvisorBishopPositionValues[sq]
            AdvisorBishopValues[89 - sq] = -AdvisorBishopValues[sq]
        if flag & RedBishopFlag:
            AdvisorBishopValues[sq] = BishopBaseValue + RedAdvisorBishopPositionValues[sq]
            AdvisorBishopValues[89 - sq] = -AdvisorBishopValues[sq]
        # Rook/knight/cannon entries are filled for every board square
        # (no flag check).
        RedRookValues[sq] = RookBaseValue + RedRookPositionValues[sq]
        BlackRookValues[89 - sq] = -RedRookValues[sq]
        RedKnightValues[sq] = KnightBaseValue + RedKnightPositionValues[sq]
        BlackKnightValues[89 - sq] = -RedKnightValues[sq]
        RedCannonValues[sq] = CannonBaseValue + RedCannonPositionValues[sq]
        BlackCannonValues[89 - sq] = -RedCannonValues[sq]
        if flag & RedPawnFlag:
            RedKingPawnValues[sq] = PawnBaseValue + RedKingPawnPositionValues[sq]
            BlackKingPawnValues[89 - sq] = -RedKingPawnValues[sq]
value()
# Zobrist hashing tables: 64-bit "locks" and 32-bit "keys", one slot per
# square plus the off-board sentinel 90.  Populated by hash() below.
RedKingPawnLocks = [0]*91
BlackKingPawnLocks = [0]*91
AdvisorBishopLocks = [0]*91
RedRookLocks = [0]*91
BlackRookLocks = [0]*91
RedKnightLocks = [0]*91
BlackKnightLocks = [0]*91
RedCannonLocks = [0]*91
BlackCannonLocks = [0]*91
RedKingPawnKeys = [0]*91
BlackKingPawnKeys = [0]*91
AdvisorBishopKeys = [0]*91
RedRookKeys = [0]*91
BlackRookKeys = [0]*91
RedKnightKeys = [0]*91
BlackKnightKeys = [0]*91
RedCannonKeys = [0]*91
BlackCannonKeys = [0]*91
def hash():
    """Fill the Zobrist lock (64-bit) and key (32-bit) tables.

    seed(51) keeps the generated values identical across runs so the
    emitted C++ data is reproducible.
    """
    from random import randint, seed
    seed(51)
    for sq in range(90):
        flag = SquareFlags[sq]
        # randint is inclusive at both ends; the old upper bounds of
        # 2**64 / 2**32 could (rarely) yield values too wide for the
        # emitted u64/u32 literals, so clamp to 2**64-1 / 2**32-1.
        if flag & RedKingPawnFlag:
            RedKingPawnLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
            RedKingPawnKeys[sq] = randint(0, 0xFFFFFFFF)
            BlackKingPawnLocks[89 - sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
            BlackKingPawnKeys[89 - sq] = randint(0, 0xFFFFFFFF)
        if flag & AdvisorBishopFlag:
            AdvisorBishopLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
            AdvisorBishopKeys[sq] = randint(0, 0xFFFFFFFF)
        RedRookLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
        RedRookKeys[sq] = randint(0, 0xFFFFFFFF)
        BlackRookLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
        BlackRookKeys[sq] = randint(0, 0xFFFFFFFF)
        RedKnightLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
        RedKnightKeys[sq] = randint(0, 0xFFFFFFFF)
        BlackKnightLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
        BlackKnightKeys[sq] = randint(0, 0xFFFFFFFF)
        RedCannonLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
        RedCannonKeys[sq] = randint(0, 0xFFFFFFFF)
        BlackCannonLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
        BlackCannonKeys[sq] = randint(0, 0xFFFFFFFF)
hash()
def main():
    """Render src/engine_data.cpp: Zobrist locks/keys, piece-square
    values, and the per-piece / per-type table-name lookups."""
    subs = {}
    # Per-square tables: <x>l = 64-bit locks, <x>k = 32-bit keys,
    # <x>v = signed piece-square values.
    subs['rkpl'] = d1a_str(RedKingPawnLocks, u64)
    subs['rkpk'] = d1a_str(RedKingPawnKeys, u32)
    subs['rkpv'] = d1a_str(RedKingPawnValues, s32)
    subs['bkpl'] = d1a_str(BlackKingPawnLocks, u64)
    subs['bkpk'] = d1a_str(BlackKingPawnKeys, u32)
    subs['bkpv'] = d1a_str(BlackKingPawnValues, s32)
    subs['abl'] = d1a_str(AdvisorBishopLocks, u64)
    subs['abk'] = d1a_str(AdvisorBishopKeys, u32)
    subs['abv'] = d1a_str(AdvisorBishopValues, s32)
    subs['rrl'] = d1a_str(RedRookLocks, u64)
    subs['rrk'] = d1a_str(RedRookKeys, u32)
    subs['rrv'] = d1a_str(RedRookValues, s32)
    subs['brl'] = d1a_str(BlackRookLocks, u64)
    subs['brk'] = d1a_str(BlackRookKeys, u32)
    subs['brv'] = d1a_str(BlackRookValues, s32)
    subs['rnl'] = d1a_str(RedKnightLocks, u64)
    subs['rnk'] = d1a_str(RedKnightKeys, u32)
    subs['rnv'] = d1a_str(RedKnightValues, s32)
    subs['bnl'] = d1a_str(BlackKnightLocks, u64)
    subs['bnk'] = d1a_str(BlackKnightKeys, u32)
    subs['bnv'] = d1a_str(BlackKnightValues, s32)
    subs['rcl'] = d1a_str(RedCannonLocks, u64)
    subs['rck'] = d1a_str(RedCannonKeys, u32)
    subs['rcv'] = d1a_str(RedCannonValues, s32)
    subs['bcl'] = d1a_str(BlackCannonLocks, u64)
    subs['bck'] = d1a_str(BlackCannonKeys, u32)
    subs['bcv'] = d1a_str(BlackCannonValues, s32)
    # NOTE(review): PF/PT/PC lists used to be built here but were never
    # referenced by this template — removed as dead code.
    # Table-name lookups per piece id (32 entries: 1 king, 2 advisors,
    # 2 bishops, 2 rooks, 2 knights, 2 cannons, 5 pawns per side).
    PL = ['s_red_king_pawn_locks']+['s_advisor_bishop_locks']*4+['s_red_rook_locks']*2+['s_red_knight_locks']*2\
        +['s_red_cannon_locks']*2+['s_red_king_pawn_locks']*5\
        +['s_black_king_pawn_locks']+['s_advisor_bishop_locks']*4+['s_black_rook_locks']*2+['s_black_knight_locks']*2\
        +['s_black_cannon_locks']*2+['s_black_king_pawn_locks']*5
    PK = ['s_red_king_pawn_keys']+['s_advisor_bishop_keys']*4+['s_red_rook_keys']*2+['s_red_knight_keys']*2\
        +['s_red_cannon_keys']*2+['s_red_king_pawn_keys']*5\
        +['s_black_king_pawn_keys']+['s_advisor_bishop_keys']*4+['s_black_rook_keys']*2+['s_black_knight_keys']*2\
        +['s_black_cannon_keys']*2+['s_black_king_pawn_keys']*5
    PV = ['s_red_king_pawn_values']+['s_advisor_bishop_values']*4+['s_red_rook_values']*2+['s_red_knight_values']*2\
        +['s_red_cannon_values']*2+['s_red_king_pawn_values']*5\
        +['s_black_king_pawn_values']+['s_advisor_bishop_values']*4+['s_black_rook_values']*2+['s_black_knight_values']*2\
        +['s_black_cannon_values']*2+['s_black_king_pawn_values']*5
    subs['plocks'] = d1a_str(PL, lambda x: x)
    subs['pkeys'] = d1a_str(PK, lambda x: x)
    subs['pvalues'] = d1a_str(PV, lambda x: x)
    # Table-name lookups per piece type (14 entries, 7 per side).
    TL = ['s_red_king_pawn_locks']+['s_advisor_bishop_locks']*2\
        +['s_red_rook_locks','s_red_knight_locks','s_red_cannon_locks','s_red_king_pawn_locks']\
        +['s_black_king_pawn_locks']+['s_advisor_bishop_locks']*2\
        +['s_black_rook_locks','s_black_knight_locks','s_black_cannon_locks','s_black_king_pawn_locks']
    TK = ['s_red_king_pawn_keys']+['s_advisor_bishop_keys']*2\
        +['s_red_rook_keys','s_red_knight_keys','s_red_cannon_keys','s_red_king_pawn_keys']\
        +['s_black_king_pawn_keys']+['s_advisor_bishop_keys']*2\
        +['s_black_rook_keys','s_black_knight_keys','s_black_cannon_keys','s_black_king_pawn_keys']
    TV = ['s_red_king_pawn_values']+['s_advisor_bishop_values']*2\
        +['s_red_rook_values','s_red_knight_values','s_red_cannon_values','s_red_king_pawn_values']\
        +['s_black_king_pawn_values']+['s_advisor_bishop_values']*2\
        +['s_black_rook_values','s_black_knight_values','s_black_cannon_values','s_black_king_pawn_values']
    subs['tlocks'] = d1a_str(TL, lambda x: x)
    subs['tkeys'] = d1a_str(TK, lambda x: x)
    subs['tvalues'] = d1a_str(TV, lambda x: x)
    template = string.Template(open(os.path.join(template_path, 'engine_data.cpp.tmpl'), 'rb').read())
    path = os.path.join(folium_path, 'engine_data.cpp')
    open(path, 'wb').write(str(template.safe_substitute(subs)))
if __name__ == "__main__":
    main()
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
from xq_data import *
def capture_scores():
    """Build the 32x32 capture score table.

    scores[attacker][victim] = e[victim_type] - m[attacker_type] when the
    two pieces have different colors; same-color pairs stay 0.
    """
    scores = [[0] * 32 for _ in range(32)]
    # Victim weights (e) and attacker costs (m), indexed by base piece
    # type: king, advisor, bishop, rook, knight, cannon, pawn.
    e = [10000, 1041, 1040, 2000, 1088, 1096, 1020]
    m = [1000, 41, 40, 200, 88, 96, 20]
    def base_type(piece):
        # Fold black types (7..13) onto the red range 0..6; renamed from
        # `type`, which shadowed the builtin.
        t = PT[piece]
        if t >= 7:
            t -= 7
        return t
    # NOTE(review): the old `level()` helper referenced an undefined
    # global `levels` and was never called; removed as dead code.
    for src_piece in range(32):
        for dst_piece in range(32):
            if PC[src_piece] != PC[dst_piece]:
                scores[src_piece][dst_piece] = e[base_type(dst_piece)] - m[base_type(src_piece)]
    return scores
capture_scores = capture_scores()
def main():
    """Render src/history_data.cpp from its template."""
    subs = {'scores': d2a_str(capture_scores, u32)}
    tmpl = string.Template(open(os.path.join(template_path, 'history_data.cpp.tmpl'), 'rb').read())
    target = os.path.join(folium_path, 'history_data.cpp')
    open(target, 'wb').write(str(tmpl.safe_substitute(subs)))
if __name__ == "__main__":
    main()
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
def main():
line_infos = [[0]*1024 for i in range(10)]
for idx in range(10):
for flag in range(1024):
flag |= 0xFC00
p1 = (idx and [idx-1] or [0xF])[0]
while not flag & (1 << p1):
p1 = (p1 and [p1-1] or [0xF])[0]
p2 = (p1 and [p1-1] or [0xF])[0]
while not flag & (1 << p2):
p2 = (p2 and [p2-1] or [0xF])[0]
n1 = idx + 1
while not flag & (1 << n1):
n1 += 1
n2 = n1 + 1
while not flag & (1 << n2):
n2 += 1
line_infos[idx][flag & 1023] = p1 | (p2 << 8) | (n1 << 16) | (n2 << 24)
dict = {}
dict['infos'] = d2a_str(line_infos, u32)
template = open(os.path.join(template_path, 'bitlines_data.cpp.tmpl'), 'rb').read()
template = string.Template(template)
path = os.path.join(folium_path, 'bitlines_data.cpp')
open(path, 'wb').write(str(template.safe_substitute(dict)))
if __name__ == "__main__":
main()
| Python |
# Piece type ids: 0..6 red, 7..13 black, 14 empty, 15 off-board.
RedKing = 0;
RedAdvisor = 1;
RedBishop = 2;
RedRook = 3;
RedKnight = 4;
RedCannon = 5;
RedPawn = 6;
BlackKing = 7;
BlackAdvisor = 8;
BlackBishop = 9;
BlackRook = 10;
BlackKnight = 11;
BlackCannon = 12;
BlackPawn = 13;
EmptyType = 14;
InvalidType = 15;
# One-hot flag per type (bit position == the type id above).
RedKingFlag = 1 << 0;
RedAdvisorFlag = 1 << 1;
RedBishopFlag = 1 << 2;
RedRookFlag = 1 << 3;
RedKnightFlag = 1 << 4;
RedCannonFlag = 1 << 5;
RedPawnFlag = 1 << 6;
BlackKingFlag = 1 << 7;
BlackAdvisorFlag = 1 << 8;
BlackBishopFlag = 1 << 9;
BlackRookFlag = 1 << 10;
BlackKnightFlag = 1 << 11;
BlackCannonFlag = 1 << 12;
BlackPawnFlag = 1 << 13;
EmptyFlag = 1 << 14;
InvaildFlag = 1 << 15;  # NOTE: historical misspelling of "Invalid", kept for compatibility
# Convenience unions of related flags.
AdvisorFlag = RedAdvisorFlag | BlackAdvisorFlag
BishopFlag = RedBishopFlag | BlackBishopFlag
RedKingPawnFlag = RedKingFlag | RedPawnFlag
AdvisorBishopFlag = RedAdvisorFlag | RedBishopFlag | BlackAdvisorFlag | BlackBishopFlag
# Legal squares per red piece kind, as square ids x + y * 9.
RedKingSquares = [x + y * 9 for x, y in [(3, 0), (4, 0), (5, 0), (3, 1), (4, 1), (5, 1), (3, 2), (4, 2), (5, 2)]]
RedAdvisorSquares = [x + y * 9 for x, y in [(3,0), (5,0), (4, 1), (3,2), (5,2)]]
RedBishopSquares = [x + y * 9 for x, y in [(2, 0), (6, 0), (0, 2), (4, 2), (8, 2), (2, 4), (6, 4)]]
RedRookSquares = range(90)
RedKnightSquares = range(90)
RedCannonSquares = range(90)
# Pawn start zone (even columns of ranks 3-4) plus all squares 45..89.
RedPawnSquares = [x + y * 9 for x, y in [(0, 3), (2, 3), (4, 3), (6, 3), (8, 3), (0, 4), (2, 4), (4, 4), (6, 4), (8, 4)]]
RedPawnSquares.extend(range(45, 90))
# Black squares mirror red through 89 - sq.
BlackKingSquares = [89 - sq for sq in RedKingSquares]
BlackAdvisorSquares = [89 - sq for sq in RedAdvisorSquares]
BlackBishopSquares = [89 - sq for sq in RedBishopSquares]
BlackRookSquares = [89 - sq for sq in RedRookSquares]
BlackKnightSquares = [89 - sq for sq in RedKnightSquares]
BlackCannonSquares = [89 - sq for sq in RedCannonSquares]
BlackPawnSquares = [89 - sq for sq in RedPawnSquares]
def SquareFlags():
    """Per-square bitmask of every piece kind allowed to stand there.

    Index 90 is the off-board square and carries only InvaildFlag.
    """
    flags = [0] * 91
    # Zone-restricted pieces; the black zone mirrors red through 89 - sq.
    restricted = (
        (RedKingSquares, RedKingFlag, BlackKingFlag),
        (RedAdvisorSquares, RedAdvisorFlag, BlackAdvisorFlag),
        (RedBishopSquares, RedBishopFlag, BlackBishopFlag),
        (RedPawnSquares, RedPawnFlag, BlackPawnFlag),
    )
    for squares, red_flag, black_flag in restricted:
        for sq in squares:
            flags[sq] |= red_flag
            flags[89 - sq] |= black_flag
    # Rooks, knights, cannons and "empty" are legal on every board square.
    everywhere = (RedRookFlag | RedKnightFlag | RedCannonFlag
                  | BlackRookFlag | BlackKnightFlag | BlackCannonFlag
                  | EmptyFlag)
    for sq in range(90):
        flags[sq] |= everywhere
    flags[90] |= InvaildFlag
    return flags
SquareFlags = SquareFlags()
def u64(i):
    """Format *i* as a C unsigned 64-bit literal (ULL suffix)."""
    return '%sULL' % i
def u32(i):
    """Format *i* as a C unsigned 32-bit literal (UL suffix)."""
    return '%sUL' % i
def s32(i):
    """Format *i* as a C signed long literal (L suffix)."""
    return '%sL' % i
def d1a_str(array_1d, func):
    """Render a 1-D array as 'func(a), func(b), ...' for a C initializer."""
    return ', '.join(func(item) for item in array_1d)
def d2a_str(array_2d, func):
    """Render a 2-D array as brace-wrapped rows, one row per line."""
    rows = []
    for row in array_2d:
        rows.append('{%s}' % d1a_str(row, func))
    return ',\n'.join(rows)
import os
# Directory layout: this script lives in <work>/<script_dir>/, templates
# in <script_dir>/template/, generated .cpp files go to <work>/src/.
script_path = os.path.abspath(os.path.dirname(__file__))
work_path = os.path.dirname(script_path)
folium_path = os.path.join(work_path, 'src')
template_path = os.path.join(script_path, 'template')
# Ensure the template directory exists (the .tmpl files themselves must
# still be supplied by hand).
if not os.path.exists(template_path):
    os.mkdir(template_path)
if __name__ == "__main__":
    # Smoke check: print the resolved paths.
    print work_path
    print script_path
    print folium_path
| Python |
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
# Orthogonal neighbour accessors; 90 is the off-board sentinel square.
def u(x): return SquareUps[x]
def d(x): return SquareDowns[x]
def l(x): return SquareLefts[x]
def r(x): return SquareRights[x]
# Neighbour tables, populated by info() below; square 90 maps to itself.
SquareUps = [0]*91
SquareDowns = [0]*91
SquareLefts = [0]*91
SquareRights = [0]*91
Xs = [9]*91   # column per square; sentinel value 9 for square 90
Ys = [10]*91  # row per square; sentinel value 10 for square 90
XYs = [[90]*16 for i in range(16)]  # [y][x] -> square, 90 where off-board
KnightLegs = [[90]*128 for i in range(91)]  # [src][dst] -> blocking leg square (see leg())
BishopEyes = [[90]*128 for i in range(91)]  # [src][dst] -> blocking eye square (see eye())
def info():
    """Populate the coordinate tables for the 9x10 board.

    Squares are numbered x + 9*y; 90 is the shared off-board sentinel,
    which maps to itself in every direction.
    """
    def to_sq(x, y):
        # Off-board coordinates collapse to the sentinel square 90.
        if 0 <= x <= 8 and 0 <= y <= 9:
            return x + 9 * y
        return 90
    for y in range(10):
        for x in range(9):
            sq = x + 9 * y
            SquareUps[sq] = to_sq(x, y - 1)
            SquareDowns[sq] = to_sq(x, y + 1)
            SquareLefts[sq] = to_sq(x - 1, y)
            SquareRights[sq] = to_sq(x + 1, y)
            Xs[sq] = x
            Ys[sq] = y
            XYs[y][x] = sq
    SquareUps[90] = SquareDowns[90] = SquareLefts[90] = SquareRights[90] = 90
info()
def leg():
    """Fill KnightLegs: KnightLegs[src][dst] is the blocking "leg" square
    of the knight jump src -> dst (90 where no jump exists)."""
    for src in range(90):
        # One orthogonal step picks the leg; a second step in the same
        # direction followed by either perpendicular turn lands on dst.
        for step, turns in ((u, (l, r)), (d, (l, r)), (l, (u, d)), (r, (u, d))):
            pivot = step(src)
            for turn in turns:
                dst = turn(step(pivot))
                if dst != 90:
                    KnightLegs[src][dst] = pivot
leg()
def eye():
    """Fill BishopEyes: BishopEyes[src][dst] is the midpoint "eye" square
    of the bishop move src -> dst (90 where no move exists)."""
    for src in range(90):
        if not (SquareFlags[src] & BishopFlag):
            continue
        for dst in (u(u(l(l(src)))), u(u(r(r(src)))),
                    d(d(l(l(src)))), d(d(r(r(src))))):
            if SquareFlags[dst] & BishopFlag:
                # src and dst sit two files and two ranks apart, so their
                # sum is even and the midpoint is exact.
                BishopEyes[src][dst] = (src + dst) // 2
eye()
# Per-piece lookup tables indexed by piece id: 0..15 red pieces,
# 16..31 black pieces, 32 empty, 33 off-board.
# PF: capability flag, PT: base type, PC: color code (2 = empty, 3 = invalid).
PF = [RedKingFlag]+[RedAdvisorFlag]*2+[RedBishopFlag]*2\
    +[RedRookFlag]*2+[RedKnightFlag]*2+[RedCannonFlag]*2+[RedPawnFlag]*5\
    +[BlackKingFlag]+[BlackAdvisorFlag]*2+[BlackBishopFlag]*2\
    +[BlackRookFlag]*2+[BlackKnightFlag]*2+[BlackCannonFlag]*2+[BlackPawnFlag]*5\
    +[EmptyFlag, InvaildFlag]
PT = [RedKing]+[RedAdvisor]*2+[RedBishop]*2+[RedRook]*2+[RedKnight]*2+[RedCannon]*2+[RedPawn]*5\
    +[BlackKing]+[BlackAdvisor]*2+[BlackBishop]*2+[BlackRook]*2+[BlackKnight]*2+[BlackCannon]*2+[BlackPawn]*5\
    +[EmptyType]+[InvalidType]
PC = [0]*16 + [1]*16 + [2] + [3]
def KnightMoves():
    """Build the knight move table: 91 squares x 16 slots.

    Each filled slot packs (leg square << 8) | destination square, where
    the "leg" is the adjacent blocking square of the jump.  Unused slots
    keep the sentinel 23130 (0x5A5A).
    """
    table = [[23130] * 16 for _ in range(91)]
    for sq in range(90):
        row = table[sq]
        # One orthogonal step picks the leg; a second step in the same
        # direction followed by either perpendicular turn reaches the
        # two destinations behind that leg.
        for step, turns in ((u, (l, r)), (d, (l, r)), (l, (u, d)), (r, (u, d))):
            leg = step(sq)
            for turn in turns:
                dst = turn(step(leg))
                if dst != 90:  # 90 is the off-board sentinel
                    row[row.index(23130)] = (leg << 8) | dst
    return table
KnightMoves = KnightMoves()
def RedKingPawnMoves():
    """91x8 table of red king / red pawn destinations per square.

    Slots fill in discovery order; unused slots keep the sentinel 90.
    """
    table = [[90] * 8 for _ in range(91)]
    for sq in range(90):
        flag = SquareFlags[sq]
        if flag & RedKingFlag:
            # King: any orthogonal neighbour that is also a red-king square.
            candidates = (u(sq), d(sq), l(sq), r(sq))
            keep = RedKingFlag
        elif flag & RedPawnFlag:
            # Pawn: forward plus the two sideways steps, kept only where
            # a red pawn may legally stand.
            candidates = (d(sq), l(sq), r(sq))
            keep = RedPawnFlag
        else:
            continue
        row = table[sq]
        for dst in candidates:
            if SquareFlags[dst] & keep:
                row[row.index(90)] = dst
    return table
RedKingPawnMoves = RedKingPawnMoves()
def BlackKingPawnMoves():
    """91x8 table of black king / black pawn destinations per square.

    Mirror of RedKingPawnMoves; unused slots keep the sentinel 90.
    """
    table = [[90] * 8 for _ in range(91)]
    for sq in range(90):
        flag = SquareFlags[sq]
        if flag & BlackKingFlag:
            candidates = (u(sq), d(sq), l(sq), r(sq))
            keep = BlackKingFlag
        elif flag & BlackPawnFlag:
            # Black pawns advance with u() — the opposite of red's d().
            candidates = (u(sq), l(sq), r(sq))
            keep = BlackPawnFlag
        else:
            continue
        row = table[sq]
        for dst in candidates:
            if SquareFlags[dst] & keep:
                row[row.index(90)] = dst
    return table
BlackKingPawnMoves = BlackKingPawnMoves()
def AdvisorBishopMoves():
    """91x8 table of advisor/bishop destinations per square.

    Bishops move two diagonal steps, advisors one; unused slots keep the
    sentinel 90.
    """
    table = [[90] * 8 for _ in range(91)]
    for sq in range(90):
        flag = SquareFlags[sq]
        if flag & BishopFlag:
            candidates = (u(u(r(r(sq)))), u(u(l(l(sq)))),
                          d(d(r(r(sq)))), d(d(l(l(sq)))))
            keep = BishopFlag
        elif flag & AdvisorFlag:
            candidates = (u(l(sq)), u(r(sq)), d(l(sq)), d(r(sq)))
            keep = AdvisorFlag
        else:
            continue
        row = table[sq]
        for dst in candidates:
            if SquareFlags[dst] & keep:
                row[row.index(90)] = dst
    return table
AdvisorBishopMoves = AdvisorBishopMoves()
def main():
    """Render src/xq_data.cpp from its template with all generated tables."""
    subs = {
        'xs': d1a_str(Xs, u32),
        'ys': d1a_str(Ys, u32),
        'xys': d2a_str(XYs, u32),
        'ups': d1a_str(SquareUps, u32),
        'downs': d1a_str(SquareDowns, u32),
        'lefts': d1a_str(SquareLefts, u32),
        'rights': d1a_str(SquareRights, u32),
        'square_flags': d1a_str(SquareFlags, u32),
        'ptypes': d1a_str(PT, u32),
        'pflags': d1a_str(PF, u32),
        'pcolors': d1a_str(PC, u32),
        'kl': d2a_str(KnightLegs, u32),
        'be': d2a_str(BishopEyes, u32),
        'nm': d2a_str(KnightMoves, u32),
        'rkpm': d2a_str(RedKingPawnMoves, u32),
        'bkpm': d2a_str(BlackKingPawnMoves, u32),
        'abm': d2a_str(AdvisorBishopMoves, u32),
    }
    tmpl = string.Template(open(os.path.join(template_path, 'xq_data.cpp.tmpl'), 'rb').read())
    target = os.path.join(folium_path, 'xq_data.cpp')
    open(target, 'wb').write(str(tmpl.safe_substitute(subs)))
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
def main():
line_infos = [[0]*1024 for i in range(10)]
for idx in range(10):
for flag in range(1024):
flag |= 0xFC00
p1 = (idx and [idx-1] or [0xF])[0]
while not flag & (1 << p1):
p1 = (p1 and [p1-1] or [0xF])[0]
p2 = (p1 and [p1-1] or [0xF])[0]
while not flag & (1 << p2):
p2 = (p2 and [p2-1] or [0xF])[0]
n1 = idx + 1
while not flag & (1 << n1):
n1 += 1
n2 = n1 + 1
while not flag & (1 << n2):
n2 += 1
line_infos[idx][flag & 1023] = p1 | (p2 << 8) | (n1 << 16) | (n2 << 24)
dict = {}
dict['infos'] = d2a_str(line_infos, u32)
template = open(os.path.join(template_path, 'bitlines_data.cpp.tmpl'), 'rb').read()
template = string.Template(template)
path = os.path.join(folium_path, 'bitlines_data.cpp')
open(path, 'wb').write(str(template.safe_substitute(dict)))
if __name__ == "__main__":
main()
| Python |
import sys
# SCons build entry point: '#' paths are relative to this SConstruct's root.
include_dirs = ['#include']
library_dirs = ['#bin']
env = Environment(CPPPATH=include_dirs, LIBPATH=library_dirs)
if sys.platform == "win32":
    env.Tool('mingw')  # force MinGW instead of the default MSVC toolchain
Export("env")
env.SConscript('src/sconscript', build_dir='build/folium', duplicate=0)
env.SConscript('qianhong/sconscript', build_dir='build/qianhong', duplicate=0)
| Python |
# Regenerate every generated C++ data file used by the engine sources.
import xq_data
xq_data.main()        # -> src/xq_data.cpp (board geometry / move tables)
import engine_data
engine_data.main()    # -> src/engine_data.cpp (values, locks, keys)
import history_data
history_data.main()   # -> src/history_data.cpp (capture ordering scores)
import bitlines_data
bitlines_data.main()  # -> src/bitlines_data.cpp (line scan tables)
| Python |
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
from xq_data import *
# Piece-square score tables, one entry per board square plus the off-board
# sentinel at index 90 (left 0).  Filled by value() below; each Black table
# holds the negated red score mirrored through square 89 - sq.
RedKingPawnValues = [0]*91
BlackKingPawnValues = [0]*91
AdvisorBishopValues = [0]*91  # shared by advisors and bishops (both sides)
RedRookValues = [0]*91
BlackRookValues = [0]*91
RedKnightValues = [0]*91
BlackKnightValues = [0]*91
RedCannonValues = [0]*91
BlackCannonValues = [0]*91
def value():
    """Fill the piece-square value tables declared above.

    Each entry = base material value + positional bonus for that square;
    black entries are the red entry negated and mirrored via 89 - sq.
    """
    KingBaseValue = 5000
    AdvisorBaseValue = 40
    BishopBaseValue = 40
    RookBaseValue = 200
    KnightBaseValue = 88
    CannonBaseValue = 96
    PawnBaseValue = 9
    # Positional bonuses from red's perspective; rank 0 is the red palace
    # side (RedKingSquares span ranks 0-2).
    RedKingPawnPositionValues = [
        0, 0, 0, 1, 5, 1, 0, 0, 0,
        0, 0, 0, -8, -8, -8, 0, 0, 0,
        0, 0, 0, -9, -9, -9, 0, 0, 0,
        -2, 0, -2, 0, 6, 0, -2, 0, -2,
        3, 0, 4, 0, 7, 0, 4, 0, 3,
        10, 18, 22, 35, 40, 35, 22, 18, 10,
        20, 27, 30, 40, 42, 40, 35, 27, 20,
        20, 30, 45, 55, 55, 55, 45, 30, 20,
        20, 30, 50, 65, 70, 65, 50, 30, 20,
        0, 0, 0, 2, 4, 2, 0, 0, 0,]
    RedAdvisorBishopPositionValues = [
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 3, 0, 0, 0, 0,
        -2, 0, 0, 0, 3, 0, 0, 0, -2,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,]
    RedRookPositionValues = [
        -6, 6, 4, 12, 0, 12, 4, 6, -6,
        5, 8, 6, 12, 0, 12, 6, 8, 5,
        -2, 8, 4, 12, 12, 12, 4, 8, -2,
        4, 9, 4, 12, 14, 12, 4, 9, 4,
        8, 12, 12, 14, 15, 14, 12, 12, 8,
        8, 11, 11, 14, 15, 14, 11, 11, 8,
        6, 13, 13, 16, 16, 16, 13, 13, 6,
        6, 8, 7, 14, 16, 14, 7, 8, 6,
        6, 12, 9, 16, 33, 16, 9, 12, 6,
        6, 8, 7, 13, 14, 13, 7, 8, 6,]
    RedKnightPositionValues = [
        0, -3, 2, 0, 2, 0, 2, -3, 0,
        -3, 2, 4, 5, -10, 5, 4, 2, -3,
        5, 4, 6, 7, 4, 7, 6, 4, 5,
        4, 6, 10, 7, 10, 7, 10, 6, 4,
        2, 10, 13, 14, 15, 14, 13, 10, 2,
        2, 12, 11, 15, 16, 15, 11, 12, 2,
        5, 20, 12, 19, 12, 19, 12, 20, 5,
        4, 10, 11, 15, 11, 15, 11, 10, 4,
        2, 8, 15, 9, 6, 9, 15, 8, 2,
        2, 2, 2, 8, 2, 8, 2, 2, 2,]
    RedCannonPositionValues = [
        0, 0, 1, 3, 3, 3, 1, 0, 0,
        0, 1, 2, 2, 2, 2, 2, 1, 0,
        1, 0, 4, 3, 5, 3, 4, 0, 1,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        -1, 0, 3, 0, 4, 0, 3, 0, -1,
        0, 0, 0, 0, 4, 0, 0, 0, 0,
        0, 3, 3, 2, 4, 2, 3, 3, 0,
        1, 1, 0, -5, -4, -5, 0, 1, 1,
        2, 2, 0, -4, -7, -4, 0, 2, 2,
        4, 4, 0, -5, -6, -5, 0, 4, 4,]
    for sq in range(90):
        flag = SquareFlags[sq]
        if flag & RedKingFlag:
            # Kings share the king/pawn table with the pawn branch below.
            RedKingPawnValues[sq] = KingBaseValue + RedKingPawnPositionValues[sq]
            BlackKingPawnValues[89 - sq] = -RedKingPawnValues[sq]
        if flag & RedAdvisorFlag:
            AdvisorBishopValues[sq] = AdvisorBaseValue + RedAdvisorBishopPositionValues[sq]
            AdvisorBishopValues[89 - sq] = -AdvisorBishopValues[sq]
        if flag & RedBishopFlag:
            AdvisorBishopValues[sq] = BishopBaseValue + RedAdvisorBishopPositionValues[sq]
            AdvisorBishopValues[89 - sq] = -AdvisorBishopValues[sq]
        # Rooks, knights and cannons may stand on any square.
        RedRookValues[sq] = RookBaseValue + RedRookPositionValues[sq]
        BlackRookValues[89 - sq] = -RedRookValues[sq]
        RedKnightValues[sq] = KnightBaseValue + RedKnightPositionValues[sq]
        BlackKnightValues[89 - sq] = -RedKnightValues[sq]
        RedCannonValues[sq] = CannonBaseValue + RedCannonPositionValues[sq]
        BlackCannonValues[89 - sq] = -RedCannonValues[sq]
        if flag & RedPawnFlag:
            RedKingPawnValues[sq] = PawnBaseValue + RedKingPawnPositionValues[sq]
            BlackKingPawnValues[89 - sq] = -RedKingPawnValues[sq]
value()
# Per-square random hash tables filled by hash() below: 64-bit "locks" and
# 32-bit "keys" per piece family (Zobrist-style position hashing).  Index 90
# is the off-board sentinel and stays 0.
RedKingPawnLocks = [0]*91
BlackKingPawnLocks = [0]*91
AdvisorBishopLocks = [0]*91
RedRookLocks = [0]*91
BlackRookLocks = [0]*91
RedKnightLocks = [0]*91
BlackKnightLocks = [0]*91
RedCannonLocks = [0]*91
BlackCannonLocks = [0]*91
RedKingPawnKeys = [0]*91
BlackKingPawnKeys = [0]*91
AdvisorBishopKeys = [0]*91
RedRookKeys = [0]*91
BlackRookKeys = [0]*91
RedKnightKeys = [0]*91
BlackKnightKeys = [0]*91
RedCannonKeys = [0]*91
BlackCannonKeys = [0]*91
def hash():
    """Fill the lock/key tables with seeded random hash values.

    NOTE: shadows the builtin ``hash`` at module scope; the name is kept for
    compatibility with the call below.
    """
    from random import randint, seed
    seed(51)  # fixed seed keeps regeneration reproducible
    # randint() is inclusive on BOTH endpoints: the previous upper bounds of
    # 2**64 / 2**32 could yield a value that does not fit the generated u64 /
    # u32 C constants.  Use 2**64-1 / 2**32-1 instead.
    for sq in range(90):
        flag = SquareFlags[sq]
        if flag & RedKingPawnFlag:
            RedKingPawnLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
            RedKingPawnKeys[sq] = randint(0, 0xFFFFFFFF)
            BlackKingPawnLocks[89 - sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
            BlackKingPawnKeys[89 - sq] = randint(0, 0xFFFFFFFF)
        if flag & AdvisorBishopFlag:
            AdvisorBishopLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
            AdvisorBishopKeys[sq] = randint(0, 0xFFFFFFFF)
        RedRookLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
        RedRookKeys[sq] = randint(0, 0xFFFFFFFF)
        BlackRookLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
        BlackRookKeys[sq] = randint(0, 0xFFFFFFFF)
        RedKnightLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
        RedKnightKeys[sq] = randint(0, 0xFFFFFFFF)
        BlackKnightLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
        BlackKnightKeys[sq] = randint(0, 0xFFFFFFFF)
        RedCannonLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
        RedCannonKeys[sq] = randint(0, 0xFFFFFFFF)
        BlackCannonLocks[sq] = randint(0, 0xFFFFFFFFFFFFFFFF)
        BlackCannonKeys[sq] = randint(0, 0xFFFFFFFF)
hash()
def main():
    """Generate src/engine_data.cpp: the lock/key/value tables plus the
    per-piece and per-type C identifier dispatch arrays."""
    values = {}
    values['rkpl'] = d1a_str(RedKingPawnLocks, u64)
    values['rkpk'] = d1a_str(RedKingPawnKeys, u32)
    values['rkpv'] = d1a_str(RedKingPawnValues, s32)
    values['bkpl'] = d1a_str(BlackKingPawnLocks, u64)
    values['bkpk'] = d1a_str(BlackKingPawnKeys, u32)
    values['bkpv'] = d1a_str(BlackKingPawnValues, s32)
    values['abl'] = d1a_str(AdvisorBishopLocks, u64)
    values['abk'] = d1a_str(AdvisorBishopKeys, u32)
    values['abv'] = d1a_str(AdvisorBishopValues, s32)
    values['rrl'] = d1a_str(RedRookLocks, u64)
    values['rrk'] = d1a_str(RedRookKeys, u32)
    values['rrv'] = d1a_str(RedRookValues, s32)
    values['brl'] = d1a_str(BlackRookLocks, u64)
    values['brk'] = d1a_str(BlackRookKeys, u32)
    values['brv'] = d1a_str(BlackRookValues, s32)
    values['rnl'] = d1a_str(RedKnightLocks, u64)
    values['rnk'] = d1a_str(RedKnightKeys, u32)
    values['rnv'] = d1a_str(RedKnightValues, s32)
    values['bnl'] = d1a_str(BlackKnightLocks, u64)
    values['bnk'] = d1a_str(BlackKnightKeys, u32)
    values['bnv'] = d1a_str(BlackKnightValues, s32)
    values['rcl'] = d1a_str(RedCannonLocks, u64)
    values['rck'] = d1a_str(RedCannonKeys, u32)
    values['rcv'] = d1a_str(RedCannonValues, s32)
    values['bcl'] = d1a_str(BlackCannonLocks, u64)
    values['bck'] = d1a_str(BlackCannonKeys, u32)
    values['bcv'] = d1a_str(BlackCannonValues, s32)
    # Dead locals removed: the original recomputed PF/PT/PC here but never
    # used them (xq_data already provides the module-level copies).
    # Per-piece (32 entries) C table names, one per piece id.
    PL = ['s_red_king_pawn_locks']+['s_advisor_bishop_locks']*4+['s_red_rook_locks']*2+['s_red_knight_locks']*2\
        +['s_red_cannon_locks']*2+['s_red_king_pawn_locks']*5\
        +['s_black_king_pawn_locks']+['s_advisor_bishop_locks']*4+['s_black_rook_locks']*2+['s_black_knight_locks']*2\
        +['s_black_cannon_locks']*2+['s_black_king_pawn_locks']*5
    PK = ['s_red_king_pawn_keys']+['s_advisor_bishop_keys']*4+['s_red_rook_keys']*2+['s_red_knight_keys']*2\
        +['s_red_cannon_keys']*2+['s_red_king_pawn_keys']*5\
        +['s_black_king_pawn_keys']+['s_advisor_bishop_keys']*4+['s_black_rook_keys']*2+['s_black_knight_keys']*2\
        +['s_black_cannon_keys']*2+['s_black_king_pawn_keys']*5
    PV = ['s_red_king_pawn_values']+['s_advisor_bishop_values']*4+['s_red_rook_values']*2+['s_red_knight_values']*2\
        +['s_red_cannon_values']*2+['s_red_king_pawn_values']*5\
        +['s_black_king_pawn_values']+['s_advisor_bishop_values']*4+['s_black_rook_values']*2+['s_black_knight_values']*2\
        +['s_black_cannon_values']*2+['s_black_king_pawn_values']*5
    values['plocks'] = d1a_str(PL, lambda x: x)
    values['pkeys'] = d1a_str(PK, lambda x: x)
    values['pvalues'] = d1a_str(PV, lambda x: x)
    # Per-type (14 entries) C table names, one per folded piece type.
    TL = ['s_red_king_pawn_locks']+['s_advisor_bishop_locks']*2\
        +['s_red_rook_locks','s_red_knight_locks','s_red_cannon_locks','s_red_king_pawn_locks']\
        +['s_black_king_pawn_locks']+['s_advisor_bishop_locks']*2\
        +['s_black_rook_locks','s_black_knight_locks','s_black_cannon_locks','s_black_king_pawn_locks']
    TK = ['s_red_king_pawn_keys']+['s_advisor_bishop_keys']*2\
        +['s_red_rook_keys','s_red_knight_keys','s_red_cannon_keys','s_red_king_pawn_keys']\
        +['s_black_king_pawn_keys']+['s_advisor_bishop_keys']*2\
        +['s_black_rook_keys','s_black_knight_keys','s_black_cannon_keys','s_black_king_pawn_keys']
    TV = ['s_red_king_pawn_values']+['s_advisor_bishop_values']*2\
        +['s_red_rook_values','s_red_knight_values','s_red_cannon_values','s_red_king_pawn_values']\
        +['s_black_king_pawn_values']+['s_advisor_bishop_values']*2\
        +['s_black_rook_values','s_black_knight_values','s_black_cannon_values','s_black_king_pawn_values']
    values['tlocks'] = d1a_str(TL, lambda x: x)
    values['tkeys'] = d1a_str(TK, lambda x: x)
    values['tvalues'] = d1a_str(TV, lambda x: x)
    template = string.Template(open(os.path.join(template_path, 'engine_data.cpp.tmpl'), 'rb').read())
    path = os.path.join(folium_path, 'engine_data.cpp')
    open(path, 'wb').write(str(template.safe_substitute(values)))
if __name__ == "__main__":
main() | Python |
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
from xq_data import *
def capture_scores():
    """Build the 32x32 capture-ordering table indexed [attacker][victim].

    Score = victim value - attacker value (MVV/LVA style); only cross-color
    pairs get a nonzero score.  e[t]/m[t] are the victim/attacker values per
    folded piece type (king, advisor, bishop, rook, knight, cannon, pawn).
    """
    scores = [[0] * 32 for _ in range(32)]
    e = [10000, 1041, 1040, 2000, 1088, 1096, 1020]
    m = [1000, 41, 40, 200, 88, 96, 20]
    # Dead code removed: a nested 'level' helper referenced an undefined
    # 'levels' table and was never called.
    def piece_color(piece):
        return PC[piece]  # 0 red, 1 black (pieces 0-15 / 16-31)
    def piece_type(piece):  # renamed from 'type' (shadowed the builtin)
        t = PT[piece]
        if t >= 7:
            t -= 7  # fold black piece types onto the red range 0..6
        return t
    for src_piece in range(32):
        for dst_piece in range(32):
            if piece_color(src_piece) != piece_color(dst_piece):
                scores[src_piece][dst_piece] = e[piece_type(dst_piece)] - m[piece_type(src_piece)]
    return scores
capture_scores = capture_scores()
def main():
    """Generate src/history_data.cpp from its template."""
    values = {'scores': d2a_str(capture_scores, u32)}
    tmpl_path = os.path.join(template_path, 'history_data.cpp.tmpl')
    tmpl = string.Template(open(tmpl_path, 'rb').read())
    out_path = os.path.join(folium_path, 'history_data.cpp')
    open(out_path, 'wb').write(str(tmpl.safe_substitute(values)))
if __name__ == "__main__":
    main()
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
def compute_line_infos():
    """Return the 10x1024 line-scan table used by the bit-line move generator.

    For a piece at index ``idx`` of a 10-slot line whose occupancy is the
    low 10 bits of ``occupancy``: p1/p2 are the nearest and second-nearest
    occupied indices scanning downward, n1/n2 the nearest and second-nearest
    scanning upward, packed as ``p1 | p2<<8 | n1<<16 | n2<<24``.  Sentinel
    bits 10-15 are forced on so every scan terminates; a downward scan that
    wraps below 0 parks on index 0xF.
    """
    infos = [[0] * 1024 for _ in range(10)]
    for idx in range(10):
        for occupancy in range(1024):
            bits = occupancy | 0xFC00  # sentinel bits guarantee termination
            prev1 = idx - 1 if idx else 0xF
            while not bits & (1 << prev1):
                prev1 = prev1 - 1 if prev1 else 0xF
            prev2 = prev1 - 1 if prev1 else 0xF
            while not bits & (1 << prev2):
                prev2 = prev2 - 1 if prev2 else 0xF
            next1 = idx + 1
            while not bits & (1 << next1):
                next1 += 1
            next2 = next1 + 1
            while not bits & (1 << next2):
                next2 += 1
            infos[idx][occupancy] = prev1 | (prev2 << 8) | (next1 << 16) | (next2 << 24)
    return infos
def main():
    """Generate src/bitlines_data.cpp from its template."""
    values = {'infos': d2a_str(compute_line_infos(), u32)}
    tmpl = string.Template(open(os.path.join(template_path, 'bitlines_data.cpp.tmpl'), 'rb').read())
    open(os.path.join(folium_path, 'bitlines_data.cpp'), 'wb').write(str(tmpl.safe_substitute(values)))
if __name__ == "__main__":
    main()
| Python |
# Piece type codes: 0-6 red, 7-13 black (black folds onto red by -7).
RedKing = 0
RedAdvisor = 1
RedBishop = 2
RedRook = 3
RedKnight = 4
RedCannon = 5
RedPawn = 6
BlackKing = 7
BlackAdvisor = 8
BlackBishop = 9
BlackRook = 10
BlackKnight = 11
BlackCannon = 12
BlackPawn = 13
EmptyType = 14
InvalidType = 15

# One flag bit per piece type, same ordering as the codes above.
RedKingFlag = 1 << 0
RedAdvisorFlag = 1 << 1
RedBishopFlag = 1 << 2
RedRookFlag = 1 << 3
RedKnightFlag = 1 << 4
RedCannonFlag = 1 << 5
RedPawnFlag = 1 << 6
BlackKingFlag = 1 << 7
BlackAdvisorFlag = 1 << 8
BlackBishopFlag = 1 << 9
BlackRookFlag = 1 << 10
BlackKnightFlag = 1 << 11
BlackCannonFlag = 1 << 12
BlackPawnFlag = 1 << 13
EmptyFlag = 1 << 14
InvaildFlag = 1 << 15  # (sic) original spelling kept: other modules use it

# Convenience masks combining both colors / related piece groups.
AdvisorFlag = RedAdvisorFlag | BlackAdvisorFlag
BishopFlag = RedBishopFlag | BlackBishopFlag
RedKingPawnFlag = RedKingFlag | RedPawnFlag
AdvisorBishopFlag = RedAdvisorFlag | RedBishopFlag | BlackAdvisorFlag | BlackBishopFlag

# Legal home squares per restricted piece (square = x + 9*y, red side at
# low ranks); black squares are the 89 - sq mirror of the red ones.
RedKingSquares = [x + y * 9 for x, y in [(3, 0), (4, 0), (5, 0), (3, 1), (4, 1), (5, 1), (3, 2), (4, 2), (5, 2)]]
RedAdvisorSquares = [x + y * 9 for x, y in [(3, 0), (5, 0), (4, 1), (3, 2), (5, 2)]]
RedBishopSquares = [x + y * 9 for x, y in [(2, 0), (6, 0), (0, 2), (4, 2), (8, 2), (2, 4), (6, 4)]]
RedRookSquares = range(90)
RedKnightSquares = range(90)
RedCannonSquares = range(90)
RedPawnSquares = [x + y * 9 for x, y in [(0, 3), (2, 3), (4, 3), (6, 3), (8, 3), (0, 4), (2, 4), (4, 4), (6, 4), (8, 4)]]
RedPawnSquares.extend(range(45, 90))  # plus every square across the river
BlackKingSquares = [89 - sq for sq in RedKingSquares]
BlackAdvisorSquares = [89 - sq for sq in RedAdvisorSquares]
BlackBishopSquares = [89 - sq for sq in RedBishopSquares]
BlackRookSquares = [89 - sq for sq in RedRookSquares]
BlackKnightSquares = [89 - sq for sq in RedKnightSquares]
BlackCannonSquares = [89 - sq for sq in RedCannonSquares]
BlackPawnSquares = [89 - sq for sq in RedPawnSquares]
def SquareFlags():
    """Build the 91-entry per-square legality bitmask (square 90 = off-board).

    A square's mask has a piece's flag bit set when that piece may legally
    stand there; rooks/knights/cannons (both sides) and "empty" are legal
    everywhere on the board.
    """
    flags = [0] * 91
    placements = [
        (RedKingSquares, RedKingFlag, BlackKingFlag),
        (RedAdvisorSquares, RedAdvisorFlag, BlackAdvisorFlag),
        (RedBishopSquares, RedBishopFlag, BlackBishopFlag),
        (RedPawnSquares, RedPawnFlag, BlackPawnFlag),
    ]
    for squares, red_flag, black_flag in placements:
        for sq in squares:
            flags[sq] |= red_flag
            flags[89 - sq] |= black_flag  # black side is the mirror
    everywhere = (RedRookFlag | RedKnightFlag | RedCannonFlag
                  | BlackRookFlag | BlackKnightFlag | BlackCannonFlag
                  | EmptyFlag)
    for sq in range(90):
        flags[sq] |= everywhere
    flags[90] |= InvaildFlag  # only the sentinel square is "invalid"
    return flags
SquareFlags = SquareFlags()
def u64(i):
    """Format *i* as a C unsigned 64-bit literal."""
    return '%sULL' % i
def u32(i):
    """Format *i* as a C unsigned 32-bit literal."""
    return '%sUL' % i
def s32(i):
    """Format *i* as a C signed long literal."""
    return '%sL' % i
def d1a_str(array_1d, func):
    """Render a 1-D array as comma-separated entries, each formatted by *func*."""
    return ', '.join(func(item) for item in array_1d)
def d2a_str(array_2d, func):
    """Render a 2-D array as brace-wrapped rows joined with ',\\n'."""
    return ',\n'.join('{%s}' % d1a_str(row, func) for row in array_2d)
import os
# Directory layout used by all generator scripts (Python 2 file — note the
# print statements in the self-test below).
script_path = os.path.abspath(os.path.dirname(__file__))  # this scripts/ dir
work_path = os.path.dirname(script_path)                  # project root
folium_path = os.path.join(work_path, 'src')              # generated .cpp output dir
template_path = os.path.join(script_path, 'template')     # .tmpl input dir
if not os.path.exists(template_path):
    os.mkdir(template_path)
if __name__ == "__main__":
    # Quick sanity printout of the resolved paths.
    print work_path
    print script_path
    print folium_path
| Python |
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
# One-step square navigation; the tables are filled by info() further down,
# so these wrappers only work after info() has run.  Index 90 is the
# off-board sentinel square throughout.
def u(x): return SquareUps[x]     # square one rank up (y - 1)
def d(x): return SquareDowns[x]   # square one rank down (y + 1)
def l(x): return SquareLefts[x]   # square one file left (x - 1)
def r(x): return SquareRights[x]  # square one file right (x + 1)
SquareUps = [0]*91
SquareDowns = [0]*91
SquareLefts = [0]*91
SquareRights = [0]*91
Xs = [9]*91   # file (0-8) of each square; sentinel value 9 for square 90
Ys = [10]*91  # rank (0-9) of each square; sentinel value 10 for square 90
XYs = [[90]*16 for i in range(16)]          # [rank][file] -> square, 90 where unused
KnightLegs = [[90]*128 for i in range(91)]  # [src][dst] -> the knight-move "leg" square
BishopEyes = [[90]*128 for i in range(91)]  # [src][dst] -> the bishop-move "eye" (midpoint) square
def info():
    """Populate the coordinate tables and one-step navigation tables.

    Off-board destinations (and every direction from square 90) map to the
    sentinel square 90.
    """
    def to_square(x, y):
        if 0 <= x <= 8 and 0 <= y <= 9:
            return x + 9 * y
        return 90
    for sq in range(90):
        y, x = divmod(sq, 9)
        SquareUps[sq] = to_square(x, y - 1)
        SquareDowns[sq] = to_square(x, y + 1)
        SquareLefts[sq] = to_square(x - 1, y)
        SquareRights[sq] = to_square(x + 1, y)
        Xs[sq] = x
        Ys[sq] = y
        XYs[y][x] = sq
    for table in (SquareUps, SquareDowns, SquareLefts, SquareRights):
        table[90] = 90  # the sentinel navigates to itself
info()
def leg():
    """Fill KnightLegs: KnightLegs[src][dst] = the adjacent "leg" square a
    knight move from src to dst passes over (the square that blocks it)."""
    step_u = lambda s: SquareUps[s]
    step_d = lambda s: SquareDowns[s]
    step_l = lambda s: SquareLefts[s]
    step_r = lambda s: SquareRights[s]
    # For each orthogonal direction, the knight continues one more step in
    # that direction and then one step to either side.
    directions = ((step_u, step_l, step_r),
                  (step_d, step_l, step_r),
                  (step_l, step_u, step_d),
                  (step_r, step_u, step_d))
    for src in range(90):
        for ahead, side_a, side_b in directions:
            knee = ahead(src)
            for side in (side_a, side_b):
                dst = side(ahead(knee))
                if dst != 90:
                    KnightLegs[src][dst] = knee
leg()
def eye():
    """Fill BishopEyes: BishopEyes[src][dst] = the midpoint "eye" square a
    bishop move from src to dst passes over (the square that blocks it)."""
    up = lambda s: SquareUps[s]
    down = lambda s: SquareDowns[s]
    left = lambda s: SquareLefts[s]
    right = lambda s: SquareRights[s]
    for src in range(90):
        if not (SquareFlags[src] & BishopFlag):
            continue  # only bishop-legal source squares matter
        for vert, horiz in ((up, left), (up, right), (down, left), (down, right)):
            dst = vert(vert(horiz(horiz(src))))  # two diagonal steps
            if SquareFlags[dst] & BishopFlag:
                BishopEyes[src][dst] = (src + dst) / 2
eye()
# Per-piece-id lookup tables (34 entries): ids 0-15 are the 16 red pieces,
# 16-31 the 16 black pieces, 32 empty, 33 invalid.
PF = [RedKingFlag]+[RedAdvisorFlag]*2+[RedBishopFlag]*2\
    +[RedRookFlag]*2+[RedKnightFlag]*2+[RedCannonFlag]*2+[RedPawnFlag]*5\
    +[BlackKingFlag]+[BlackAdvisorFlag]*2+[BlackBishopFlag]*2\
    +[BlackRookFlag]*2+[BlackKnightFlag]*2+[BlackCannonFlag]*2+[BlackPawnFlag]*5\
    +[EmptyFlag, InvaildFlag]
PT = [RedKing]+[RedAdvisor]*2+[RedBishop]*2+[RedRook]*2+[RedKnight]*2+[RedCannon]*2+[RedPawn]*5\
    +[BlackKing]+[BlackAdvisor]*2+[BlackBishop]*2+[BlackRook]*2+[BlackKnight]*2+[BlackCannon]*2+[BlackPawn]*5\
    +[EmptyType]+[InvalidType]
# Piece colors: 0 = red, 1 = black, 2 = empty, 3 = invalid.
PC = [0]*16 + [1]*16 + [2] + [3]
def KnightMoves():
    """Precompute knight move lists: each used slot packs (leg << 8) | dst;
    the value 23130 (0x5A5A) marks unused slots."""
    table = [[23130] * 16 for _ in range(91)]
    for sq in range(90):
        slots = table[sq]
        # One more step ahead, then one step to either side.
        for ahead, sides in ((u, (l, r)), (d, (l, r)), (l, (u, d)), (r, (u, d))):
            knee = ahead(sq)
            for side in sides:
                dst = side(ahead(knee))
                if dst != 90:
                    slots[slots.index(23130)] = (knee << 8) | dst
    return table
KnightMoves = KnightMoves()
def RedKingPawnMoves():
    """Precompute destination lists for the red king and red pawns per
    square; the value 90 marks unused slots."""
    table = [[90] * 8 for _ in range(91)]
    for src in range(90):
        slots = table[src]
        flag = SquareFlags[src]
        if flag & RedKingFlag:
            # King: any orthogonal step that stays on a king-legal square.
            dests = [t for t in (u(src), d(src), l(src), r(src)) if SquareFlags[t] & RedKingFlag]
        elif flag & RedPawnFlag:
            # Pawn: the d() step is the advance; sideways steps only count
            # where the target is still pawn-legal (i.e. across the river).
            dests = [t for t in (d(src), l(src), r(src)) if SquareFlags[t] & RedPawnFlag]
        else:
            dests = []
        for dst in dests:
            slots[slots.index(90)] = dst
    return table
RedKingPawnMoves = RedKingPawnMoves()
def BlackKingPawnMoves():
    """Precompute destination lists for the black king and black pawns per
    square; the value 90 marks unused slots."""
    table = [[90] * 8 for _ in range(91)]
    for src in range(90):
        slots = table[src]
        flag = SquareFlags[src]
        if flag & BlackKingFlag:
            dests = [t for t in (u(src), d(src), l(src), r(src)) if SquareFlags[t] & BlackKingFlag]
        elif flag & BlackPawnFlag:
            # Black pawns advance via the u() step (mirror of the red side).
            dests = [t for t in (u(src), l(src), r(src)) if SquareFlags[t] & BlackPawnFlag]
        else:
            dests = []
        for dst in dests:
            slots[slots.index(90)] = dst
    return table
BlackKingPawnMoves = BlackKingPawnMoves()
def AdvisorBishopMoves():
    """Precompute destination lists for advisors and bishops per square;
    the value 90 marks unused slots."""
    table = [[90] * 8 for _ in range(91)]
    for sq in range(90):
        slots = table[sq]
        flag = SquareFlags[sq]
        if flag & BishopFlag:
            # Two diagonal steps; keep targets that are bishop-legal.
            candidates = [u(u(r(r(sq)))), u(u(l(l(sq)))), d(d(r(r(sq)))), d(d(l(l(sq))))]
            keep = BishopFlag
        elif flag & AdvisorFlag:
            # One diagonal step; keep targets that are advisor-legal.
            candidates = [u(l(sq)), u(r(sq)), d(l(sq)), d(r(sq))]
            keep = AdvisorFlag
        else:
            continue
        for dst in candidates:
            if SquareFlags[dst] & keep:
                slots[slots.index(90)] = dst
    return table
AdvisorBishopMoves = AdvisorBishopMoves()
def main():
    """Generate src/xq_data.cpp by substituting the precomputed board
    tables into the xq_data template."""
    values = {}
    values['xs'] = d1a_str(Xs, u32)
    values['ys'] = d1a_str(Ys, u32)
    values['xys'] = d2a_str(XYs, u32)
    values['ups'] = d1a_str(SquareUps, u32)
    values['downs'] = d1a_str(SquareDowns, u32)
    values['lefts'] = d1a_str(SquareLefts, u32)
    values['rights'] = d1a_str(SquareRights, u32)
    values['square_flags'] = d1a_str(SquareFlags, u32)
    values['ptypes'] = d1a_str(PT, u32)
    values['pflags'] = d1a_str(PF, u32)
    values['pcolors'] = d1a_str(PC, u32)
    values['kl'] = d2a_str(KnightLegs, u32)
    values['be'] = d2a_str(BishopEyes, u32)
    values['nm'] = d2a_str(KnightMoves, u32)
    values['rkpm'] = d2a_str(RedKingPawnMoves, u32)
    values['bkpm'] = d2a_str(BlackKingPawnMoves, u32)
    values['abm'] = d2a_str(AdvisorBishopMoves, u32)
    tmpl = string.Template(open(os.path.join(template_path, 'xq_data.cpp.tmpl'), 'rb').read())
    out_path = os.path.join(folium_path, 'xq_data.cpp')
    open(out_path, 'wb').write(str(tmpl.safe_substitute(values)))
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
def compute_line_infos():
    """Return the 10x1024 line-scan table used by the bit-line move generator.

    For a piece at index ``idx`` of a 10-slot line whose occupancy is the
    low 10 bits of ``occupancy``: p1/p2 are the nearest and second-nearest
    occupied indices scanning downward, n1/n2 the nearest and second-nearest
    scanning upward, packed as ``p1 | p2<<8 | n1<<16 | n2<<24``.  Sentinel
    bits 10-15 are forced on so every scan terminates; a downward scan that
    wraps below 0 parks on index 0xF.
    """
    infos = [[0] * 1024 for _ in range(10)]
    for idx in range(10):
        for occupancy in range(1024):
            bits = occupancy | 0xFC00  # sentinel bits guarantee termination
            prev1 = idx - 1 if idx else 0xF
            while not bits & (1 << prev1):
                prev1 = prev1 - 1 if prev1 else 0xF
            prev2 = prev1 - 1 if prev1 else 0xF
            while not bits & (1 << prev2):
                prev2 = prev2 - 1 if prev2 else 0xF
            next1 = idx + 1
            while not bits & (1 << next1):
                next1 += 1
            next2 = next1 + 1
            while not bits & (1 << next2):
                next2 += 1
            infos[idx][occupancy] = prev1 | (prev2 << 8) | (next1 << 16) | (next2 << 24)
    return infos
def main():
    """Generate src/bitlines_data.cpp from its template."""
    values = {'infos': d2a_str(compute_line_infos(), u32)}
    tmpl = string.Template(open(os.path.join(template_path, 'bitlines_data.cpp.tmpl'), 'rb').read())
    open(os.path.join(folium_path, 'bitlines_data.cpp'), 'wb').write(str(tmpl.safe_substitute(values)))
if __name__ == "__main__":
    main()
| Python |
import random
import _xq
class Book(object):
    """Base class for opening books over mirror-normalized positions.

    Relies on a subclass-provided readmoves(fen) -> {ucci_move: weight}.
    """
    def readflag(self, fen):
        # Collect the red-to-move mirrors of this position (flags 0-3).
        _ = []
        for flag in range(4):
            new= _xq.mirror4fen(fen, flag)
            if not new.endswith('b'):
                _.append([new, flag])
        _.sort()
        # NOTE(review): after the sort, _[0][0] >= _[1][0] only holds when the
        # two FENs are equal, so this usually yields _[1] (the second-smallest
        # mirror); it also assumes at least two mirrors survived the filter —
        # confirm both behaviors are intended.
        return _[0] if _[0][0] >= _[1][0] else _[1]
    def search(self, fen, bans):
        # Pick a weighted-random book move for fen, skipping banned moves.
        # Returns None implicitly when no playable weight remains.
        fen, flag = self.readflag(fen)
        moves = self.readmoves(fen)
        # Map the stored moves back into the caller's board orientation.
        moves = [(_xq.mirror4uccimove(move, flag), moves[move]) for move in moves]
        moves = [(move, score) for move, score in moves if move not in bans]
        scores = sum(score for move, score in moves)
        if scores == 0:
            return
        # Roulette-wheel selection proportional to each move's weight.
        idx = random.randint(0, scores-1)
        for move, score in moves:
            if score > idx:
                return move
            idx = idx - score
    def addflag(self, fen, move):
        # Normalize (fen, move) the same way readflag normalizes positions;
        # returns [fen, move, flag].
        _ = []
        for flag in range(4):
            nf, nm = _xq.mirror4fen(fen, flag), _xq.mirror4uccimove(move, flag)
            if not nf.endswith('b'):
                _.append([nf, nm, flag])
        # NOTE(review): picks the second-smallest entry, mirroring readflag's
        # usual result; assumes len(_) >= 2 — confirm.
        return sorted(_)[1]
| Python |
import os
import sys
import book
class dictbook(book.Book):
    """Opening book backed by a plain-text 'book.dict' file loaded at startup."""
    def __init__(self):
        book.Book.__init__(self)
        self.load()
    def load(self):
        """Parse book.dict (lines of 'fen_move:score_move:score...') into self.dict."""
        path = os.path.join(os.path.dirname(sys.argv[0]), 'book.dict')
        self.dict = {}
        for line in open(path):
            parts = line.split('_')
            fen = parts[0]
            for entry in parts[1:]:
                move, score = entry.split(':')
                self.add(fen, move, score)
    def add(self, fen, move, score):
        """Accumulate *score* for (fen, move) under the normalized orientation."""
        fen, move, flag = self.addflag(fen, move)
        bucket = self.dict.setdefault(fen, {})
        bucket[move] = bucket.get(move, 0) + int(score)
    def readmoves(self, fen):
        """Return the {move: weight} mapping for *fen* ({} when unknown)."""
        return self.dict.get(fen, {})
import time
import _xq
import snake.book.dictbook
class Engine(_xq.UCCI):
    """UCCI (Universal Chinese Chess Interface) protocol driver around the
    native _xq.UCCI engine, with a dictbook opening book."""
    def __init__(self):
        _xq.UCCI.__init__(self)
        self.book = snake.book.dictbook.dictbook()
    def run(self):
        """Announce engine identity/options and enter the command loop."""
        _xq.writeline('id name folium')
        _xq.writeline('id author Wangmao Lin')
        _xq.writeline('id user folium users')
        _xq.writeline('option usemillisec type check default true')
        self.usemillisec = True
        _xq.writeline('ucciok')
        self.bans = []
        self.loop()
    def loop(self):
        """Blocking read/dispatch loop; returns on 'quit' or 'exit'."""
        while True:
            while not _xq.readable():
                time.sleep(0.001)  # poll stdin without busy-spinning
            line = _xq.readline()
            if line == 'isready':
                _xq.writeline('readyok')
            elif line == 'quit' or line == 'exit':
                _xq.writeline('bye')
                return
            elif line.startswith('setoption '):
                self.setoption(line[10:])
            elif line.startswith('position '):
                self.position(line[9:])
            elif line.startswith('banmoves '):
                self.banmoves(line[9:])
            elif line.startswith('go'):
                self.go(line)
            elif line.startswith('probe'):
                # NOTE(review): probe() is not defined in this class —
                # presumably inherited from _xq.UCCI; confirm.
                self.probe(line)
    def position(self, position):
        """Handle 'position': '[fen ...|startpos][ moves m1 m2 ...]'."""
        if " moves " in position:
            position, moves = position.split(" moves ")
            moves = moves.split()
        else:
            moves = []
        if position.startswith("fen "):
            fen = position[4:]
        elif position == "startpos":
            fen = "rnbakabnr/9/1c5c1/p1p1p1p1p/9/9/P1P1P1P1P/1C5C1/9/RNBAKABNR r"
        # NOTE(review): fen is unbound if the command is neither form above.
        self.load(fen)
        for move in moves:
            move = _xq.ucci2move(move)
            self.makemove(move)
        self.bans = []  # a new position clears any banned moves
    def setoption(self, option):
        # NOTE(review): the early return disables all option handling below —
        # looks like a deliberate (temporary?) switch-off; confirm.
        return
        if option.startswith('usemillisec '):
            self.usemillisec = (option[12:] == 'true')
        elif option.startswith('debug'):
            pass
    def banmoves(self, moves):
        # Moves the engine must not play (UCCI 'banmoves' command).
        self.bans = moves.split()
    def go(self, line):
        """Handle 'go': try the opening book first, otherwise run a timed search."""
        self.stop = False
        self.ponder = False
        self.draw = False
        self.depth = 255
        self.starttime = _xq.currenttime()
        # Default budget: one day, i.e. effectively unlimited.
        self.mintime = self.maxtime = self.starttime + 24*60*60
        move = self.book.search(str(self), self.bans) if self.book else None
        if move:
            _xq.writeline("info book move: %s" % move)
            _xq.writeline("info book search time: %f" % (_xq.currenttime() - self.starttime))
            _xq.writeline("bestmove %s" % move)
            return
        # Strip the 'go [ponder|draw]' prefix, leaving only the parameters.
        if line.startswith("go ponder "):
            self.ponder = True
            line = line[10:]
        elif line.startswith("go draw "):
            self.draw = True
            line = line[8:]
        else:
            line = line[2:]
        if self.usemillisec:
            propertime = limittime = float(24*3600*1000)
        else:
            propertime = limittime = float(24*3600)
        parameters = line.split()
        if parameters:
            parameter = parameters[0]
            if parameter == "depth":
                self.ponder = False
                parameter = parameters[1]
                if parameter != "infinite":
                    self.depth = int(parameter)
            elif parameter == "time":
                # Time control: split the remaining clock across the game.
                propertime = limittime = totaltime = float(parameters[1])
                parameters = parameters[2:]
                while parameters:
                    parameter = parameters[0]
                    if parameter == "movestogo":
                        count = int(parameters[1])
                        propertime = totaltime/count
                        limittime = totaltime
                    elif parameter == "increment":
                        increment = int(parameters[1])
                        propertime = totaltime*0.05+increment
                        limittime = totaltime*0.5
                    parameters = parameters[2:]
                limittime = min(propertime*1.618, limittime)
                propertime = propertime*0.618
                if self.usemillisec:
                    # Convert milliseconds to seconds for the native engine.
                    propertime = propertime * 0.001
                    limittime = limittime * 0.001
        self.mintime = self.starttime + propertime
        self.maxtime = self.starttime + limittime
        move = self.search([_xq.ucci2move(move) for move in self.bans])
        if move:
            _xq.writeline("bestmove %s" % _xq.move2ucci(move))
        else:
            _xq.writeline("nobestmove")
from distutils.core import setup
import py2exe
# py2exe build configuration: bundle main.py into a Windows console exe.
setup(
    console = ["main.py"],
)
| Python |
#!/usr/bin/env python
#coding=utf-8
def ucci():
    """Start the UCCI protocol engine and run it until it quits."""
    import snake.protocol
    snake.protocol.Engine().run()
def perft():
    """Sanity-check the move generator against known perft node counts."""
    import _xq
    def _():
        # Runs perft for the current (xq, perfts) pair; reads the enclosing
        # variables at call time, so the fixtures below can be rebound
        # between calls.
        _xq.writeline(str(xq))
        for index, count in enumerate(perfts):
            t1 = _xq.currenttime()
            assert(xq.perft(index) == count)
            t2 = _xq.currenttime()
            if t2 - t1 > 0.1:
                # Report nodes-per-second for the slower plies only.
                _xq.writeline("ply:%d\tcount:%d\tnode:%d"%(index+1, count, int(count/(t2-t1))))
    xq = _xq.XQ("r1ba1a3/4kn3/2n1b4/pNp1p1p1p/4c4/6P2/P1P2R2P/1CcC5/9/2BAKAB2 r")
    perfts = [38, 1128, 43929, 1339047, 53112976]
    _()
    xq = _xq.XQ("r2akab1r/3n5/4b3n/p1p1pRp1p/9/2P3P2/P3P3c/N2C3C1/4A4/1RBAK1B2 r")
    perfts = [58, 1651, 90744, 2605437, 140822416]
    _()
    xq = _xq.XQ("rnbakabnr/9/1c5c1/p1p1p1p1p/9/9/P1P1P1P1P/1C5C1/9/RNBAKABNR r")
    perfts = [44, 1920, 79666, 3290240, 133312995, 5392831844]
    #_()  # start-position check left disabled — its deepest ply is expensive
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
def compute_line_infos():
    """10x1024 line-scan table with 4-bit fields: p1 | p2<<4 | n1<<8 | n2<<12.

    p1/p2 are the nearest/second-nearest occupied indices scanning downward
    from ``idx``, n1/n2 the same scanning upward; sentinel bits 10-15 are
    forced on so every scan terminates (a wrapped scan parks on 0xF).
    """
    infos = [[0] * 1024 for _ in range(10)]
    for idx in range(10):
        for occupancy in range(1024):
            bits = occupancy | 0xFC00
            prev1 = idx - 1 if idx else 0xF
            while not bits & (1 << prev1):
                prev1 = prev1 - 1 if prev1 else 0xF
            prev2 = prev1 - 1 if prev1 else 0xF
            while not bits & (1 << prev2):
                prev2 = prev2 - 1 if prev2 else 0xF
            next1 = idx + 1
            while not bits & (1 << next1):
                next1 += 1
            next2 = next1 + 1
            while not bits & (1 << next2):
                next2 += 1
            infos[idx][occupancy] = prev1 | (prev2 << 4) | (next1 << 8) | (next2 << 12)
    return infos
def compute_bit_counts():
    """Population count of the low 10 bits for every value 0..1023."""
    bits = [1 << i for i in range(10)]
    return [len([b for b in bits if value & b]) for value in range(1024)]
def compute_distance_infos():
    """91x128 table for src/dst pairs sharing a rank or file.

    Entry = (line index << 10) | mask of line positions strictly between the
    two squares; line indices 0-9 are ranks, 10-18 files (file + 10).
    Unrelated pairs keep the sentinel (19 << 10) | 1023.
    """
    bits = [1 << i for i in range(10)]
    infos = [[(19 << 10 | 1023)] * 128 for _ in range(91)]
    for src in range(90):
        sy, sx = divmod(src, 9)
        for dst in range(90):
            if dst == src:
                continue
            dy, dx = divmod(dst, 9)
            # Rewritten from the original Py2-only 'range(...) + range(...)'
            # concatenation (and a redundant 'i in range(10)' filter).
            if dx == sx:      # same file: mask spans the ranks between
                idx = dx + 10
                between = range(min(sy, dy) + 1, max(sy, dy))
            elif dy == sy:    # same rank: mask spans the files between
                idx = dy
                between = range(min(sx, dx) + 1, max(sx, dx))
            else:
                continue
            infos[src][dst] = (idx << 10) | sum(bits[i] for i in between)
    return infos
def main():
    """Generate src/bitmap_data.cpp from its template."""
    values = {
        'infos': d2a_str(compute_line_infos(), u32),
        'counts': d1a_str(compute_bit_counts(), u32),
        'distance': d2a_str(compute_distance_infos(), u32),
    }
    tmpl = string.Template(open(os.path.join(template_path, 'bitmap_data.cpp.tmpl'), 'rb').read())
    open(os.path.join(folium_path, 'bitmap_data.cpp'), 'wb').write(str(tmpl.safe_substitute(values)))
if __name__ == "__main__":
    main()
| Python |
# Regenerate the generated C++ data files (bitmap-based engine variant).
import xq_data
xq_data.main()           # -> src/xq_data.cpp
import xq_position_data
xq_position_data.main()  # presumably -> src/xq_position_data.cpp (module not shown here)
import history_data
history_data.main()      # -> src/history_data.cpp
import bitmap_data
bitmap_data.main()       # -> src/bitmap_data.cpp
| Python |
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
from xq_data import *
def capture_scores():
    """Build a capture-ordering table indexed [victim_piece][attacker_piece].

    e[t] is the victim value and m[t] the attacker value per folded piece
    type (king, advisor, bishop, rook, knight, cannon, pawn).
    """
    # 33 rows vs 32 columns — row 32 is presumably a sentinel for the empty
    # square; confirm against the C++ consumer.
    capture_scores = [[0]*32 for i in range(33)]
    e = [10000, 1041, 1040, 2000, 1088, 1096, 1020]
    m = [1000, 41, 40, 200, 88, 96, 20]
    # NOTE(review): dead code — 'levels' is undefined in this module and this
    # helper is never called.
    def level(src_type, dst_type): return levels[src_type][dst_type]
    def color(piece): return PC[piece]  # 0 red, 1 black (pieces 0-15 / 16-31)
    def type(piece):  # NOTE: shadows the builtin 'type' inside this function
        t = PT[piece]
        if t >= 7:
            t -= 7  # fold black piece types onto the red range 0..6
        return t
    for src_piece in range(32):
        for dst_piece in range(32):
            if color(src_piece) != color(dst_piece):
                src_type = type(src_piece)
                dst_type = type(dst_piece)
                # NOTE(review): '+ 1 << 17' parses as '((e - m) + 1) << 17'
                # since '+' binds tighter than '<<'.  If the intent was to tag
                # the score with bit 17, this should read
                # '(e[dst_type] - m[src_type]) | (1 << 17)' — confirm against
                # the C++ consumer before changing.
                capture_scores[dst_piece][src_piece] = e[dst_type] - m[src_type] + 1 << 17
    return capture_scores
capture_scores = capture_scores()
def main():
    """Generate src/history_data.cpp from its template."""
    values = {'scores': d2a_str(capture_scores, u32)}
    tmpl_path = os.path.join(template_path, 'history_data.cpp.tmpl')
    tmpl = string.Template(open(tmpl_path, 'rb').read())
    out_path = os.path.join(folium_path, 'history_data.cpp')
    open(out_path, 'wb').write(str(tmpl.safe_substitute(values)))
if __name__ == "__main__":
    main()
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
from xq_data import *
def capture_scores():
    """Build a capture-ordering table indexed [victim_piece][attacker_piece].

    e[t] is the victim value and m[t] the attacker value per folded piece
    type (king, advisor, bishop, rook, knight, cannon, pawn).
    """
    # 33 rows vs 32 columns — row 32 is presumably a sentinel for the empty
    # square; confirm against the C++ consumer.
    capture_scores = [[0]*32 for i in range(33)]
    e = [10000, 1041, 1040, 2000, 1088, 1096, 1020]
    m = [1000, 41, 40, 200, 88, 96, 20]
    # NOTE(review): dead code — 'levels' is undefined in this module and this
    # helper is never called.
    def level(src_type, dst_type): return levels[src_type][dst_type]
    def color(piece): return PC[piece]  # 0 red, 1 black (pieces 0-15 / 16-31)
    def type(piece):  # NOTE: shadows the builtin 'type' inside this function
        t = PT[piece]
        if t >= 7:
            t -= 7  # fold black piece types onto the red range 0..6
        return t
    for src_piece in range(32):
        for dst_piece in range(32):
            if color(src_piece) != color(dst_piece):
                src_type = type(src_piece)
                dst_type = type(dst_piece)
                # NOTE(review): '+ 1 << 17' parses as '((e - m) + 1) << 17'
                # since '+' binds tighter than '<<'.  If the intent was to tag
                # the score with bit 17, this should read
                # '(e[dst_type] - m[src_type]) | (1 << 17)' — confirm against
                # the C++ consumer before changing.
                capture_scores[dst_piece][src_piece] = e[dst_type] - m[src_type] + 1 << 17
    return capture_scores
capture_scores = capture_scores()
def main():
    """Generate src/history_data.cpp from its template."""
    values = {'scores': d2a_str(capture_scores, u32)}
    tmpl_path = os.path.join(template_path, 'history_data.cpp.tmpl')
    tmpl = string.Template(open(tmpl_path, 'rb').read())
    out_path = os.path.join(folium_path, 'history_data.cpp')
    open(out_path, 'wb').write(str(tmpl.safe_substitute(values)))
if __name__ == "__main__":
    main()
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
def compute_line_infos():
    """10x1024 line-scan table with 4-bit fields: p1 | p2<<4 | n1<<8 | n2<<12.

    p1/p2 are the nearest/second-nearest occupied indices scanning downward
    from ``idx``, n1/n2 the same scanning upward; sentinel bits 10-15 are
    forced on so every scan terminates (a wrapped scan parks on 0xF).
    """
    infos = [[0] * 1024 for _ in range(10)]
    for idx in range(10):
        for occupancy in range(1024):
            bits = occupancy | 0xFC00
            prev1 = idx - 1 if idx else 0xF
            while not bits & (1 << prev1):
                prev1 = prev1 - 1 if prev1 else 0xF
            prev2 = prev1 - 1 if prev1 else 0xF
            while not bits & (1 << prev2):
                prev2 = prev2 - 1 if prev2 else 0xF
            next1 = idx + 1
            while not bits & (1 << next1):
                next1 += 1
            next2 = next1 + 1
            while not bits & (1 << next2):
                next2 += 1
            infos[idx][occupancy] = prev1 | (prev2 << 4) | (next1 << 8) | (next2 << 12)
    return infos
def compute_bit_counts():
    """Population count of the low 10 bits for every value 0..1023."""
    bits = [1 << i for i in range(10)]
    return [len([b for b in bits if value & b]) for value in range(1024)]
def compute_distance_infos():
    """91x128 table for src/dst pairs sharing a rank or file.

    Entry = (line index << 10) | mask of line positions strictly between the
    two squares; line indices 0-9 are ranks, 10-18 files (file + 10).
    Unrelated pairs keep the sentinel (19 << 10) | 1023.
    """
    bits = [1 << i for i in range(10)]
    infos = [[(19 << 10 | 1023)] * 128 for _ in range(91)]
    for src in range(90):
        sy, sx = divmod(src, 9)
        for dst in range(90):
            if dst == src:
                continue
            dy, dx = divmod(dst, 9)
            # Rewritten from the original Py2-only 'range(...) + range(...)'
            # concatenation (and a redundant 'i in range(10)' filter).
            if dx == sx:      # same file: mask spans the ranks between
                idx = dx + 10
                between = range(min(sy, dy) + 1, max(sy, dy))
            elif dy == sy:    # same rank: mask spans the files between
                idx = dy
                between = range(min(sx, dx) + 1, max(sx, dx))
            else:
                continue
            infos[src][dst] = (idx << 10) | sum(bits[i] for i in between)
    return infos
def main():
    """Generate src/bitmap_data.cpp from its template."""
    values = {
        'infos': d2a_str(compute_line_infos(), u32),
        'counts': d1a_str(compute_bit_counts(), u32),
        'distance': d2a_str(compute_distance_infos(), u32),
    }
    tmpl = string.Template(open(os.path.join(template_path, 'bitmap_data.cpp.tmpl'), 'rb').read())
    open(os.path.join(folium_path, 'bitmap_data.cpp'), 'wb').write(str(tmpl.safe_substitute(values)))
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
from xq_data import *
# Piece-square value tables, indexed by square 0..89 (index 90 is the
# off-board sentinel and stays 0).  Filled in by value() below; kings and
# pawns share one table, as do advisors and bishops.
RedKingPawnValues = [0]*91
BlackKingPawnValues = [0]*91
AdvisorBishopValues = [0]*91
RedRookValues = [0]*91
BlackRookValues = [0]*91
RedKnightValues = [0]*91
BlackKnightValues = [0]*91
RedCannonValues = [0]*91
BlackCannonValues = [0]*91
def value():
    """Fill the piece-square value tables declared above.

    Each red entry is base value + position bonus; the matching black entry
    is the negated value at the point-mirrored square (89 - sq).
    """
    KingBaseValue = 5000
    AdvisorBaseValue = 40
    BishopBaseValue = 40
    RookBaseValue = 200
    KnightBaseValue = 88
    CannonBaseValue = 96
    PawnBaseValue = 9
    # Kings and pawns never share a legal square, so one bonus table serves
    # both.  NOTE(review): the 7th row (20, 27, 30, 40, 42, 40, 35, 27, 20)
    # is not left-right symmetric (30 vs 35) -- possibly a typo; confirm.
    RedKingPawnPositionValues = [
        0, 0, 0, 1, 5, 1, 0, 0, 0,
        0, 0, 0, -8, -8, -8, 0, 0, 0,
        0, 0, 0, -9, -9, -9, 0, 0, 0,
        -2, 0, -2, 0, 6, 0, -2, 0, -2,
        3, 0, 4, 0, 7, 0, 4, 0, 3,
        10, 18, 22, 35, 40, 35, 22, 18, 10,
        20, 27, 30, 40, 42, 40, 35, 27, 20,
        20, 30, 45, 55, 55, 55, 45, 30, 20,
        20, 30, 50, 65, 70, 65, 50, 30, 20,
        0, 0, 0, 2, 4, 2, 0, 0, 0,]
    RedAdvisorBishopPositionValues = [
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 3, 0, 0, 0, 0,
        -2, 0, 0, 0, 3, 0, 0, 0, -2,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,]
    RedRookPositionValues = [
        -6, 6, 4, 12, 0, 12, 4, 6, -6,
        5, 8, 6, 12, 0, 12, 6, 8, 5,
        -2, 8, 4, 12, 12, 12, 4, 8, -2,
        4, 9, 4, 12, 14, 12, 4, 9, 4,
        8, 12, 12, 14, 15, 14, 12, 12, 8,
        8, 11, 11, 14, 15, 14, 11, 11, 8,
        6, 13, 13, 16, 16, 16, 13, 13, 6,
        6, 8, 7, 14, 16, 14, 7, 8, 6,
        6, 12, 9, 16, 33, 16, 9, 12, 6,
        6, 8, 7, 13, 14, 13, 7, 8, 6,]
    RedKnightPositionValues = [
        0, -3, 2, 0, 2, 0, 2, -3, 0,
        -3, 2, 4, 5, -10, 5, 4, 2, -3,
        5, 4, 6, 7, 4, 7, 6, 4, 5,
        4, 6, 10, 7, 10, 7, 10, 6, 4,
        2, 10, 13, 14, 15, 14, 13, 10, 2,
        2, 12, 11, 15, 16, 15, 11, 12, 2,
        5, 20, 12, 19, 12, 19, 12, 20, 5,
        4, 10, 11, 15, 11, 15, 11, 10, 4,
        2, 8, 15, 9, 6, 9, 15, 8, 2,
        2, 2, 2, 8, 2, 8, 2, 2, 2,]
    RedCannonPositionValues = [
        0, 0, 1, 3, 3, 3, 1, 0, 0,
        0, 1, 2, 2, 2, 2, 2, 1, 0,
        1, 0, 4, 3, 5, 3, 4, 0, 1,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        -1, 0, 3, 0, 4, 0, 3, 0, -1,
        0, 0, 0, 0, 4, 0, 0, 0, 0,
        0, 3, 3, 2, 4, 2, 3, 3, 0,
        1, 1, 0, -5, -4, -5, 0, 1, 1,
        2, 2, 0, -4, -7, -4, 0, 2, 2,
        4, 4, 0, -5, -6, -5, 0, 4, 4,]
    for sq in range(90):
        flag = CoordinateFlags[sq]
        if flag & RedKingFlag:
            RedKingPawnValues[sq] = KingBaseValue + RedKingPawnPositionValues[sq]
            BlackKingPawnValues[89 - sq] = -RedKingPawnValues[sq]
        if flag & RedAdvisorFlag:
            AdvisorBishopValues[sq] = AdvisorBaseValue + RedAdvisorBishopPositionValues[sq]
            AdvisorBishopValues[89 - sq] = -AdvisorBishopValues[sq]
        if flag & RedBishopFlag:
            AdvisorBishopValues[sq] = BishopBaseValue + RedAdvisorBishopPositionValues[sq]
            AdvisorBishopValues[89 - sq] = -AdvisorBishopValues[sq]
        # Rooks, knights and cannons may stand on any square, so these are
        # filled unconditionally.
        RedRookValues[sq] = RookBaseValue + RedRookPositionValues[sq]
        BlackRookValues[89 - sq] = -RedRookValues[sq]
        RedKnightValues[sq] = KnightBaseValue + RedKnightPositionValues[sq]
        BlackKnightValues[89 - sq] = -RedKnightValues[sq]
        RedCannonValues[sq] = CannonBaseValue + RedCannonPositionValues[sq]
        BlackCannonValues[89 - sq] = -RedCannonValues[sq]
        if flag & RedPawnFlag:
            RedKingPawnValues[sq] = PawnBaseValue + RedKingPawnPositionValues[sq]
            BlackKingPawnValues[89 - sq] = -RedKingPawnValues[sq]
value()
# Zobrist hashing tables, indexed by square 0..89 (90 is the off-board
# sentinel): *Locks hold 64-bit values, *Keys hold 32-bit values.  Filled
# in by hash() below.
RedKingPawnLocks = [0]*91
BlackKingPawnLocks = [0]*91
AdvisorBishopLocks = [0]*91
RedRookLocks = [0]*91
BlackRookLocks = [0]*91
RedKnightLocks = [0]*91
BlackKnightLocks = [0]*91
RedCannonLocks = [0]*91
BlackCannonLocks = [0]*91
RedKingPawnKeys = [0]*91
BlackKingPawnKeys = [0]*91
AdvisorBishopKeys = [0]*91
RedRookKeys = [0]*91
BlackRookKeys = [0]*91
RedKnightKeys = [0]*91
BlackKnightKeys = [0]*91
RedCannonKeys = [0]*91
BlackCannonKeys = [0]*91
def hash():
    """Load the 64-bit Zobrist locks and 32-bit keys from 'hash.data'.

    NOTE(review): this function shadows the builtin hash() at module scope.
    """
    from random import randint, seed
    seed(51)
    # NOTE(review): every value drawn below is unconditionally overwritten
    # by the file reads that follow, so this seeded-random section is
    # effectively dead code.  randint's bounds are also inclusive, so the
    # draws could exceed the intended 64/32-bit range.
    for sq in range(90):
        flag = CoordinateFlags[sq]
        if flag & RedKingPawnFlag:
            RedKingPawnLocks[sq] = randint(0, 0x10000000000000000)
            RedKingPawnKeys[sq] = randint(0, 0x100000000)
            BlackKingPawnLocks[89 - sq] = randint(0, 0x10000000000000000)
            BlackKingPawnKeys[89 - sq] = randint(0, 0x100000000)
        if flag & AdvisorBishopFlag:
            AdvisorBishopLocks[sq] = randint(0, 0x10000000000000000)
            AdvisorBishopKeys[sq] = randint(0, 0x100000000)
        RedRookLocks[sq] = randint(0, 0x10000000000000000)
        RedRookKeys[sq] = randint(0, 0x100000000)
        BlackRookLocks[sq] = randint(0, 0x10000000000000000)
        BlackRookKeys[sq] = randint(0, 0x100000000)
        RedKnightLocks[sq] = randint(0, 0x10000000000000000)
        RedKnightKeys[sq] = randint(0, 0x100000000)
        BlackKnightLocks[sq] = randint(0, 0x10000000000000000)
        BlackKnightKeys[sq] = randint(0, 0x100000000)
        RedCannonLocks[sq] = randint(0, 0x10000000000000000)
        RedCannonKeys[sq] = randint(0, 0x100000000)
        BlackCannonLocks[sq] = randint(0, 0x10000000000000000)
        BlackCannonKeys[sq] = randint(0, 0x100000000)
    # hash.data layout: nine 90-entry lock tables (two lines per entry --
    # high 32 bits, then low 32 bits), followed by nine 90-entry key tables
    # (one line each).  The handle is never closed explicitly.
    file = open('hash.data')
    for seq in [RedKingPawnLocks,BlackKingPawnLocks,AdvisorBishopLocks,RedRookLocks,BlackRookLocks,RedKnightLocks,BlackKnightLocks,RedCannonLocks,BlackCannonLocks]:
        for i in range(90):
            i1 = int(file.readline())
            i2 = int(file.readline())
            seq[i] = (i1<<32)|i2
    for seq in [RedKingPawnKeys,BlackKingPawnKeys,AdvisorBishopKeys,RedRookKeys,BlackRookKeys,RedKnightKeys,BlackKnightKeys,RedCannonKeys,BlackCannonKeys]:
        for i in range(90):
            seq[i] = int(file.readline())
hash()
def main():
    """Render xq_position_data.cpp: Zobrist locks/keys and piece-square
    value tables, plus per-piece and per-type pointers into those tables."""
    # `subs` rather than `dict` -- the original shadowed the builtin.
    subs = {}
    # Raw tables: *l = 64-bit locks, *k = 32-bit keys, *v = signed values.
    subs['rkpl'] = d1a_str(RedKingPawnLocks, u64)
    subs['rkpk'] = d1a_str(RedKingPawnKeys, u32)
    subs['rkpv'] = d1a_str(RedKingPawnValues, s32)
    subs['bkpl'] = d1a_str(BlackKingPawnLocks, u64)
    subs['bkpk'] = d1a_str(BlackKingPawnKeys, u32)
    subs['bkpv'] = d1a_str(BlackKingPawnValues, s32)
    subs['abl'] = d1a_str(AdvisorBishopLocks, u64)
    subs['abk'] = d1a_str(AdvisorBishopKeys, u32)
    subs['abv'] = d1a_str(AdvisorBishopValues, s32)
    subs['rrl'] = d1a_str(RedRookLocks, u64)
    subs['rrk'] = d1a_str(RedRookKeys, u32)
    subs['rrv'] = d1a_str(RedRookValues, s32)
    subs['brl'] = d1a_str(BlackRookLocks, u64)
    subs['brk'] = d1a_str(BlackRookKeys, u32)
    subs['brv'] = d1a_str(BlackRookValues, s32)
    subs['rnl'] = d1a_str(RedKnightLocks, u64)
    subs['rnk'] = d1a_str(RedKnightKeys, u32)
    subs['rnv'] = d1a_str(RedKnightValues, s32)
    subs['bnl'] = d1a_str(BlackKnightLocks, u64)
    subs['bnk'] = d1a_str(BlackKnightKeys, u32)
    subs['bnv'] = d1a_str(BlackKnightValues, s32)
    subs['rcl'] = d1a_str(RedCannonLocks, u64)
    subs['rck'] = d1a_str(RedCannonKeys, u32)
    subs['rcv'] = d1a_str(RedCannonValues, s32)
    subs['bcl'] = d1a_str(BlackCannonLocks, u64)
    subs['bck'] = d1a_str(BlackCannonKeys, u32)
    subs['bcv'] = d1a_str(BlackCannonValues, s32)
    # Per-piece (16 red + 16 black) table-name pointers; pawns share the
    # king tables and bishops share the advisor tables.  (The unused
    # PF/PT/PC locals from the original were removed.)
    PL = ['s_red_king_pawn_locks']+['s_advisor_bishop_locks']*4+['s_red_rook_locks']*2+['s_red_knight_locks']*2\
        +['s_red_cannon_locks']*2+['s_red_king_pawn_locks']*5\
        +['s_black_king_pawn_locks']+['s_advisor_bishop_locks']*4+['s_black_rook_locks']*2+['s_black_knight_locks']*2\
        +['s_black_cannon_locks']*2+['s_black_king_pawn_locks']*5
    PK = ['s_red_king_pawn_keys']+['s_advisor_bishop_keys']*4+['s_red_rook_keys']*2+['s_red_knight_keys']*2\
        +['s_red_cannon_keys']*2+['s_red_king_pawn_keys']*5\
        +['s_black_king_pawn_keys']+['s_advisor_bishop_keys']*4+['s_black_rook_keys']*2+['s_black_knight_keys']*2\
        +['s_black_cannon_keys']*2+['s_black_king_pawn_keys']*5
    PV = ['s_red_king_pawn_values']+['s_advisor_bishop_values']*4+['s_red_rook_values']*2+['s_red_knight_values']*2\
        +['s_red_cannon_values']*2+['s_red_king_pawn_values']*5\
        +['s_black_king_pawn_values']+['s_advisor_bishop_values']*4+['s_black_rook_values']*2+['s_black_knight_values']*2\
        +['s_black_cannon_values']*2+['s_black_king_pawn_values']*5
    subs['plocks'] = d1a_str(PL, lambda x: x)
    subs['pkeys'] = d1a_str(PK, lambda x: x)
    subs['pvalues'] = d1a_str(PV, lambda x: x)
    # Per-type (7 red + 7 black piece types) table-name pointers.
    TL = ['s_red_king_pawn_locks']+['s_advisor_bishop_locks']*2\
        +['s_red_rook_locks','s_red_knight_locks','s_red_cannon_locks','s_red_king_pawn_locks']\
        +['s_black_king_pawn_locks']+['s_advisor_bishop_locks']*2\
        +['s_black_rook_locks','s_black_knight_locks','s_black_cannon_locks','s_black_king_pawn_locks']
    TK = ['s_red_king_pawn_keys']+['s_advisor_bishop_keys']*2\
        +['s_red_rook_keys','s_red_knight_keys','s_red_cannon_keys','s_red_king_pawn_keys']\
        +['s_black_king_pawn_keys']+['s_advisor_bishop_keys']*2\
        +['s_black_rook_keys','s_black_knight_keys','s_black_cannon_keys','s_black_king_pawn_keys']
    TV = ['s_red_king_pawn_values']+['s_advisor_bishop_values']*2\
        +['s_red_rook_values','s_red_knight_values','s_red_cannon_values','s_red_king_pawn_values']\
        +['s_black_king_pawn_values']+['s_advisor_bishop_values']*2\
        +['s_black_rook_values','s_black_knight_values','s_black_cannon_values','s_black_king_pawn_values']
    subs['tlocks'] = d1a_str(TL, lambda x: x)
    subs['tkeys'] = d1a_str(TK, lambda x: x)
    subs['tvalues'] = d1a_str(TV, lambda x: x)
    template = open(os.path.join(template_path, 'xq_position_data.cpp.tmpl'), 'rb').read()
    template = string.Template(template)
    path = os.path.join(folium_path, 'xq_position_data.cpp')
    open(path, 'wb').write(str(template.safe_substitute(subs)))
if __name__ == "__main__":
    main()
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
# One-step direction helpers over the neighbour tables filled in by info().
# NOTE(review): u() reads SquareDowns and d() reads SquareUps -- "up" here
# apparently means decreasing y; confirm against the board orientation used
# on the C++ side.
def u(x): return SquareDowns[x]
def d(x): return SquareUps[x]
def l(x): return SquareLefts[x]
def r(x): return SquareRights[x]
# Board-geometry lookup tables; square 90 is the off-board sentinel.
SquareDowns = [0]*91
SquareUps = [0]*91
SquareLefts = [0]*91
SquareRights = [0]*91
Xs = [9]*91
Ys = [10]*91
XYs = [[90]*16 for i in range(16)]
# KnightLegs[src][dst] -> the knight's blocking ("leg") square.
# BishopEyes is declared but never filled in this module.
KnightLegs = [[90]*128 for i in range(91)]
BishopEyes = [[90]*128 for i in range(91)]
def info():
    """Populate the geometry tables: neighbour squares, x/y coordinates and
    the (y, x) -> square map.  Square 90 is the off-board sentinel."""
    def to_square(col, row):
        if col < 0 or col > 8 or row < 0 or row > 9:
            return 90
        return col + 9 * row
    for square in range(90):
        row, col = divmod(square, 9)
        SquareDowns[square] = to_square(col, row - 1)
        SquareUps[square] = to_square(col, row + 1)
        SquareLefts[square] = to_square(col - 1, row)
        SquareRights[square] = to_square(col + 1, row)
        Xs[square] = col
        Ys[square] = row
        XYs[row][col] = square
    # The sentinel maps to itself in every direction.
    for table in (SquareDowns, SquareUps, SquareLefts, SquareRights):
        table[90] = 90
info()
def leg():
    """Fill KnightLegs: KnightLegs[src][dst] is the adjacent square a knight
    at src must have empty (its "leg") to reach dst."""
    toward_down = lambda s: SquareDowns[s]
    toward_up = lambda s: SquareUps[s]
    toward_left = lambda s: SquareLefts[s]
    toward_right = lambda s: SquareRights[s]
    # A knight move is one orthogonal step (the leg) followed by a diagonal
    # step continuing in the same primary direction.
    patterns = [
        (toward_down, toward_left), (toward_down, toward_right),
        (toward_up, toward_left), (toward_up, toward_right),
        (toward_left, toward_up), (toward_left, toward_down),
        (toward_right, toward_up), (toward_right, toward_down),
    ]
    for src in range(90):
        for primary, secondary in patterns:
            knee = primary(src)
            dst = secondary(primary(knee))
            if dst != 90:
                KnightLegs[src][dst] = knee
leg()
# Piece attribute tables indexed by piece id 0..33: 16 red pieces,
# 16 black pieces, then one empty and one invalid entry.
# PF: piece flags; PT: piece type ids; PC: side (0 red, 1 black,
# 2 empty, 3 invalid).
PF = [RedKingFlag]+[RedAdvisorFlag]*2+[RedBishopFlag]*2\
    +[RedRookFlag]*2+[RedKnightFlag]*2+[RedCannonFlag]*2+[RedPawnFlag]*5\
    +[BlackKingFlag]+[BlackAdvisorFlag]*2+[BlackBishopFlag]*2\
    +[BlackRookFlag]*2+[BlackKnightFlag]*2+[BlackCannonFlag]*2+[BlackPawnFlag]*5\
    +[EmptyFlag, InvaildFlag]
PT = [RedKing]+[RedAdvisor]*2+[RedBishop]*2+[RedRook]*2+[RedKnight]*2+[RedCannon]*2+[RedPawn]*5\
    +[BlackKing]+[BlackAdvisor]*2+[BlackBishop]*2+[BlackRook]*2+[BlackKnight]*2+[BlackCannon]*2+[BlackPawn]*5\
    +[EmptyType]+[InvalidType]
PC = [0]*16 + [1]*16 + [2] + [3]
def MoveFlags():
    """Build MoveFlags[dst][src]: a bitmask of piece kinds for which the
    move src -> dst is geometrically plausible (occupancy ignored)."""
    u = lambda s:SquareDowns[s]
    d = lambda s:SquareUps[s]
    l = lambda s:SquareLefts[s]
    r = lambda s:SquareRights[s]
    MoveFlags = [[0]*128 for i in range(91)]
    for src in range(90):
        sf = CoordinateFlags[src]
        #red king
        if sf & RedKingFlag:
            for dst in [u(src), d(src), l(src), r(src)]:
                if CoordinateFlags[dst] & RedKingFlag:
                    # The pawn flag is added here mainly so the
                    # kings-facing-each-other case can be told apart;
                    # same below.
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedKingFlag | RedPawnFlag
        #black king
        elif sf & BlackKingFlag:
            for dst in [u(src), d(src), l(src), r(src)]:
                if CoordinateFlags[dst] & BlackKingFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | BlackKingFlag | BlackPawnFlag
        #red advisor
        if sf & RedAdvisorFlag:
            for dst in [l(u(src)), l(d(src)), r(u(src)), r(d(src))]:
                if CoordinateFlags[dst] & RedAdvisorFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedAdvisorFlag
        #black advisor
        elif sf & BlackAdvisorFlag:
            for dst in [l(u(src)), l(d(src)), r(u(src)), r(d(src))]:
                if CoordinateFlags[dst] & BlackAdvisorFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | BlackAdvisorFlag
        #red bishop
        elif sf & RedBishopFlag:
            for dst in [l(l(u(u(src)))), l(l(d(d(src)))), r(r(u(u(src)))), r(r(d(d(src))))]:
                if CoordinateFlags[dst] & RedBishopFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedBishopFlag
        #black bishop
        elif sf & BlackBishopFlag:
            for dst in [l(l(u(u(src)))), l(l(d(d(src)))), r(r(u(u(src)))), r(r(d(d(src))))]:
                if CoordinateFlags[dst] & BlackBishopFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | BlackBishopFlag
        #knight (both colours move identically)
        for dst in [l(u(u(src))), l(d(d(src))), r(u(u(src))), r(d(d(src))), l(l(u(src))), l(l(d(src))), r(r(u(src))), r(r(d(src)))]:
            if dst in range(90):
                MoveFlags[dst][src] = MoveFlags[dst][src] | RedKnightFlag | BlackKnightFlag
        #red pawn
        if sf & RedPawnFlag:
            for dst in [l(src), r(src), d(src)]:
                if CoordinateFlags[dst] & RedPawnFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedPawnFlag
        #black pawn
        if sf & BlackPawnFlag:
            for dst in [l(src), r(src), u(src)]:
                if CoordinateFlags[dst] & BlackPawnFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | BlackPawnFlag
        # Kings "attacking" each other along the same file (facing rule),
        # plus rook/cannon rays on shared rank or file.
        for dst in range(90):
            df = CoordinateFlags[dst]
            if sf & RedKingFlag and df & BlackKingFlag and src%9 == dst%9:
                MoveFlags[dst][src] = MoveFlags[dst][src] | RedKingFlag
            elif sf & BlackKingFlag and df & RedKingFlag and src%9 == dst%9:
                MoveFlags[dst][src] = MoveFlags[dst][src] | BlackKingFlag
            #rook cannon
            if src != dst:
                # Python 2 integer division: src/9 is the rank index.
                if src%9 == dst%9 or src/9 == dst/9:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedRookFlag | RedCannonFlag | BlackRookFlag | BlackCannonFlag
    return MoveFlags
MoveFlags=MoveFlags()
def KnightMoves():
    """Precompute knight move lists: each slot packs (leg << 8) | dst.

    23130 (0x5A5A) is the unused-slot sentinel: both bytes decode to the
    off-board square 90 (0x5A).
    """
    KnightMoves = [[23130]*16 for i in range(91)]
    for sq in range(90):
        ls = KnightMoves[sq]
        # ls.index(23130) finds the first still-unused slot.
        leg = u(sq)
        for dst in [l(u(leg)),r(u(leg))]:
            if dst != 90:
                ls[ls.index(23130)] = (leg << 8) | dst
        leg = d(sq)
        for dst in [l(d(leg)),r(d(leg))]:
            if dst != 90:
                ls[ls.index(23130)] = (leg << 8) | dst
        leg = l(sq)
        for dst in [u(l(leg)),d(l(leg))]:
            if dst != 90:
                ls[ls.index(23130)] = (leg << 8) | dst
        leg = r(sq)
        for dst in [u(r(leg)),d(r(leg))]:
            if dst != 90:
                ls[ls.index(23130)] = (leg << 8) | dst
    return KnightMoves
KnightMoves = KnightMoves()
def RedKingPawnMoves():
    """Precompute red king / red pawn destination lists per square.

    Each row holds up to 8 destination squares, padded with the sentinel 90.
    """
    table = [[90]*8 for _ in range(91)]
    for square in range(90):
        mask = CoordinateFlags[square]
        if mask & RedKingFlag:
            candidates = [u(square), d(square), l(square), r(square)]
            allowed = RedKingFlag
        elif mask & RedPawnFlag:
            candidates = [d(square), l(square), r(square)]
            allowed = RedPawnFlag
        else:
            continue
        row = table[square]
        filled = 0
        for dest in candidates:
            if CoordinateFlags[dest] & allowed:
                row[filled] = dest
                filled += 1
    return table
RedKingPawnMoves = RedKingPawnMoves()
def BlackKingPawnMoves():
    """Precompute black king / black pawn destination lists per square.

    Each row holds up to 8 destination squares, padded with the sentinel 90.
    """
    table = [[90]*8 for _ in range(91)]
    for square in range(90):
        mask = CoordinateFlags[square]
        if mask & BlackKingFlag:
            candidates = [u(square), d(square), l(square), r(square)]
            allowed = BlackKingFlag
        elif mask & BlackPawnFlag:
            candidates = [u(square), l(square), r(square)]
            allowed = BlackPawnFlag
        else:
            continue
        row = table[square]
        filled = 0
        for dest in candidates:
            if CoordinateFlags[dest] & allowed:
                row[filled] = dest
                filled += 1
    return table
BlackKingPawnMoves = BlackKingPawnMoves()
def AdvisorBishopMoves():
    """Precompute advisor/bishop destination lists per square (8 slots,
    padded with the sentinel 90).  Colour is implied by the square."""
    table = [[90]*8 for _ in range(91)]
    for square in range(90):
        mask = CoordinateFlags[square]
        if mask & BishopFlag:
            candidates = [u(u(r(r(square)))), u(u(l(l(square)))),
                          d(d(r(r(square)))), d(d(l(l(square))))]
            allowed = BishopFlag
        elif mask & AdvisorFlag:
            candidates = [u(l(square)), u(r(square)),
                          d(l(square)), d(r(square))]
            allowed = AdvisorFlag
        else:
            continue
        row = table[square]
        filled = 0
        for dest in candidates:
            if CoordinateFlags[dest] & allowed:
                row[filled] = dest
                filled += 1
    return table
AdvisorBishopMoves = AdvisorBishopMoves()
def main():
    """Render xq_data.cpp: board geometry, move tables and piece attributes."""
    # `subs` rather than `dict` -- the original shadowed the builtin (and
    # mixed `dict[...]` / `dict [...]` spacing).
    subs = {}
    subs['xs'] = d1a_str(Xs, u32)
    subs['ys'] = d1a_str(Ys, u32)
    subs['xys'] = d2a_str(XYs, u32)
    subs['downs'] = d1a_str(SquareDowns, u32)
    subs['ups'] = d1a_str(SquareUps, u32)
    subs['lefts'] = d1a_str(SquareLefts, u32)
    subs['rights'] = d1a_str(SquareRights, u32)
    subs['coordinate_flags'] = d1a_str(CoordinateFlags, u32)
    subs['ptypes'] = d1a_str(PT, u32)
    subs['pflags'] = d1a_str(PF, u32)
    subs['pcolors'] = d1a_str(PC, u32)
    subs['mf'] = d2a_str(MoveFlags, u32)
    subs['kl'] = d2a_str(KnightLegs, u32)
    subs['nm'] = d2a_str(KnightMoves, u32)
    subs['rkpm'] = d2a_str(RedKingPawnMoves, u32)
    subs['bkpm'] = d2a_str(BlackKingPawnMoves, u32)
    subs['abm'] = d2a_str(AdvisorBishopMoves, u32)
    template = open(os.path.join(template_path, 'xq_data.cpp.tmpl'), 'rb').read()
    template = string.Template(template)
    path = os.path.join(folium_path, 'xq_data.cpp')
    open(path, 'wb').write(str(template.safe_substitute(subs)))
if __name__ == "__main__":
    main()
| Python |
# Piece type ids (0..15): red pieces, black pieces, then empty/invalid.
# (Trailing semicolons from the original removed -- not idiomatic Python.)
RedKing = 0
RedAdvisor = 1
RedBishop = 2
RedRook = 3
RedKnight = 4
RedCannon = 5
RedPawn = 6
BlackKing = 7
BlackAdvisor = 8
BlackBishop = 9
BlackRook = 10
BlackKnight = 11
BlackCannon = 12
BlackPawn = 13
EmptyType = 14
InvalidType = 15
# One-hot flags matching the type ids above (flag = 1 << type).
RedKingFlag = 1 << 0
RedAdvisorFlag = 1 << 1
RedBishopFlag = 1 << 2
RedRookFlag = 1 << 3
RedKnightFlag = 1 << 4
RedCannonFlag = 1 << 5
RedPawnFlag = 1 << 6
BlackKingFlag = 1 << 7
BlackAdvisorFlag = 1 << 8
BlackBishopFlag = 1 << 9
BlackRookFlag = 1 << 10
BlackKnightFlag = 1 << 11
BlackCannonFlag = 1 << 12
BlackPawnFlag = 1 << 13
EmptyFlag = 1 << 14
InvaildFlag = 1 << 15  # [sic] misspelling kept -- other modules import this name
# Convenience unions.
AdvisorFlag = RedAdvisorFlag | BlackAdvisorFlag
BishopFlag = RedBishopFlag | BlackBishopFlag
RedKingPawnFlag = RedKingFlag | RedPawnFlag
AdvisorBishopFlag = RedAdvisorFlag | RedBishopFlag | BlackAdvisorFlag | BlackBishopFlag
# _ maps a (file x, rank y) pair to a square index 0..89.
def _(x, y): return x + y * 9
# Legal squares per piece kind, red side; black lists are the point mirror
# (89 - sq).  Rooks/knights/cannons may stand anywhere.
RedKingCoordinates = [_(x, y) for x in [3, 4, 5] for y in [0, 1, 2]]
RedAdvisorCoordinates = [_(4, 1)] + [_(x, y) for x in [3, 5] for y in [0, 2]]
RedBishopCoordinates = [_(x, y) for x in [0, 4, 8] for y in [2]] + [_(x, y) for x in [2, 6] for y in [0, 4]]
RedRookCoordinates = range(90)
RedKnightCoordinates = range(90)
RedCannonCoordinates = range(90)
# Pawns: the five starting files on ranks 3-4, plus every square across
# the river (45..89).
RedPawnCoordinates = [_(x, y) for x in [0, 2, 4, 6, 8] for y in [3, 4]]
RedPawnCoordinates.extend(range(45, 90))
BlackKingCoordinates = [89 - sq for sq in RedKingCoordinates]
BlackAdvisorCoordinates = [89 - sq for sq in RedAdvisorCoordinates]
BlackBishopCoordinates = [89 - sq for sq in RedBishopCoordinates]
BlackRookCoordinates = [89 - sq for sq in RedRookCoordinates]
BlackKnightCoordinates = [89 - sq for sq in RedKnightCoordinates]
BlackCannonCoordinates = [89 - sq for sq in RedCannonCoordinates]
BlackPawnCoordinates = [89 - sq for sq in RedPawnCoordinates]
def CoordinateFlags():
    """Build the per-square legality bitmask.

    Bit (1 << piece_type) set on square sq means that piece kind may stand
    on sq.  Index 90 is the off-board sentinel and only carries InvaildFlag.
    """
    masks = [0]*91
    restricted = [
        (RedKingCoordinates, RedKingFlag, BlackKingFlag),
        (RedAdvisorCoordinates, RedAdvisorFlag, BlackAdvisorFlag),
        (RedBishopCoordinates, RedBishopFlag, BlackBishopFlag),
        (RedPawnCoordinates, RedPawnFlag, BlackPawnFlag),
    ]
    for squares, red_flag, black_flag in restricted:
        for sq in squares:
            masks[sq] |= red_flag
            masks[89 - sq] |= black_flag
    # Rooks, knights and cannons of either colour -- and emptiness -- are
    # allowed on every real square.
    everywhere = (RedRookFlag | RedKnightFlag | RedCannonFlag
                  | BlackRookFlag | BlackKnightFlag | BlackCannonFlag
                  | EmptyFlag)
    for sq in range(90):
        masks[sq] |= everywhere
    masks[90] |= InvaildFlag
    return masks
CoordinateFlags = CoordinateFlags()
def u64(i):
    """Format *i* as a C unsigned 64-bit integer literal."""
    return '%sULL' % (i,)
def u32(i):
    """Format *i* as a C unsigned 32-bit integer literal."""
    return '%sUL' % (i,)
def s32(i):
    """Format *i* as a C signed 32-bit integer literal."""
    return '%sL' % (i,)
def d1a_str(array_1d, func):
    """Join a 1-D array as comma-separated items rendered through *func*."""
    return ', '.join(func(item) for item in array_1d)
def d2a_str(array_2d, func):
    """Render a 2-D array as brace-wrapped rows separated by ',\\n'."""
    rows = ['{%s}' % d1a_str(row, func) for row in array_2d]
    return ',\n'.join(rows)
import os
# Directory layout: the generator scripts live in script_path; generated
# C++ sources go to <repo>/xq (folium_path); templates sit beside the
# scripts.
script_path = os.path.abspath(os.path.dirname(__file__))
work_path = os.path.dirname(script_path)
folium_path = os.path.join(work_path, 'xq')
template_path = os.path.join(script_path, 'template')
# NOTE(review): creating the template directory at import time is a module
# side effect; os.mkdir only creates the leaf directory.
if not os.path.exists(template_path):
    os.mkdir(template_path)
if __name__ == "__main__":
    print work_path
    print script_path
    print folium_path
| Python |
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
# One-step direction helpers over the neighbour tables filled in by info().
# NOTE(review): u() reads SquareDowns and d() reads SquareUps -- "up" here
# apparently means decreasing y; confirm against the board orientation used
# on the C++ side.
def u(x): return SquareDowns[x]
def d(x): return SquareUps[x]
def l(x): return SquareLefts[x]
def r(x): return SquareRights[x]
# Board-geometry lookup tables; square 90 is the off-board sentinel.
SquareDowns = [0]*91
SquareUps = [0]*91
SquareLefts = [0]*91
SquareRights = [0]*91
Xs = [9]*91
Ys = [10]*91
XYs = [[90]*16 for i in range(16)]
# KnightLegs[src][dst] -> the knight's blocking ("leg") square.
# BishopEyes is declared but never filled in this module.
KnightLegs = [[90]*128 for i in range(91)]
BishopEyes = [[90]*128 for i in range(91)]
def info():
    """Populate the geometry tables: neighbour squares, x/y coordinates and
    the (y, x) -> square map.  Square 90 is the off-board sentinel."""
    def to_square(col, row):
        if col < 0 or col > 8 or row < 0 or row > 9:
            return 90
        return col + 9 * row
    for square in range(90):
        row, col = divmod(square, 9)
        SquareDowns[square] = to_square(col, row - 1)
        SquareUps[square] = to_square(col, row + 1)
        SquareLefts[square] = to_square(col - 1, row)
        SquareRights[square] = to_square(col + 1, row)
        Xs[square] = col
        Ys[square] = row
        XYs[row][col] = square
    # The sentinel maps to itself in every direction.
    for table in (SquareDowns, SquareUps, SquareLefts, SquareRights):
        table[90] = 90
info()
def leg():
    """Fill KnightLegs: KnightLegs[src][dst] is the adjacent square a knight
    at src must have empty (its "leg") to reach dst."""
    toward_down = lambda s: SquareDowns[s]
    toward_up = lambda s: SquareUps[s]
    toward_left = lambda s: SquareLefts[s]
    toward_right = lambda s: SquareRights[s]
    # A knight move is one orthogonal step (the leg) followed by a diagonal
    # step continuing in the same primary direction.
    patterns = [
        (toward_down, toward_left), (toward_down, toward_right),
        (toward_up, toward_left), (toward_up, toward_right),
        (toward_left, toward_up), (toward_left, toward_down),
        (toward_right, toward_up), (toward_right, toward_down),
    ]
    for src in range(90):
        for primary, secondary in patterns:
            knee = primary(src)
            dst = secondary(primary(knee))
            if dst != 90:
                KnightLegs[src][dst] = knee
leg()
# Piece attribute tables indexed by piece id 0..33: 16 red pieces,
# 16 black pieces, then one empty and one invalid entry.
# PF: piece flags; PT: piece type ids; PC: side (0 red, 1 black,
# 2 empty, 3 invalid).
PF = [RedKingFlag]+[RedAdvisorFlag]*2+[RedBishopFlag]*2\
    +[RedRookFlag]*2+[RedKnightFlag]*2+[RedCannonFlag]*2+[RedPawnFlag]*5\
    +[BlackKingFlag]+[BlackAdvisorFlag]*2+[BlackBishopFlag]*2\
    +[BlackRookFlag]*2+[BlackKnightFlag]*2+[BlackCannonFlag]*2+[BlackPawnFlag]*5\
    +[EmptyFlag, InvaildFlag]
PT = [RedKing]+[RedAdvisor]*2+[RedBishop]*2+[RedRook]*2+[RedKnight]*2+[RedCannon]*2+[RedPawn]*5\
    +[BlackKing]+[BlackAdvisor]*2+[BlackBishop]*2+[BlackRook]*2+[BlackKnight]*2+[BlackCannon]*2+[BlackPawn]*5\
    +[EmptyType]+[InvalidType]
PC = [0]*16 + [1]*16 + [2] + [3]
def MoveFlags():
    """Build MoveFlags[dst][src]: a bitmask of piece kinds for which the
    move src -> dst is geometrically plausible (occupancy ignored)."""
    u = lambda s:SquareDowns[s]
    d = lambda s:SquareUps[s]
    l = lambda s:SquareLefts[s]
    r = lambda s:SquareRights[s]
    MoveFlags = [[0]*128 for i in range(91)]
    for src in range(90):
        sf = CoordinateFlags[src]
        #red king
        if sf & RedKingFlag:
            for dst in [u(src), d(src), l(src), r(src)]:
                if CoordinateFlags[dst] & RedKingFlag:
                    # The pawn flag is added here mainly so the
                    # kings-facing-each-other case can be told apart;
                    # same below.
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedKingFlag | RedPawnFlag
        #black king
        elif sf & BlackKingFlag:
            for dst in [u(src), d(src), l(src), r(src)]:
                if CoordinateFlags[dst] & BlackKingFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | BlackKingFlag | BlackPawnFlag
        #red advisor
        if sf & RedAdvisorFlag:
            for dst in [l(u(src)), l(d(src)), r(u(src)), r(d(src))]:
                if CoordinateFlags[dst] & RedAdvisorFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedAdvisorFlag
        #black advisor
        elif sf & BlackAdvisorFlag:
            for dst in [l(u(src)), l(d(src)), r(u(src)), r(d(src))]:
                if CoordinateFlags[dst] & BlackAdvisorFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | BlackAdvisorFlag
        #red bishop
        elif sf & RedBishopFlag:
            for dst in [l(l(u(u(src)))), l(l(d(d(src)))), r(r(u(u(src)))), r(r(d(d(src))))]:
                if CoordinateFlags[dst] & RedBishopFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedBishopFlag
        #black bishop
        elif sf & BlackBishopFlag:
            for dst in [l(l(u(u(src)))), l(l(d(d(src)))), r(r(u(u(src)))), r(r(d(d(src))))]:
                if CoordinateFlags[dst] & BlackBishopFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | BlackBishopFlag
        #knight (both colours move identically)
        for dst in [l(u(u(src))), l(d(d(src))), r(u(u(src))), r(d(d(src))), l(l(u(src))), l(l(d(src))), r(r(u(src))), r(r(d(src)))]:
            if dst in range(90):
                MoveFlags[dst][src] = MoveFlags[dst][src] | RedKnightFlag | BlackKnightFlag
        #red pawn
        if sf & RedPawnFlag:
            for dst in [l(src), r(src), d(src)]:
                if CoordinateFlags[dst] & RedPawnFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedPawnFlag
        #black pawn
        if sf & BlackPawnFlag:
            for dst in [l(src), r(src), u(src)]:
                if CoordinateFlags[dst] & BlackPawnFlag:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | BlackPawnFlag
        # Kings "attacking" each other along the same file (facing rule),
        # plus rook/cannon rays on shared rank or file.
        for dst in range(90):
            df = CoordinateFlags[dst]
            if sf & RedKingFlag and df & BlackKingFlag and src%9 == dst%9:
                MoveFlags[dst][src] = MoveFlags[dst][src] | RedKingFlag
            elif sf & BlackKingFlag and df & RedKingFlag and src%9 == dst%9:
                MoveFlags[dst][src] = MoveFlags[dst][src] | BlackKingFlag
            #rook cannon
            if src != dst:
                # Python 2 integer division: src/9 is the rank index.
                if src%9 == dst%9 or src/9 == dst/9:
                    MoveFlags[dst][src] = MoveFlags[dst][src] | RedRookFlag | RedCannonFlag | BlackRookFlag | BlackCannonFlag
    return MoveFlags
MoveFlags=MoveFlags()
def KnightMoves():
    """Precompute knight move lists: each slot packs (leg << 8) | dst.

    23130 (0x5A5A) is the unused-slot sentinel: both bytes decode to the
    off-board square 90 (0x5A).
    """
    KnightMoves = [[23130]*16 for i in range(91)]
    for sq in range(90):
        ls = KnightMoves[sq]
        # ls.index(23130) finds the first still-unused slot.
        leg = u(sq)
        for dst in [l(u(leg)),r(u(leg))]:
            if dst != 90:
                ls[ls.index(23130)] = (leg << 8) | dst
        leg = d(sq)
        for dst in [l(d(leg)),r(d(leg))]:
            if dst != 90:
                ls[ls.index(23130)] = (leg << 8) | dst
        leg = l(sq)
        for dst in [u(l(leg)),d(l(leg))]:
            if dst != 90:
                ls[ls.index(23130)] = (leg << 8) | dst
        leg = r(sq)
        for dst in [u(r(leg)),d(r(leg))]:
            if dst != 90:
                ls[ls.index(23130)] = (leg << 8) | dst
    return KnightMoves
KnightMoves = KnightMoves()
def RedKingPawnMoves():
    """Precompute red king / red pawn destination lists per square.

    Each row holds up to 8 destination squares, padded with the sentinel 90.
    """
    table = [[90]*8 for _ in range(91)]
    for square in range(90):
        mask = CoordinateFlags[square]
        if mask & RedKingFlag:
            candidates = [u(square), d(square), l(square), r(square)]
            allowed = RedKingFlag
        elif mask & RedPawnFlag:
            candidates = [d(square), l(square), r(square)]
            allowed = RedPawnFlag
        else:
            continue
        row = table[square]
        filled = 0
        for dest in candidates:
            if CoordinateFlags[dest] & allowed:
                row[filled] = dest
                filled += 1
    return table
RedKingPawnMoves = RedKingPawnMoves()
def BlackKingPawnMoves():
    """Precompute black king / black pawn destination lists per square.

    Each row holds up to 8 destination squares, padded with the sentinel 90.
    """
    table = [[90]*8 for _ in range(91)]
    for square in range(90):
        mask = CoordinateFlags[square]
        if mask & BlackKingFlag:
            candidates = [u(square), d(square), l(square), r(square)]
            allowed = BlackKingFlag
        elif mask & BlackPawnFlag:
            candidates = [u(square), l(square), r(square)]
            allowed = BlackPawnFlag
        else:
            continue
        row = table[square]
        filled = 0
        for dest in candidates:
            if CoordinateFlags[dest] & allowed:
                row[filled] = dest
                filled += 1
    return table
BlackKingPawnMoves = BlackKingPawnMoves()
def AdvisorBishopMoves():
    """Precompute advisor/bishop destination lists per square (8 slots,
    padded with the sentinel 90).  Colour is implied by the square."""
    table = [[90]*8 for _ in range(91)]
    for square in range(90):
        mask = CoordinateFlags[square]
        if mask & BishopFlag:
            candidates = [u(u(r(r(square)))), u(u(l(l(square)))),
                          d(d(r(r(square)))), d(d(l(l(square))))]
            allowed = BishopFlag
        elif mask & AdvisorFlag:
            candidates = [u(l(square)), u(r(square)),
                          d(l(square)), d(r(square))]
            allowed = AdvisorFlag
        else:
            continue
        row = table[square]
        filled = 0
        for dest in candidates:
            if CoordinateFlags[dest] & allowed:
                row[filled] = dest
                filled += 1
    return table
AdvisorBishopMoves = AdvisorBishopMoves()
def main():
    """Render xq_data.cpp: board geometry, move tables and piece attributes."""
    # `subs` rather than `dict` -- the original shadowed the builtin (and
    # mixed `dict[...]` / `dict [...]` spacing).
    subs = {}
    subs['xs'] = d1a_str(Xs, u32)
    subs['ys'] = d1a_str(Ys, u32)
    subs['xys'] = d2a_str(XYs, u32)
    subs['downs'] = d1a_str(SquareDowns, u32)
    subs['ups'] = d1a_str(SquareUps, u32)
    subs['lefts'] = d1a_str(SquareLefts, u32)
    subs['rights'] = d1a_str(SquareRights, u32)
    subs['coordinate_flags'] = d1a_str(CoordinateFlags, u32)
    subs['ptypes'] = d1a_str(PT, u32)
    subs['pflags'] = d1a_str(PF, u32)
    subs['pcolors'] = d1a_str(PC, u32)
    subs['mf'] = d2a_str(MoveFlags, u32)
    subs['kl'] = d2a_str(KnightLegs, u32)
    subs['nm'] = d2a_str(KnightMoves, u32)
    subs['rkpm'] = d2a_str(RedKingPawnMoves, u32)
    subs['bkpm'] = d2a_str(BlackKingPawnMoves, u32)
    subs['abm'] = d2a_str(AdvisorBishopMoves, u32)
    template = open(os.path.join(template_path, 'xq_data.cpp.tmpl'), 'rb').read()
    template = string.Template(template)
    path = os.path.join(folium_path, 'xq_data.cpp')
    open(path, 'wb').write(str(template.safe_substitute(subs)))
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
#coding=utf-8
import os
import string
from consts import *
from xq_data import *
# Piece-square value tables, indexed by square 0..89 (index 90 is the
# off-board sentinel and stays 0).  Filled in by value() below; kings and
# pawns share one table, as do advisors and bishops.
RedKingPawnValues = [0]*91
BlackKingPawnValues = [0]*91
AdvisorBishopValues = [0]*91
RedRookValues = [0]*91
BlackRookValues = [0]*91
RedKnightValues = [0]*91
BlackKnightValues = [0]*91
RedCannonValues = [0]*91
BlackCannonValues = [0]*91
def value():
    """Fill the piece-square value tables declared above.

    Each red entry is base value + position bonus; the matching black entry
    is the negated value at the point-mirrored square (89 - sq).
    """
    KingBaseValue = 5000
    AdvisorBaseValue = 40
    BishopBaseValue = 40
    RookBaseValue = 200
    KnightBaseValue = 88
    CannonBaseValue = 96
    PawnBaseValue = 9
    # Kings and pawns never share a legal square, so one bonus table serves
    # both.  NOTE(review): the 7th row (20, 27, 30, 40, 42, 40, 35, 27, 20)
    # is not left-right symmetric (30 vs 35) -- possibly a typo; confirm.
    RedKingPawnPositionValues = [
        0, 0, 0, 1, 5, 1, 0, 0, 0,
        0, 0, 0, -8, -8, -8, 0, 0, 0,
        0, 0, 0, -9, -9, -9, 0, 0, 0,
        -2, 0, -2, 0, 6, 0, -2, 0, -2,
        3, 0, 4, 0, 7, 0, 4, 0, 3,
        10, 18, 22, 35, 40, 35, 22, 18, 10,
        20, 27, 30, 40, 42, 40, 35, 27, 20,
        20, 30, 45, 55, 55, 55, 45, 30, 20,
        20, 30, 50, 65, 70, 65, 50, 30, 20,
        0, 0, 0, 2, 4, 2, 0, 0, 0,]
    RedAdvisorBishopPositionValues = [
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 3, 0, 0, 0, 0,
        -2, 0, 0, 0, 3, 0, 0, 0, -2,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0,]
    RedRookPositionValues = [
        -6, 6, 4, 12, 0, 12, 4, 6, -6,
        5, 8, 6, 12, 0, 12, 6, 8, 5,
        -2, 8, 4, 12, 12, 12, 4, 8, -2,
        4, 9, 4, 12, 14, 12, 4, 9, 4,
        8, 12, 12, 14, 15, 14, 12, 12, 8,
        8, 11, 11, 14, 15, 14, 11, 11, 8,
        6, 13, 13, 16, 16, 16, 13, 13, 6,
        6, 8, 7, 14, 16, 14, 7, 8, 6,
        6, 12, 9, 16, 33, 16, 9, 12, 6,
        6, 8, 7, 13, 14, 13, 7, 8, 6,]
    RedKnightPositionValues = [
        0, -3, 2, 0, 2, 0, 2, -3, 0,
        -3, 2, 4, 5, -10, 5, 4, 2, -3,
        5, 4, 6, 7, 4, 7, 6, 4, 5,
        4, 6, 10, 7, 10, 7, 10, 6, 4,
        2, 10, 13, 14, 15, 14, 13, 10, 2,
        2, 12, 11, 15, 16, 15, 11, 12, 2,
        5, 20, 12, 19, 12, 19, 12, 20, 5,
        4, 10, 11, 15, 11, 15, 11, 10, 4,
        2, 8, 15, 9, 6, 9, 15, 8, 2,
        2, 2, 2, 8, 2, 8, 2, 2, 2,]
    RedCannonPositionValues = [
        0, 0, 1, 3, 3, 3, 1, 0, 0,
        0, 1, 2, 2, 2, 2, 2, 1, 0,
        1, 0, 4, 3, 5, 3, 4, 0, 1,
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        -1, 0, 3, 0, 4, 0, 3, 0, -1,
        0, 0, 0, 0, 4, 0, 0, 0, 0,
        0, 3, 3, 2, 4, 2, 3, 3, 0,
        1, 1, 0, -5, -4, -5, 0, 1, 1,
        2, 2, 0, -4, -7, -4, 0, 2, 2,
        4, 4, 0, -5, -6, -5, 0, 4, 4,]
    for sq in range(90):
        flag = CoordinateFlags[sq]
        if flag & RedKingFlag:
            RedKingPawnValues[sq] = KingBaseValue + RedKingPawnPositionValues[sq]
            BlackKingPawnValues[89 - sq] = -RedKingPawnValues[sq]
        if flag & RedAdvisorFlag:
            AdvisorBishopValues[sq] = AdvisorBaseValue + RedAdvisorBishopPositionValues[sq]
            AdvisorBishopValues[89 - sq] = -AdvisorBishopValues[sq]
        if flag & RedBishopFlag:
            AdvisorBishopValues[sq] = BishopBaseValue + RedAdvisorBishopPositionValues[sq]
            AdvisorBishopValues[89 - sq] = -AdvisorBishopValues[sq]
        # Rooks, knights and cannons may stand on any square, so these are
        # filled unconditionally.
        RedRookValues[sq] = RookBaseValue + RedRookPositionValues[sq]
        BlackRookValues[89 - sq] = -RedRookValues[sq]
        RedKnightValues[sq] = KnightBaseValue + RedKnightPositionValues[sq]
        BlackKnightValues[89 - sq] = -RedKnightValues[sq]
        RedCannonValues[sq] = CannonBaseValue + RedCannonPositionValues[sq]
        BlackCannonValues[89 - sq] = -RedCannonValues[sq]
        if flag & RedPawnFlag:
            RedKingPawnValues[sq] = PawnBaseValue + RedKingPawnPositionValues[sq]
            BlackKingPawnValues[89 - sq] = -RedKingPawnValues[sq]
value()
# Zobrist hashing tables, indexed by square 0..89 (90 is the off-board
# sentinel): *Locks hold 64-bit values, *Keys hold 32-bit values.  Filled
# in by hash() below.
RedKingPawnLocks = [0]*91
BlackKingPawnLocks = [0]*91
AdvisorBishopLocks = [0]*91
RedRookLocks = [0]*91
BlackRookLocks = [0]*91
RedKnightLocks = [0]*91
BlackKnightLocks = [0]*91
RedCannonLocks = [0]*91
BlackCannonLocks = [0]*91
RedKingPawnKeys = [0]*91
BlackKingPawnKeys = [0]*91
AdvisorBishopKeys = [0]*91
RedRookKeys = [0]*91
BlackRookKeys = [0]*91
RedKnightKeys = [0]*91
BlackKnightKeys = [0]*91
RedCannonKeys = [0]*91
BlackCannonKeys = [0]*91
def hash():
    """Load the 64-bit Zobrist locks and 32-bit keys from 'hash.data'.

    NOTE(review): this function shadows the builtin hash() at module scope.
    """
    from random import randint, seed
    seed(51)
    # NOTE(review): every value drawn below is unconditionally overwritten
    # by the file reads that follow, so this seeded-random section is
    # effectively dead code.  randint's bounds are also inclusive, so the
    # draws could exceed the intended 64/32-bit range.
    for sq in range(90):
        flag = CoordinateFlags[sq]
        if flag & RedKingPawnFlag:
            RedKingPawnLocks[sq] = randint(0, 0x10000000000000000)
            RedKingPawnKeys[sq] = randint(0, 0x100000000)
            BlackKingPawnLocks[89 - sq] = randint(0, 0x10000000000000000)
            BlackKingPawnKeys[89 - sq] = randint(0, 0x100000000)
        if flag & AdvisorBishopFlag:
            AdvisorBishopLocks[sq] = randint(0, 0x10000000000000000)
            AdvisorBishopKeys[sq] = randint(0, 0x100000000)
        RedRookLocks[sq] = randint(0, 0x10000000000000000)
        RedRookKeys[sq] = randint(0, 0x100000000)
        BlackRookLocks[sq] = randint(0, 0x10000000000000000)
        BlackRookKeys[sq] = randint(0, 0x100000000)
        RedKnightLocks[sq] = randint(0, 0x10000000000000000)
        RedKnightKeys[sq] = randint(0, 0x100000000)
        BlackKnightLocks[sq] = randint(0, 0x10000000000000000)
        BlackKnightKeys[sq] = randint(0, 0x100000000)
        RedCannonLocks[sq] = randint(0, 0x10000000000000000)
        RedCannonKeys[sq] = randint(0, 0x100000000)
        BlackCannonLocks[sq] = randint(0, 0x10000000000000000)
        BlackCannonKeys[sq] = randint(0, 0x100000000)
    # hash.data layout: nine 90-entry lock tables (two lines per entry --
    # high 32 bits, then low 32 bits), followed by nine 90-entry key tables
    # (one line each).  The handle is never closed explicitly.
    file = open('hash.data')
    for seq in [RedKingPawnLocks,BlackKingPawnLocks,AdvisorBishopLocks,RedRookLocks,BlackRookLocks,RedKnightLocks,BlackKnightLocks,RedCannonLocks,BlackCannonLocks]:
        for i in range(90):
            i1 = int(file.readline())
            i2 = int(file.readline())
            seq[i] = (i1<<32)|i2
    for seq in [RedKingPawnKeys,BlackKingPawnKeys,AdvisorBishopKeys,RedRookKeys,BlackRookKeys,RedKnightKeys,BlackKnightKeys,RedCannonKeys,BlackCannonKeys]:
        for i in range(90):
            seq[i] = int(file.readline())
hash()
def main():
    """Render xq_position_data.cpp from the lock/key/value tables.

    Substitutes the generated C arrays into
    templates/xq_position_data.cpp.tmpl and writes the result to
    folium/xq_position_data.cpp.
    """
    subst = {}  # renamed from 'dict' to stop shadowing the builtin
    # (template-key prefix, locks, keys, values) per piece group; the
    # template keys expand to e.g. 'rkpl', 'rkpk', 'rkpv'.
    groups = [
        ('rkp', RedKingPawnLocks, RedKingPawnKeys, RedKingPawnValues),
        ('bkp', BlackKingPawnLocks, BlackKingPawnKeys, BlackKingPawnValues),
        ('ab', AdvisorBishopLocks, AdvisorBishopKeys, AdvisorBishopValues),
        ('rr', RedRookLocks, RedRookKeys, RedRookValues),
        ('br', BlackRookLocks, BlackRookKeys, BlackRookValues),
        ('rn', RedKnightLocks, RedKnightKeys, RedKnightValues),
        ('bn', BlackKnightLocks, BlackKnightKeys, BlackKnightValues),
        ('rc', RedCannonLocks, RedCannonKeys, RedCannonValues),
        ('bc', BlackCannonLocks, BlackCannonKeys, BlackCannonValues),
    ]
    for prefix, locks, keys, values in groups:
        subst[prefix + 'l'] = d1a_str(locks, u64)
        subst[prefix + 'k'] = d1a_str(keys, u32)
        subst[prefix + 'v'] = d1a_str(values, s32)
    # Per-piece table dispatch: 32 slots = 16 red + 16 black pieces.
    # (The previously built PF/PT/PC lists were never referenced and have
    # been removed.)  The _keys/_values variants are derived from the _locks
    # names by suffix substitution instead of three hand-maintained lists.
    PL = (['s_red_king_pawn_locks'] + ['s_advisor_bishop_locks']*4
          + ['s_red_rook_locks']*2 + ['s_red_knight_locks']*2
          + ['s_red_cannon_locks']*2 + ['s_red_king_pawn_locks']*5
          + ['s_black_king_pawn_locks'] + ['s_advisor_bishop_locks']*4
          + ['s_black_rook_locks']*2 + ['s_black_knight_locks']*2
          + ['s_black_cannon_locks']*2 + ['s_black_king_pawn_locks']*5)
    subst['plocks'] = d1a_str(PL, lambda x: x)
    subst['pkeys'] = d1a_str([s.replace('_locks', '_keys') for s in PL],
                             lambda x: x)
    subst['pvalues'] = d1a_str([s.replace('_locks', '_values') for s in PL],
                               lambda x: x)
    # Per-piece-type dispatch: 14 slots = 7 red + 7 black piece types.
    TL = (['s_red_king_pawn_locks'] + ['s_advisor_bishop_locks']*2
          + ['s_red_rook_locks', 's_red_knight_locks', 's_red_cannon_locks',
             's_red_king_pawn_locks']
          + ['s_black_king_pawn_locks'] + ['s_advisor_bishop_locks']*2
          + ['s_black_rook_locks', 's_black_knight_locks',
             's_black_cannon_locks', 's_black_king_pawn_locks'])
    subst['tlocks'] = d1a_str(TL, lambda x: x)
    subst['tkeys'] = d1a_str([s.replace('_locks', '_keys') for s in TL],
                             lambda x: x)
    subst['tvalues'] = d1a_str([s.replace('_locks', '_values') for s in TL],
                               lambda x: x)
    # 'with' guarantees both file handles are closed (previously leaked).
    with open(os.path.join(template_path, 'xq_position_data.cpp.tmpl'), 'rb') as tmpl_file:
        template = string.Template(tmpl_file.read())
    with open(os.path.join(folium_path, 'xq_position_data.cpp'), 'wb') as out_file:
        out_file.write(str(template.safe_substitute(subst)))
if __name__ == "__main__":
    main()
#!/usr/bin/env python
#coding=utf-8
def ucci():
    """Create the protocol engine and hand control to its main loop."""
    from snake import protocol
    protocol.Engine().run()
def perft():
    """Self-test: check move-generation node counts (perft) for three
    positions, reporting nodes/second for any ply slower than 0.1s."""
    import _xq
    def _():
        # Runs the checks against the *current* xq/perfts bindings: the
        # closure re-reads the enclosing locals on every call, so they can
        # be rebound between invocations below.
        _xq.writeline(str(xq))
        for index, count in enumerate(perfts):
            t1 = _xq.currenttime()
            assert(xq.perft(index) == count)
            t2 = _xq.currenttime()
            if t2 - t1 > 0.1:
                # index+1 = ply depth; node = nodes per second for this ply
                _xq.writeline("ply:%d\tcount:%d\tnode:%d"%(index+1, count, int(count/(t2-t1))))
    xq = _xq.XQ("r1ba1a3/4kn3/2n1b4/pNp1p1p1p/4c4/6P2/P1P2R2P/1CcC5/9/2BAKAB2 r")
    perfts = [38, 1128, 43929, 1339047, 53112976]
    _()
    xq = _xq.XQ("r2akab1r/3n5/4b3n/p1p1pRp1p/9/2P3P2/P3P3c/N2C3C1/4A4/1RBAK1B2 r")
    perfts = [58, 1651, 90744, 2605437, 140822416]
    _()
    xq = _xq.XQ("rnbakabnr/9/1c5c1/p1p1p1p1p/9/9/P1P1P1P1P/1C5C1/9/RNBAKABNR r")
    perfts = [44, 1920, 79666, 3290240, 133312995, 5392831844]
    #_()  # 6-ply perft of the opening position is expensive; left disabled
from fonetik import *
import unittest
class FonetikUnitTest(unittest.TestCase):
    # Shared base: gives every test case a fresh Fonetik translator.
    def setUp(self):
        self.f = Fonetik()
class CapitalizationNormalization(FonetikUnitTest):
    """Translation output must not depend on the input's capitalization."""
    words = (
        ("FOO", "fo"),
        ("Foo", "fo"),
        ("FoO", "fo"),
    )
    def testCapitalizationIsNoMatter(self):
        for given, expected in self.words:
            self.assertEqual(self.f.translate(given), expected)
class SpecialCharaters(FonetikUnitTest):
    """Accented spellings must translate identically to plain ones."""
    words = (
        ("Salchichón", "salchichon"),
        ("Jamón", "jamon"),
        ("Strüdel", "strudel"),
        ("carrona", "Carroña"),
    )
    def testIfSpecialCharactersAreAllowed(self):
        for first, second in self.words:
            translated_first = self.f.translate(first)
            translated_second = self.f.translate(second)
            self.assertEqual(translated_first, translated_second)
class FonetikTester():
    # Thin driver around unittest.  NOTE(review): the Fonetik instance built
    # here is never used -- unittest.main() discovers the TestCase classes
    # on its own.
    def __init__(self):
        self.f = Fonetik()
    def run(self):
        unittest.main()
def main():
    """Run the fonetik unit-test suite."""
    FonetikTester().run()
main()
| Python |
import yaml
import sys
import re
import string
class Phonem():
    # Placeholder data holder; nothing else in this module references it.
    def __init__(self):
        self.name = "phonemName"
        self.values = ['p1','p2']
class PhonemaLoader():
    """Loads the phonem definition table from a YAML file."""
    def __init__(self):
        pass
    def load_phonema(self, phonema_file_name):
        """Parse phonema_file_name and return the deserialized structure."""
        # 'with' guarantees the handle is closed even if read() raises
        # (it was previously leaked on error).
        with open(phonema_file_name, 'r') as phonema_file:
            full_text = phonema_file.read()
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects; switch to yaml.safe_load if the file can come
        # from an untrusted source.
        phonema = yaml.load(full_text)
        return phonema
class Fonetik():
    """Phonetic translator driven by the phonem table in phonema.yml.

    self.phonema is a list of {key: [patterns]} mappings; each key is the
    output symbol emitted when one of its regex patterns matches.
    """
    def __init__(self):
        pl = PhonemaLoader()
        self.phonema = pl.load_phonema("phonema.yml")
    def translate(self, word):
        """Return the phonetic encoding of word (case/accent-insensitive)."""
        # str.lower() replaces string.lower(), which was removed in Python 3.
        remain_word = word.lower()
        # Fold accented characters to their ASCII base before matching;
        # plain replace() is equivalent to re.sub() for these literals.
        for accented, plain in (('ñ', 'n'), ('á', 'a'), ('é', 'e'),
                                ('í', 'i'), ('ó', 'o'), ('ú', 'u'),
                                ('ü', 'u')):
            remain_word = remain_word.replace(accented, plain)
        result_word = ''
        for phonem in self.phonema:
            for key in phonem.keys():
                for p in phonem[key]:
                    if re.search(str(p), remain_word) is not None:
                        # NOTE(review): sub() deletes *every* occurrence of
                        # the pattern while the key is emitted only once --
                        # confirm this is intended for repeated phonems.
                        remain_word = re.sub(str(p), '', remain_word)
                        result_word += key
        return result_word
def main():
    """Translate every word passed on the command line."""
    translator = Fonetik()
    words = []
    if len(sys.argv) > 1:
        words = sys.argv
        del words[0]  # drop the script name (mutates sys.argv in place)
    for word in words:
        translator.translate(word)
if __name__ == '__main__':
    sys.exit(main())
| Python |
from fonetik import *
import unittest
class FonetikUnitTest(unittest.TestCase):
    # Shared base: gives every test case a fresh Fonetik translator.
    def setUp(self):
        self.f = Fonetik()
class CapitalizationNormalization(FonetikUnitTest):
    # Translation output must not depend on input capitalization.
    words = ( ("FOO","fo"),
              ("Foo","fo"),
              ("FoO","fo"),
              )
    def testCapitalizationIsNoMatter(self):
        for input,output in self.words:
            result = self.f.translate(input)
            self.assertEqual(result,output)
class SpecialCharaters(FonetikUnitTest):
    # Accented spellings must translate identically to plain ones.
    words = ( ("Salchichón","salchichon"),
              ("Jamón","jamon"),
              ("Strüdel","strudel"),
              ("carrona","Carroña"))
    def testIfSpecialCharactersAreAllowed(self):
        for word1,word2 in self.words:
            result1 = self.f.translate(word1)
            result2 = self.f.translate(word2)
            self.assertEqual(result1,result2)
class FonetikTester():
    # Thin driver around unittest; the Fonetik instance is unused.
    def __init__(self):
        self.f = Fonetik()
    def run(self):
        unittest.main()
def main():
    # Run the fonetik unit-test suite.
    ft = FonetikTester()
    ft.run()
main()
| Python |
import yaml
import sys
import re
import string
class Phonem():
    # Placeholder data holder; nothing else in this module references it.
    def __init__(self):
        self.name = "phonemName"
        self.values = ['p1','p2']
class PhonemaLoader():
    """Loads the phonem definition table from a YAML file."""
    def __init__(self):
        pass
    def load_phonema(self, phonema_file_name):
        """Parse phonema_file_name and return the deserialized structure."""
        # 'with' guarantees the handle is closed even if read() raises
        # (it was previously leaked on error).
        with open(phonema_file_name, 'r') as phonema_file:
            full_text = phonema_file.read()
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects; switch to yaml.safe_load if the file can come
        # from an untrusted source.
        phonema = yaml.load(full_text)
        return phonema
class Fonetik():
    """Phonetic translator driven by the phonem table in phonema.yml.

    self.phonema is a list of {key: [patterns]} mappings; each key is the
    output symbol emitted when one of its regex patterns matches.
    """
    def __init__(self):
        pl = PhonemaLoader()
        self.phonema = pl.load_phonema("phonema.yml")
    def translate(self, word):
        """Return the phonetic encoding of word (case/accent-insensitive)."""
        # str.lower() replaces string.lower(), which was removed in Python 3.
        remain_word = word.lower()
        # Fold accented characters to their ASCII base before matching;
        # plain replace() is equivalent to re.sub() for these literals.
        for accented, plain in (('ñ', 'n'), ('á', 'a'), ('é', 'e'),
                                ('í', 'i'), ('ó', 'o'), ('ú', 'u'),
                                ('ü', 'u')):
            remain_word = remain_word.replace(accented, plain)
        result_word = ''
        for phonem in self.phonema:
            for key in phonem.keys():
                for p in phonem[key]:
                    if re.search(str(p), remain_word) is not None:
                        # NOTE(review): sub() deletes *every* occurrence of
                        # the pattern while the key is emitted only once --
                        # confirm this is intended for repeated phonems.
                        remain_word = re.sub(str(p), '', remain_word)
                        result_word += key
        return result_word
def main():
    """Translate every word passed on the command line."""
    translator = Fonetik()
    words = []
    if len(sys.argv) > 1:
        words = sys.argv
        del words[0]  # drop the script name (mutates sys.argv in place)
    for word in words:
        translator.translate(word)
if __name__ == '__main__':
    sys.exit(main())
| Python |
from fonetik import *
import unittest
class FonetikUnitTest(unittest.TestCase):
    # Shared base: gives every test case a fresh Fonetik translator.
    def setUp(self):
        self.f = Fonetik()
class CapitalizationNormalization(FonetikUnitTest):
    # Translation output must not depend on input capitalization.
    words = ( ("FOO","fo"),
              ("Foo","fo"),
              ("FoO","fo"),
              )
    def testCapitalizationIsNoMatter(self):
        for input,output in self.words:
            result = self.f.translate(input)
            self.assertEqual(result,output)
class ClosureCheck(FonetikUnitTest):
    """Translating an already-translated word must be a fixed point."""
    words = ("Negrita", "Super ocho", "Bambino", "Marraqueta",
             "Pan con jamon", "ave palta")
    def testIfReTranslationIsConsistent(self):
        for word in self.words:
            once = self.f.translate(word)
            twice = self.f.translate(once)
            self.assertEqual(once, twice)
class SpecialCharaters(FonetikUnitTest):
    # Accented spellings must translate identically to plain ones.
    words = ( ("Salchichón","salchichon"),
              ("Jamón","jamon"),
              ("Strüdel","strudel"),
              ("carrona","Carroña"))
    def testIfSpecialCharactersAreAllowed(self):
        for word1,word2 in self.words:
            result1 = self.f.translate(word1)
            result2 = self.f.translate(word2)
            self.assertEqual(result1,result2)
class FonetikTester():
    # Thin driver around unittest; the Fonetik instance is unused.
    def __init__(self):
        self.f = Fonetik()
    def run(self):
        unittest.main()
def main():
    # Run the fonetik unit-test suite.
    ft = FonetikTester()
    ft.run()
main()
| Python |
import yaml
import sys
import re
import string
class Phonem():
    # Placeholder data holder; nothing else in this module references it.
    def __init__(self):
        self.name = "phonemName"
        self.values = ['p1','p2']
class PhonemaLoader():
    """Loads the phonem definition table from a YAML file."""
    def __init__(self):
        pass
    def load_phonema(self, phonema_file_name):
        """Parse phonema_file_name and return the deserialized structure."""
        # 'with' guarantees the handle is closed even if read() raises
        # (it was previously leaked on error).
        with open(phonema_file_name, 'r') as phonema_file:
            full_text = phonema_file.read()
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects; switch to yaml.safe_load if the file can come
        # from an untrusted source.
        phonema = yaml.load(full_text)
        return phonema
class Fonetik():
    """Phonetic translator driven by the phonem table in phonema.yml.

    self.phonema is a list of {key: [patterns]} mappings; each key is the
    output symbol emitted when one of its regex patterns matches.
    """
    def __init__(self):
        pl = PhonemaLoader()
        self.phonema = pl.load_phonema("phonema.yml")
    def translate(self, word):
        """Return the phonetic encoding of word (case/accent-insensitive)."""
        # str.lower() replaces string.lower(), which was removed in Python 3.
        remain_word = word.lower()
        # Fold accented characters to their ASCII base before matching;
        # plain replace() is equivalent to re.sub() for these literals.
        for accented, plain in (('ñ', 'n'), ('á', 'a'), ('é', 'e'),
                                ('í', 'i'), ('ó', 'o'), ('ú', 'u'),
                                ('ü', 'u')):
            remain_word = remain_word.replace(accented, plain)
        result_word = ''
        for phonem in self.phonema:
            for key in phonem.keys():
                for p in phonem[key]:
                    if re.search(str(p), remain_word) is not None:
                        # NOTE(review): sub() deletes *every* occurrence of
                        # the pattern while the key is emitted only once --
                        # confirm this is intended for repeated phonems.
                        remain_word = re.sub(str(p), '', remain_word)
                        result_word += key
        return result_word
def main():
    """Translate every word passed on the command line."""
    translator = Fonetik()
    words = []
    if len(sys.argv) > 1:
        words = sys.argv
        del words[0]  # drop the script name (mutates sys.argv in place)
    for word in words:
        translator.translate(word)
if __name__ == '__main__':
    sys.exit(main())
| Python |
import fonetik
def main():
    """Smoke test: construct a Fonetik translator (loads phonema.yml)."""
    # Bug fix: only the module name 'fonetik' is imported above, so the
    # bare name Fonetik raised NameError at runtime.
    fonetik.Fonetik()
main()
| Python |
import yaml
import sys
import re
class Phonem():
    # Placeholder data holder; nothing else in this module references it.
    def __init__(self):
        self.name = "phonemName"
        self.values = ['p1','p2']
class PhonemaLoader():
    """Loads the phonem definition table from a YAML file."""
    def __init__(self):
        pass
    def load_phonema(self, phonema_file_name):
        """Parse phonema_file_name and return the deserialized structure."""
        # 'with' guarantees the handle is closed even if read() raises
        # (it was previously leaked on error).
        with open(phonema_file_name, 'r') as phonema_file:
            full_text = phonema_file.read()
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects; switch to yaml.safe_load if the file can come
        # from an untrusted source.
        phonema = yaml.load(full_text)
        return phonema
class Fonetik():
    """Early translator version (Python 2): prints the phonetic form
    instead of returning it, and applies no case/accent normalization."""
    def __init__(self):
        pl=PhonemaLoader()
        self.phonema = pl.load_phonema("phonema.yml")
    def translate(self,word):
        print "Translating " + word
        remain_word = word
        result_word = ''
        # Each phonem maps an output key to regex patterns; a match removes
        # every occurrence of the pattern and emits the key once.
        for phonem in self.phonema:
            for key in phonem.keys():
                for p in phonem[key]:
                    result = re.search(str(p),remain_word)
                    if result is not None:
                        #print key + ": " + str(result.span()) + " Group:" + str(result.group(0))
                        remain_word = re.sub(str(p),'',remain_word)
                        result_word += key
                    #print "["+result_word+"] [" + remain_word + "]"
        print result_word
    # Earlier dict-based variant, kept disabled:
    #for key in self.phonema.keys():
    #    print key
    #    phonems = self.phonema[key]
    #    for p in phonems:
    #        result = re.search(str(p),remain_word)
    #        if result is not None:
    #            print key + ": " + str(result.span()) + " Group:" + str(result.group(0))
    #            remain_word = re.sub(str(p),'',remain_word)
    #            result_word += key
    #    print "["+result_word+"] [" + remain_word + "]"
def main():
    # Translate each command-line word; always returns None (exit status 0).
    f=Fonetik()
    words = []
    if len(sys.argv) > 1:
        words = sys.argv
        del words[0]
    for word in words:
        f.translate(word)
if __name__ == '__main__':
    sys.exit(main())
| Python |
from fonetik import *
import unittest
class FonetikUnitTest(unittest.TestCase):
    # Shared base: gives every test case a fresh Fonetik translator.
    def setUp(self):
        self.f = Fonetik()
class CapitalizationNormalization(FonetikUnitTest):
    # Translation output must not depend on input capitalization.
    words = ( ("FOO","fo"),
              ("Foo","fo"),
              ("FoO","fo"),
              )
    def testCapitalizationIsNoMatter(self):
        for input,output in self.words:
            result = self.f.translate(input)
            self.assertEqual(result,output)
class SpecialCharaters(FonetikUnitTest):
    # Accented spellings must translate identically to plain ones.
    words = ( ("Salchichón","salchichon"),
              ("Jamón","jamon"),
              ("Strüdel","strudel"),
              ("carrona","Carroña"))
    def testIfSpecialCharactersAreAllowed(self):
        for word1,word2 in self.words:
            result1 = self.f.translate(word1)
            result2 = self.f.translate(word2)
            self.assertEqual(result1,result2)
class FonetikTester():
    # Thin driver around unittest; the Fonetik instance is unused.
    def __init__(self):
        self.f = Fonetik()
    def run(self):
        unittest.main()
def main():
    # Run the fonetik unit-test suite.
    ft = FonetikTester()
    ft.run()
main()
| Python |
import yaml
import sys
import re
import string
class Phonem():
    # Placeholder data holder; nothing else in this module references it.
    def __init__(self):
        self.name = "phonemName"
        self.values = ['p1','p2']
class PhonemaLoader():
    """Loads the phonem definition table from a YAML file."""
    def __init__(self):
        pass
    def load_phonema(self, phonema_file_name):
        """Parse phonema_file_name and return the deserialized structure."""
        # 'with' guarantees the handle is closed even if read() raises
        # (it was previously leaked on error).
        with open(phonema_file_name, 'r') as phonema_file:
            full_text = phonema_file.read()
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects; switch to yaml.safe_load if the file can come
        # from an untrusted source.
        phonema = yaml.load(full_text)
        return phonema
class Fonetik():
def __init__(self):
pl=PhonemaLoader()
self.phonema = pl.load_phonema("phonema.yml")
def translate(self,word):
remain_word = string.lower(word)
remain_word = re.sub('ñ','n',remain_word)
remain_word = re.sub('á','a',remain_word)
remain_word = re.sub('é','e',remain_word)
remain_word = re.sub('í','i',remain_word)
remain_word = re.sub('ó','o',remain_word)
remain_word = re.sub('ú','u',remain_word)
remain_word = re.sub('ü','u',remain_word)
result_word = ''
for phonem in self.phonema:
for key in phonem.keys():
for p in phonem[key]:
result = re.search(str(p),remain_word)
if result is not None:
#print key + ": " + str(result.span()) + " Group:" + str(result.group(0))
remain_word = re.sub(str(p),'',remain_word)
result_word += key
#print "["+result_word+"] [" + remain_word + "]"
print word + " " + result_word
return result_word
#for key in self.phonema.keys():
# print key
# #print "trying " + str(self.phonema[key])
# phonems = self.phonema[key]
# for p in phonems:
# result = re.search(str(p),remain_word)
#
# if result is not None:
# print key + ": " + str(result.span()) + " Group:" + str(result.group(0))
# remain_word = re.sub(str(p),'',remain_word)
# result_word += key
# print "["+result_word+"] [" + remain_word + "]"
def main():
    """Translate every word passed on the command line."""
    translator = Fonetik()
    words = []
    if len(sys.argv) > 1:
        words = sys.argv
        del words[0]  # drop the script name (mutates sys.argv in place)
    for word in words:
        translator.translate(word)
if __name__ == '__main__':
    sys.exit(main())
| Python |
#!/usr/bin/python
# Copyright (C) 2008 Manu Garg.
# Author: Manu Garg <manugarg@gmail.com>
#
# pacparser is a library that provides methods to parse proxy auto-config
# (PAC) files. Please read README file included with this package for more
# information about this library.
#
# pacparser is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# pacparser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA
"""
This script demonstrates how python web clients can use
proxy auto-config (PAC) files for proxy configuration using pacparser.
It take a PAC file and an url as arguments, fetches the URL using the
proxy as determined by PAC file and URL and returns the retrieved webpage.
"""
__author__ = 'manugarg@gmail.com (Manu Garg)'
__copyright__ = 'Copyright (C) 2008 Manu Garg'
__license__ = 'LGPL'
import pacparser
import socket
import sys
import urllib
def fetch_url_using_pac(pac, url):
try:
proxy_string = pacparser.just_find_proxy(pac, url)
except:
sys.stderr.write('could not determine proxy using Pacfile\n')
return None
proxylist = proxy_string.split(";")
proxies = None # Dictionary to be passed to urlopen method of urllib
while proxylist:
proxy = proxylist.pop(0).strip()
if 'DIRECT' in proxy:
proxies = {}
break
if proxy[0:5].upper() == 'PROXY':
proxy = proxy[6:].strip()
if isproxyalive(proxy):
proxies = {'http': 'http://%s' % proxy}
break
try:
sys.stderr.write('trying to fetch the page using proxy %s\n' % proxy)
response = urllib.urlopen(url, proxies=proxies)
except Exception, e:
sys.stderr.write('could not fetch webpage %s using proxy %s\n' %
(url, proxies))
sys.stderr.write(str(e)+'\n')
return None
return response
def isproxyalive(proxy):
host_port = proxy.split(":")
if len(host_port) != 2:
sys.stderr.write('proxy host is not defined as host:port\n')
return False
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(10)
try:
s.connect((host_port[0], int(host_port[1])))
except Exception, e:
sys.stderr.write('proxy %s is not accessible\n' % proxy)
sys.stderr.write(str(e)+'\n')
return False
s.close()
return True
def main():
  # CLI entry point: pacwget <pacfile> <url>; prints the fetched page to
  # stdout or an error message to stderr.
  if len(sys.argv) != 3:
    print 'Not enough arguments'
    print 'Usage:\n%s <pacfile> <url>' % sys.argv[0]
    return None
  pacfile = sys.argv[1]
  url = sys.argv[2]
  response = fetch_url_using_pac(pacfile, url)
  if response:
    print response.read()
  else:
    sys.stderr.write('URL %s could not be retrieved using PAC file %s.' %
                     (url, pacfile))
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python2.5
import pacparser
# Demonstrates the stateful API: init once, parse the PAC, query, clean up.
pacparser.init()
pacparser.parse_pac("wpad.dat")
proxy = pacparser.find_proxy("http://www.manugarg.com")
print proxy
pacparser.cleanup()
# Or simply, use the one-shot wrapper that does all four steps:
print pacparser.just_find_proxy("wpad.dat", "http://www2.manugarg.com")
| Python |
# Copyright (C) 2007 Manu Garg.
# Author: Manu Garg <manugarg@gmail.com>
#
# pacparser is a library that provides methods to parse proxy auto-config
# (PAC) files. Please read README file included with this package for more
# information about this library.
#
# pacparser is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# pacparser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
Python module to parse pac files. Look at project's homepage
http://code.google.com/p/pacparser for more information.
"""
__author__ = 'manugarg@gmail.com (Manu Garg)'
__copyright__ = 'Copyright (C) 2008 Manu Garg'
__license__ = 'LGPL'
from pacparser import _pacparser
import os
import re
import sys
# Captures the URL's host part: the text between '://' and the next '/'.
_url_regex = re.compile('.*\:\/\/([^\/]+).*')
def init():
  """
  Initializes pacparser engine.
  """
  # Creates the Javascript engine that parse_pac_string/find_proxy use.
  _pacparser.init()
def parse_pac(pacfile):
  """
  (Deprecated) Same as parse_pac_file.
  """
  # Kept for backward compatibility; new code should call parse_pac_file.
  parse_pac_file(pacfile)
def parse_pac_file(pacfile):
  """
  Reads the pacfile and evaluates it in the Javascript engine created by
  init().  Prints a message and returns early if the file is unreadable.
  """
  try:
    # 'with' guarantees the handle is closed even when read() raises
    # (previously it leaked on a read error).
    with open(pacfile) as f:
      pac_script = f.read()
  except IOError:
    print('Could not read the pacfile: %s\n%s' % (pacfile, sys.exc_info()[1]))
    return
  _pacparser.parse_pac_string(pac_script)
def parse_pac_string(pac_script):
  """
  Evaluates pac_script in the Javascript engine created by init().
  """
  # Thin wrapper over the C extension.
  _pacparser.parse_pac_string(pac_script)
def find_proxy(url, host=None):
  """
  Finds proxy string for the given url and host. If host is not
  defined, it's extracted from the url.  Returns None (after printing a
  message) when the url is not valid.
  """
  if host is None:
    m = _url_regex.match(url)
    # _url_regex has exactly one capture group.  Compare its count with '=='
    # -- the previous 'is 1' relied on CPython's small-int caching.
    if not m or len(m.groups()) != 1:
      print('URL: %s is not a valid URL' % url)
      return None
    host = m.groups()[0]
  return _pacparser.find_proxy(url, host)
def version():
  """
  Returns the compiled pacparser version.
  """
  _pacparser.version()
  return _pacparser.version()
def cleanup():
  """
  Destroys pacparser engine.
  """
  # Call init() again before reusing the module after cleanup.
  _pacparser.cleanup()
def just_find_proxy(pacfile, url, host=None):
  """
  This function is a wrapper around init, parse_pac, find_proxy
  and cleanup. This is the function to call if you want to find
  proxy just for one url.  Returns the proxy string or None.
  """
  # Direct negative test replaces the old 'if isfile: pass / else' shape.
  if not os.path.isfile(pacfile):
    print('PAC file: %s doesn\'t exist' % pacfile)
    return None
  if host is None:
    m = _url_regex.match(url)
    # Compare the group count with '==' -- the previous 'is 1' relied on
    # CPython's small-int caching.
    if not m or len(m.groups()) != 1:
      print('URL: %s is not a valid URL' % url)
      return None
    host = m.groups()[0]
  init()
  parse_pac(pacfile)
  proxy = find_proxy(url, host)
  cleanup()
  return proxy
def setmyip(ip_address):
  """
  Set my ip address. This is the IP address returned by myIpAddress()
  """
  _pacparser.setmyip(ip_address)
def enable_microsoft_extensions():
  """
  Enables Microsoft PAC extensions (dnsResolveEx, isResolvableEx,
  myIpAddressEx).
  """
  _pacparser.enable_microsoft_extensions()
| Python |
import shutil
import sys
from distutils import sysconfig
def main():
if sys.platform == 'win32':
shutil.rmtree('%s\\pacparser' % sysconfig.get_python_lib(),
ignore_errors=True)
shutil.copytree('pacparser', '%s\\pacparser' % sysconfig.get_python_lib())
else:
print 'This script should be used only on Win32 systems.'
if __name__ == '__main__':
main()
| Python |
# Copyright (C) 2007 Manu Garg.
# Author: Manu Garg <manugarg@gmail.com>
#
# pacparser is a library that provides methods to parse proxy auto-config
# (PAC) files. Please read README file included with this package for more
# information about this library.
#
# pacparser is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# pacparser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
"""
Wrapper script around python module Makefiles. This script take care of
identifying python setup and setting up some environment variables needed by
Makefiles.
"""
import sys
import os
from distutils import sysconfig
from distutils.core import setup
from distutils.core import Extension
def main():
  # Use Makefile for windows. distutils doesn't work well with windows.
  if sys.platform == 'win32':
    # Locate the python DLL for the running interpreter and delegate the
    # whole build to the win32 Makefile, forwarding this script's args.
    pydll = ('C:\windows\system32\python%s.dll' %
             sysconfig.get_config_vars('VERSION')[0])
    os.system('make -f Makefile.win32 %s PY_HOME="%s" PY_DLL="%s"' %
              (' '.join(sys.argv[1:]), sys.prefix, pydll))
    return
  # Non-windows: build the C extension against the in-tree spidermonkey
  # headers and pre-built pacparser.o / libjs.a objects.
  pacparser_module = Extension('_pacparser',
                               include_dirs = ['../spidermonkey/js/src', '..'],
                               sources = ['pacparser_py.c'],
                               extra_objects = ['../pacparser.o', '../libjs.a'])
  setup (name = 'pacparser',
         version = '1',
         description = 'Pacparser package',
         author = 'Manu Garg',
         author_email = 'manugarg@gmail.com',
         url = 'http://code.google.com/p/pacparser',
         long_description = 'python library to parse proxy auto-config (PAC) '
                            'files.',
         license = 'LGPL',
         ext_package = 'pacparser',
         ext_modules = [pacparser_module],
         py_modules = ['pacparser.__init__'])
if __name__ == '__main__':
  main()
| Python |
# Copyright (C) 2007 Manu Garg.
# Author: Manu Garg <manugarg@gmail.com>
#
# pacparser is a library that provides methods to parse proxy auto-config
# (PAC) files. Please read README file included with this package for more
# information about this library.
#
# pacparser is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# pacparser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
import getopt
import glob
import os
import sys
def runtests(pacfile, testdata, tests_dir):
  """Run every case in *testdata* against *pacfile*.

  Each non-comment testdata line is 'getopt-params|expected_result'.
  Raises Exception on the first mismatch or when the compiled pacparser
  module cannot be located/imported.
  """
  py_ver = '.'.join([str(x) for x in sys.version_info[0:2]])
  if sys.platform == 'win32':
    pacparser_module_path = os.path.join(tests_dir, '..', 'src', 'pymod', 'dist')
    # BUG FIX: the check was inverted -- it raised when _pacparser.pyd WAS
    # present.  Fail only when the compiled module is missing.
    if not os.path.exists(os.path.join(pacparser_module_path, '_pacparser.pyd')):
      raise Exception('Tests failed. Could not determine pacparser path.')
  else:
    try:
      pacparser_module_path = glob.glob(os.path.join(
          tests_dir, '..', 'src', 'pymod', 'build', 'lib*%s' % py_ver))[0]
    except Exception:
      raise Exception('Tests failed. Could not determine pacparser path.')
  if 'DEBUG' in os.environ: print('Pacparser module path: %s' %
                                  pacparser_module_path)
  sys.path.insert(0, pacparser_module_path)
  try:
    import pacparser
  except ImportError:
    raise Exception('Tests failed. Could not import pacparser.')
  if 'DEBUG' in os.environ: print('Imported pacparser module: %s' %
                                  sys.modules['pacparser'])
  # 'with' closes the testdata handle (it was previously leaked).
  with open(testdata) as f:
    for line in f:
      comment = ''
      if '#' in line:
        comment = line.split('#', 1)[1]
        line = line.split('#', 1)[0].strip()
      # strip() so whitespace-only lines are skipped instead of crashing
      # the params/expected split below.
      if not line.strip():
        continue
      if ('NO_INTERNET' in os.environ and os.environ['NO_INTERNET'] and
          'INTERNET_REQUIRED' in comment):
        continue
      if 'DEBUG' in os.environ: print(line)
      (params, expected_result) = line.strip().split('|')
      args = dict(getopt.getopt(params.split(), 'eu:c:')[0])
      if '-e' in args:
        pacparser.enable_microsoft_extensions()
      if '-c' in args:
        pacparser.setmyip(args['-c'])
      pacparser.init()
      pacparser.parse_pac_file(pacfile)
      result = pacparser.find_proxy(args['-u'])
      pacparser.cleanup()
      if result != expected_result:
        raise Exception('Tests failed. Got "%s", expected "%s"' % (result, expected_result))
  print('All tests were successful.')
def main():
  """Locate proxy.pac and testdata beside this script and run the suite."""
  tests_dir = os.path.dirname(os.path.join(os.getcwd(), sys.argv[0]))
  runtests(os.path.join(tests_dir, 'proxy.pac'),
           os.path.join(tests_dir, 'testdata'),
           tests_dir)
if __name__ == '__main__':
  main()
| Python |
# Copyright 2008 The RE2 Authors. All Rights Reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Parser for Unicode data files (as distributed by unicode.org)."""
import os
import re
import urllib2
# Directory or URL where Unicode tables reside.
_UNICODE_DIR = "http://www.unicode.org/Public/6.0.0/ucd"
# Largest valid Unicode code value.
_RUNE_MAX = 0x10FFFF
# Package exception hierarchy: all errors derive from Error.
class Error(Exception):
  """Unicode error base class."""
class InputError(Error):
  """Unicode input error class. Raised on invalid input."""
def _UInt(s):
  """Converts string to Unicode code point ('263A' => 0x263a).

  Args:
    s: string to convert

  Returns:
    Unicode code point

  Raises:
    InputError: the string is not a valid Unicode value.
  """
  try:
    code = int(s, 16)
  except ValueError:
    code = -1
  # The tables always spell values with 4-6 hex digits.
  if not (4 <= len(s) <= 6) or not (0 <= code <= _RUNE_MAX):
    raise InputError("invalid Unicode value %s" % (s,))
  return code
def _URange(s):
  """Converts string to Unicode range.

  '0001..0003' => [1, 2, 3].
  '0001' => [1].

  Args:
    s: string to convert

  Returns:
    Unicode range

  Raises:
    InputError: the string is not a valid Unicode range.
  """
  parts = s.split("..")
  if len(parts) == 1:
    return [_UInt(parts[0])]
  if len(parts) == 2:
    lo = _UInt(parts[0])
    hi = _UInt(parts[1])
    # A range must be strictly increasing; equal endpoints are rejected.
    if lo < hi:
      return range(lo, hi + 1)
  raise InputError("invalid Unicode range %s" % (s,))
def _UStr(v):
  """Converts Unicode code point to hex string.

  0x263a => '0x263A'.

  Args:
    v: code point to convert

  Returns:
    Unicode string

  Raises:
    InputError: the argument is not a valid Unicode value.
  """
  if not 0 <= v <= _RUNE_MAX:
    raise InputError("invalid Unicode value %s" % (v,))
  return "0x%04X" % (v,)
def _ParseContinue(s):
"""Parses a Unicode continuation field.
These are of the form '<Name, First>' or '<Name, Last>'.
Instead of giving an explicit range in a single table entry,
some Unicode tables use two entries, one for the first
code value in the range and one for the last.
The first entry's description is '<Name, First>' instead of 'Name'
and the second is '<Name, Last>'.
'<Name, First>' => ('Name', 'First')
'<Name, Last>' => ('Name', 'Last')
'Anything else' => ('Anything else', None)
Args:
s: continuation field string
Returns:
pair: name and ('First', 'Last', or None)
"""
match = re.match("<(.*), (First|Last)>", s)
if match is not None:
return match.groups()
return (s, None)
def ReadUnicodeTable(filename, nfields, doline):
  """Generic Unicode table text file reader.
  The reader takes care of stripping out comments and also
  parsing the two different ways that the Unicode tables specify
  code ranges (using the .. notation and splitting the range across
  multiple lines).
  Each non-comment line in the table is expected to have the given
  number of fields. The first field is known to be the Unicode value
  and the second field its description.
  The reader calls doline(codes, fields) for each entry in the table.
  If doline raises an exception, the reader prints that exception,
  prefixed with the file name and line number, and re-raises it.
  Arguments:
    filename: the Unicode data file to read, or a file-like object.
    nfields: the number of expected fields per line in that file.
    doline: the function to call for each table entry.
  Raises:
    InputError: nfields is invalid (must be >= 2).
  """
  if nfields < 2:
    raise InputError("invalid number of fields %d" % (nfields,))
  # Accept a path (local or http URL) or an already-open file-like object.
  if type(filename) == str:
    if filename.startswith("http://"):
      fil = urllib2.urlopen(filename)
    else:
      fil = open(filename, "r")
  else:
    fil = filename
  first = None # first code in multiline range
  expect_last = None # tag expected for "Last" line in multiline range
  lineno = 0 # current line number
  for line in fil:
    lineno += 1
    try:
      # Chop # comments and white space; ignore empty lines.
      sharp = line.find("#")
      if sharp >= 0:
        line = line[:sharp]
      line = line.strip()
      if not line:
        continue
      # Split fields on ";", chop more white space.
      # Must have the expected number of fields.
      fields = [s.strip() for s in line.split(";")]
      if len(fields) != nfields:
        raise InputError("wrong number of fields %d %d - %s" %
        (len(fields), nfields, line))
      # The Unicode text files have two different ways
      # to list a Unicode range. Either the first field is
      # itself a range (0000..FFFF), or the range is split
      # across two lines, with the second field noting
      # the continuation.
      codes = _URange(fields[0])
      (name, cont) = _ParseContinue(fields[1])
      if expect_last is not None:
        # If the last line gave the First code in a range,
        # this one had better give the Last one.
        if (len(codes) != 1 or codes[0] <= first or
            cont != "Last" or name != expect_last):
          raise InputError("expected Last line for %s" %
          (expect_last,))
        # Rewrite fields[0]/fields[1] so doline sees the merged range.
        codes = range(first, codes[0] + 1)
        first = None
        expect_last = None
        fields[0] = "%04X..%04X" % (codes[0], codes[-1])
        fields[1] = name
      elif cont == "First":
        # Otherwise, if this is the First code in a range,
        # remember it and go to the next line.
        if len(codes) != 1:
          raise InputError("bad First line: range given")
        expect_last = name
        first = codes[0]
        continue
      doline(codes, fields)
    except Exception, e:
      print "%s:%d: %s" % (filename, lineno, e)
      raise
  # A dangling '<Name, First>' with no matching Last line is an error.
  if expect_last is not None:
    raise InputError("expected Last line for %s; got EOF" %
    (expect_last,))
def CaseGroups(unicode_dir=_UNICODE_DIR):
  """Returns list of Unicode code groups equivalent under case folding.

  Each group is a sorted list of code points,
  and the list of groups is sorted by first code point
  in the group.

  Args:
    unicode_dir: Unicode data directory

  Returns:
    list of Unicode code groups
  """
  # Dict mapping lowercase code point to fold-equivalent group.
  togroup = {}
  def DoLine(codes, fields):
    """Process single CaseFolding.txt line, updating togroup."""
    (_, foldtype, lower, _) = fields
    # Only common (C) and simple (S) foldings define the 1:1
    # equivalences we group by; full (F) and Turkic (T) are skipped.
    if foldtype not in ("C", "S"):
      return
    lower = _UInt(lower)
    togroup.setdefault(lower, [lower]).extend(codes)
  ReadUnicodeTable(unicode_dir+"/CaseFolding.txt", 4, DoLine)
  groups = togroup.values()
  for g in groups:
    g.sort()
  groups.sort()
  # BUG FIX: previously returned the pair (togroup, groups); the documented
  # contract and callers (e.g. CaseGroupsTest) expect only the group list.
  return groups
def Scripts(unicode_dir=_UNICODE_DIR):
  """Returns dict mapping script names to code lists.

  Args:
    unicode_dir: Unicode data directory

  Returns:
    dict mapping script names to code lists
  """
  by_script = {}
  def DoLine(codes, fields):
    """Process single Scripts.txt line, updating by_script."""
    (_, script_name) = fields
    by_script.setdefault(script_name, []).extend(codes)
  ReadUnicodeTable(unicode_dir+"/Scripts.txt", 2, DoLine)
  return by_script
def Categories(unicode_dir=_UNICODE_DIR):
  """Returns dict mapping category names to code lists.

  Args:
    unicode_dir: Unicode data directory

  Returns:
    dict mapping category names to code lists
  """
  cats = {}
  def DoLine(codes, fields):
    """Process single UnicodeData.txt line, updating cats."""
    cat = fields[2]
    cats.setdefault(cat, []).extend(codes)
    # Every two-letter category (e.g. Lu) also feeds its one-letter
    # super-category (e.g. L).
    if len(cat) > 1:
      cats.setdefault(cat[0], []).extend(codes)
  ReadUnicodeTable(unicode_dir+"/UnicodeData.txt", 15, DoLine)
  return cats
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 The RE2 Authors. All Rights Reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Unittest for the util/regexp/re2/unicode.py module."""
import os
import StringIO
from google3.pyglib import flags
from google3.testing.pybase import googletest
from google3.util.regexp.re2 import unicode
_UNICODE_DIR = os.path.join(flags.FLAGS.test_srcdir, "google3", "third_party",
"unicode", "ucd-5.1.0")
class ConvertTest(googletest.TestCase):
  """Test the conversion functions."""
  def testUInt(self):
    for text, want in (("0000", 0x0000), ("263A", 0x263A), ("10FFFF", 0x10FFFF)):
      self.assertEquals(want, unicode._UInt(text))
    for bad in ("263", "263AAAA", "110000"):
      self.assertRaises(unicode.InputError, unicode._UInt, bad)
  def testURange(self):
    self.assertEquals([1, 2, 3], unicode._URange("0001..0003"))
    self.assertEquals([1], unicode._URange("0001"))
    for bad in ("0001..0003..0005", "0003..0001", "0001..0001"):
      self.assertRaises(unicode.InputError, unicode._URange, bad)
  def testUStr(self):
    self.assertEquals("0x263A", unicode._UStr(0x263a))
    self.assertEquals("0x10FFFF", unicode._UStr(0x10FFFF))
    for bad in (0x110000, -1):
      self.assertRaises(unicode.InputError, unicode._UStr, bad)
# Well-formed sample table exercising comments, explicit ranges and
# <Name, First>/<Name, Last> continuation lines.
_UNICODE_TABLE = """# Commented line, should be ignored.
# The next line is blank and should be ignored.
0041;Capital A;Line 1
0061..007A;Lowercase;Line 2
1F00;<Greek, First>;Ignored
1FFE;<Greek, Last>;Line 3
10FFFF;Runemax;Line 4
0000;Zero;Line 5
"""
# Value outside the valid Unicode range.
_BAD_TABLE1 = """
111111;Not a code point;
"""
# 'First' line with no matching 'Last' line.
_BAD_TABLE2 = """
0000;<Zero, First>;Missing <Zero, Last>
"""
# Reversed (hence invalid) range.
_BAD_TABLE3 = """
0010..0001;Bad range;
"""
class AbortError(Exception):
  """Function should not have been called."""


def Abort():
  """Callback that must never fire; raises AbortError if it does."""
  err = AbortError("Abort")
  raise err
def StringTable(s, n, f):
  """Run unicode.ReadUnicodeTable over literal table text s (n fields, callback f)."""
  unicode.ReadUnicodeTable(StringIO.StringIO(s), n, f)
class ReadUnicodeTableTest(googletest.TestCase):
  """Test the ReadUnicodeTable function."""
  def testSimpleTable(self):
    ncall = [0] # can't assign to ordinary int in DoLine
    def DoLine(codes, fields):
      # Each entry of _UNICODE_TABLE carries its expected call index
      # in the third field ("Line N").
      self.assertEquals(3, len(fields))
      ncall[0] += 1
      self.assertEquals("Line %d" % (ncall[0],), fields[2])
      if ncall[0] == 1:
        self.assertEquals([0x0041], codes)
        self.assertEquals("0041", fields[0])
        self.assertEquals("Capital A", fields[1])
      elif ncall[0] == 2:
        self.assertEquals(range(0x0061, 0x007A + 1), codes)
        self.assertEquals("0061..007A", fields[0])
        self.assertEquals("Lowercase", fields[1])
      elif ncall[0] == 3:
        # The First/Last pair must arrive merged into a single range.
        self.assertEquals(range(0x1F00, 0x1FFE + 1), codes)
        self.assertEquals("1F00..1FFE", fields[0])
        self.assertEquals("Greek", fields[1])
      elif ncall[0] == 4:
        self.assertEquals([0x10FFFF], codes)
        self.assertEquals("10FFFF", fields[0])
        self.assertEquals("Runemax", fields[1])
      elif ncall[0] == 5:
        self.assertEquals([0x0000], codes)
        self.assertEquals("0000", fields[0])
        self.assertEquals("Zero", fields[1])
    StringTable(_UNICODE_TABLE, 3, DoLine)
    self.assertEquals(5, ncall[0])
  def testErrorTables(self):
    # Malformed tables must raise InputError before the callback
    # (Abort) is ever invoked.
    self.assertRaises(unicode.InputError, StringTable, _UNICODE_TABLE, 4, Abort)
    self.assertRaises(unicode.InputError, StringTable, _UNICODE_TABLE, 2, Abort)
    self.assertRaises(unicode.InputError, StringTable, _BAD_TABLE1, 3, Abort)
    self.assertRaises(unicode.InputError, StringTable, _BAD_TABLE2, 3, Abort)
    self.assertRaises(unicode.InputError, StringTable, _BAD_TABLE3, 3, Abort)
class ParseContinueTest(googletest.TestCase):
  """Test the ParseContinue function."""
  def testParseContinue(self):
    self.assertEquals(("Private Use", "First"),
    unicode._ParseContinue("<Private Use, First>"))
    self.assertEquals(("Private Use", "Last"),
    unicode._ParseContinue("<Private Use, Last>"))
    # Anything not matching the <Name, First/Last> form passes through.
    self.assertEquals(("<Private Use, Blah>", None),
    unicode._ParseContinue("<Private Use, Blah>"))
class CaseGroupsTest(googletest.TestCase):
  """Test the CaseGroups function (and the CaseFoldingReader)."""
  def FindGroup(self, c):
    # c may be a one-character string or a bare code point.
    if type(c) == str:
      c = ord(c)
    for g in self.groups:
      if c in g:
        return g
    return None
  def testCaseGroups(self):
    self.groups = unicode.CaseGroups(unicode_dir=_UNICODE_DIR)
    self.assertEquals([ord("A"), ord("a")], self.FindGroup("a"))
    # Digits have no case folds, so they belong to no group.
    self.assertEquals(None, self.FindGroup("0"))
class ScriptsTest(googletest.TestCase):
  """Test the Scripts function (and the ScriptsReader)."""
  def FindScript(self, c):
    # Accept either a one-character string or a bare code point.
    if type(c) == str:
      c = ord(c)
    for script, codes in self.scripts.items():
      if c in codes:
        return script
    return None
  def testScripts(self):
    self.scripts = unicode.Scripts(unicode_dir=_UNICODE_DIR)
    self.assertEquals("Latin", self.FindScript("a"))
    self.assertEquals("Common", self.FindScript("0"))
    self.assertEquals(None, self.FindScript(0xFFFE))
class CategoriesTest(googletest.TestCase):
  """Test the Categories function (and the UnicodeDataReader)."""
  def FindCategory(self, c):
    # c may be a one-character string or a bare code point.
    if type(c) == str:
      c = ord(c)
    short = None
    for category, codes in self.categories.items():
      for code in codes:
        if code == c:
          # prefer category Nd over N
          if len(category) > 1:
            return category
          if short == None:
            short = category
    return short
  def testCategories(self):
    self.categories = unicode.Categories(unicode_dir=_UNICODE_DIR)
    self.assertEquals("Ll", self.FindCategory("a"))
    self.assertEquals("Nd", self.FindCategory("0"))
    self.assertEquals("Lo", self.FindCategory(0xAD00)) # in First, Last range
    self.assertEquals(None, self.FindCategory(0xFFFE))
    self.assertEquals("Lo", self.FindCategory(0x8B5A))
    self.assertEquals("Lo", self.FindCategory(0x6C38))
    self.assertEquals("Lo", self.FindCategory(0x92D2))
    # One-letter super-categories must include their sub-categories' codes.
    self.assertTrue(ord("a") in self.categories["L"])
    self.assertTrue(ord("0") in self.categories["N"])
    self.assertTrue(0x8B5A in self.categories["L"])
    self.assertTrue(0x6C38 in self.categories["L"])
    self.assertTrue(0x92D2 in self.categories["L"])
def main():
  """Run the googletest runner over this module's test cases."""
  googletest.main()
if __name__ == "__main__":
  main()
| Python |
# coding=utf-8
# (The line above is necessary so that I can use 世界 in the
# *comment* below without Python getting all bent out of shape.)
# Copyright 2007-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Mercurial interface to codereview.appspot.com.
To configure, set the following options in
your repository's .hg/hgrc file.
[extensions]
codereview = path/to/codereview.py
[codereview]
server = codereview.appspot.com
The server should be running Rietveld; see http://code.google.com/p/rietveld/.
In addition to the new commands, this extension introduces
the file pattern syntax @nnnnnn, where nnnnnn is a change list
number, to mean the files included in that change list, which
must be associated with the current client.
For example, if change 123456 contains the files x.go and y.go,
"hg diff @123456" is equivalent to"hg diff x.go y.go".
'''
from mercurial import cmdutil, commands, hg, util, error, match, discovery
from mercurial.node import nullrev, hex, nullid, short
import os, re, time
import stat
import subprocess
import threading
from HTMLParser import HTMLParser
# The standard 'json' package is new in Python 2.6.
# Before that it was an external package named simplejson.
try:
	# Standard location in 2.6 and beyond.
	import json
except Exception, e:
	try:
		# Conventional name for earlier package.
		import simplejson as json
	except:
		try:
			# Was also bundled with django, which is commonly installed.
			from django.utils import simplejson as json
		except:
			# We give up.
			raise e
try:
	hgversion = util.version()
except:
	# Older Mercurials exposed the version under mercurial.version instead.
	from mercurial.version import version as v
	hgversion = v.get_version()
# in Mercurial 1.9 the cmdutil.match and cmdutil.revpair moved to scmutil
if hgversion >= '1.9':
	from mercurial import scmutil
else:
	scmutil = cmdutil
oldMessage = """
The code review extension requires Mercurial 1.3 or newer.
To install a new Mercurial,
sudo easy_install mercurial
works on most systems.
"""
linuxMessage = """
You may need to clear your current Mercurial installation by running:
sudo apt-get remove mercurial mercurial-common
sudo rm -rf /etc/mercurial
"""
# Refuse to load at all on Mercurials older than 1.3; include the
# Linux-specific cleanup advice only when /etc/mercurial exists.
if hgversion < '1.3':
	msg = oldMessage
	if os.access("/etc/mercurial", 0):
		msg += linuxMessage
	raise util.Abort(msg)
def promptyesno(ui, msg):
	"""Prompt the user with msg; return True for yes, False for no."""
	# Arguments to ui.prompt changed between 1.3 and 1.3.1.
	# Even so, some 1.3.1 distributions seem to have the old prompt!?!?
	# What a terrible way to maintain software.
	try:
		return ui.promptchoice(msg, ["&yes", "&no"], 0) == 0
	except AttributeError:
		# Old API: ui.prompt returns the typed response string.
		return ui.prompt(msg, ["&yes", "&no"], "y") != "n"
def incoming(repo, other):
	"""Run 'hg incoming' against the repository other and return its output."""
	capture = FakeMercurialUI()
	status = commands.incoming(capture, repo, *[other.path], **{'bundle': '', 'force': False})
	# Exit status 1 just means "no incoming changes"; anything else is fatal.
	if status and status != 1:
		raise util.Abort(status)
	return capture.output
def outgoing(repo):
	"""Run 'hg outgoing' for repo and return its captured output."""
	capture = FakeMercurialUI()
	status = commands.outgoing(capture, repo, *[], **{})
	# Exit status 1 just means "no outgoing changes"; anything else is fatal.
	if status and status != 1:
		raise util.Abort(status)
	return capture.output
# To experiment with Mercurial in the python interpreter:
# >>> repo = hg.repository(ui.ui(), path = ".")
#######################################################################
# Normally I would split this into multiple files, but it simplifies
# import path headaches to keep it all in one file. Sorry.
import sys
# This file is a Mercurial extension, not a standalone program.
if __name__ == "__main__":
	print >>sys.stderr, "This is a Mercurial extension and should not be invoked directly."
	sys.exit(2)
# Global configuration; filled in from hgrc / server responses during setup.
server = "codereview.appspot.com"
server_url_base = None
defaultcc = None
contributors = {}
missing_codereview = None
real_rollback = None
releaseBranch = None
#######################################################################
# RE: UNICODE STRING HANDLING
#
# Python distinguishes between the str (string of bytes)
# and unicode (string of code points) types. Most operations
# work on either one just fine, but some (like regexp matching)
# require unicode, and others (like write) require str.
#
# As befits the language, Python hides the distinction between
# unicode and str by converting between them silently, but
# *only* if all the bytes/code points involved are 7-bit ASCII.
# This means that if you're not careful, your program works
# fine on "hello, world" and fails on "hello, 世界". And of course,
# the obvious way to be careful - use static types - is unavailable.
# So the only way is trial and error to find where to put explicit
# conversions.
#
# Because more functions do implicit conversion to str (string of bytes)
# than do implicit conversion to unicode (string of code points),
# the convention in this module is to represent all text as str,
# converting to unicode only when calling a unicode-only function
# and then converting back to str as soon as possible.
def typecheck(s, t):
	"""Abort unless s is exactly of type t (subclasses do not count)."""
	if type(s) is not t:
		raise util.Abort("type check failed: %s has type %s != %s" % (repr(s), type(s), t))
# If we have to pass unicode instead of str, ustr does that conversion clearly.
def ustr(s):
	"""Convert a UTF-8 encoded str into a unicode string."""
	typecheck(s, str)
	return s.decode("utf-8")
# Even with those, Mercurial still sometimes turns unicode into str
# and then tries to use it as ascii. Change Mercurial's default.
def set_mercurial_encoding_to_utf8():
	"""Force Mercurial's internal text encoding to UTF-8."""
	from mercurial import encoding
	encoding.encoding = 'utf-8'
# Applied at import time so every later Mercurial call sees UTF-8.
set_mercurial_encoding_to_utf8()
# Even with those we still run into problems.
# I tried to do things by the book but could not convince
# Mercurial to let me check in a change with UTF-8 in the
# CL description or author field, no matter how many conversions
# between str and unicode I inserted and despite changing the
# default encoding. I'm tired of this game, so set the default
# encoding for all of Python to 'utf-8', not 'ascii'.
def default_to_utf8():
	"""Switch Python's process-wide default string encoding from ascii to utf-8."""
	import sys
	# reload(sys) resets sys.stdout; preserve the current streams across it.
	stdout, __stdout__ = sys.stdout, sys.__stdout__
	reload(sys) # site.py deleted setdefaultencoding; get it back
	sys.stdout, sys.__stdout__ = stdout, __stdout__
	sys.setdefaultencoding('utf-8')
default_to_utf8()
#######################################################################
# Change list parsing.
#
# Change lists are stored in .hg/codereview/cl.nnnnnn
# where nnnnnn is the number assigned by the code review server.
# Most data about a change list is stored on the code review server
# too: the description, reviewer, and cc list are all stored there.
# The only thing in the cl.nnnnnn file is the list of relevant files.
# Also, the existence of the cl.nnnnnn file marks this repository
# as the one where the change list lives.
# Minimal placeholder diff uploaded when a CL is created before any files
# are attached (Rietveld requires some diff content on every patch set).
emptydiff = """Index: ~rietveld~placeholder~
===================================================================
diff --git a/~rietveld~placeholder~ b/~rietveld~placeholder~
new file mode 100644
"""
class CL(object):
	"""In-memory representation of a single change list (CL).

	Mirrors the on-disk .hg/codereview/cl.<n> file plus the metadata
	kept on the code review server.
	"""
	def __init__(self, name):
		typecheck(name, str)
		self.name = name
		self.desc = ''
		self.files = []
		self.reviewer = []
		self.cc = []
		self.url = ''
		self.local = False
		self.web = False
		self.copied_from = None # None means current user
		self.mailed = False
		self.private = False
		self.lgtm = []
	def DiskText(self):
		"""Render the CL in the format stored in the cl.<n> file."""
		cl = self
		s = ""
		if cl.copied_from:
			s += "Author: " + cl.copied_from + "\n\n"
		if cl.private:
			s += "Private: " + str(self.private) + "\n"
		s += "Mailed: " + str(self.mailed) + "\n"
		s += "Description:\n"
		s += Indent(cl.desc, "\t")
		s += "Files:\n"
		for f in cl.files:
			s += "\t" + f + "\n"
		typecheck(s, str)
		return s
	def EditorText(self):
		"""Render the CL in the editable form presented to the user."""
		cl = self
		s = _change_prolog
		s += "\n"
		if cl.copied_from:
			s += "Author: " + cl.copied_from + "\n"
		if cl.url != '':
			s += 'URL: ' + cl.url + ' # cannot edit\n\n'
		if cl.private:
			s += "Private: True\n"
		s += "Reviewer: " + JoinComma(cl.reviewer) + "\n"
		s += "CC: " + JoinComma(cl.cc) + "\n"
		s += "\n"
		s += "Description:\n"
		if cl.desc == '':
			s += "\t<enter description here>\n"
		else:
			s += Indent(cl.desc, "\t")
		s += "\n"
		if cl.local or cl.name == "new":
			s += "Files:\n"
			for f in cl.files:
				s += "\t" + f + "\n"
			s += "\n"
		typecheck(s, str)
		return s
	def PendingText(self):
		"""Render the per-CL summary used by 'hg pending'."""
		cl = self
		s = cl.name + ":" + "\n"
		s += Indent(cl.desc, "\t")
		s += "\n"
		if cl.copied_from:
			s += "\tAuthor: " + cl.copied_from + "\n"
		s += "\tReviewer: " + JoinComma(cl.reviewer) + "\n"
		for (who, line) in cl.lgtm:
			s += "\t\t" + who + ": " + line + "\n"
		s += "\tCC: " + JoinComma(cl.cc) + "\n"
		s += "\tFiles:\n"
		for f in cl.files:
			s += "\t\t" + f + "\n"
		typecheck(s, str)
		return s
	def Flush(self, ui, repo):
		"""Write the CL to disk (atomically) and push edits to the server."""
		if self.name == "new":
			self.Upload(ui, repo, gofmt_just_warn=True, creating=True)
		dir = CodeReviewDir(ui, repo)
		path = dir + '/cl.' + self.name
		# Write to path+'!' then rename, so readers never see a partial file.
		f = open(path+'!', "w")
		f.write(self.DiskText())
		f.close()
		if sys.platform == "win32" and os.path.isfile(path):
			os.remove(path)
		os.rename(path+'!', path)
		if self.web and not self.copied_from:
			EditDesc(self.name, desc=self.desc,
				reviewers=JoinComma(self.reviewer), cc=JoinComma(self.cc),
				private=self.private)
	def Delete(self, ui, repo):
		"""Remove the CL's file from the codereview directory."""
		dir = CodeReviewDir(ui, repo)
		os.unlink(dir + "/cl." + self.name)
	def Subject(self):
		"""Return the e-mail subject line for this CL."""
		s = line1(self.desc)
		if len(s) > 60:
			s = s[0:55] + "..."
		if self.name != "new":
			s = "code review %s: %s" % (self.name, s)
		typecheck(s, str)
		return s
	def Upload(self, ui, repo, send_mail=False, gofmt=True, gofmt_just_warn=False, creating=False, quiet=False):
		"""Upload CL metadata (and diffs, unless creating) to the review server."""
		if not self.files and not creating:
			ui.warn("no files in change list\n")
		if ui.configbool("codereview", "force_gofmt", True) and gofmt:
			CheckFormat(ui, repo, self.files, just_warn=gofmt_just_warn)
		set_status("uploading CL metadata + diffs")
		os.chdir(repo.root)
		form_fields = [
			("content_upload", "1"),
			("reviewers", JoinComma(self.reviewer)),
			("cc", JoinComma(self.cc)),
			("description", self.desc),
			("base_hashes", ""),
		]
		if self.name != "new":
			form_fields.append(("issue", self.name))
		vcs = None
		# We do not include files when creating the issue,
		# because we want the patch sets to record the repository
		# and base revision they are diffs against. We use the patch
		# set message for that purpose, but there is no message with
		# the first patch set. Instead the message gets used as the
		# new CL's overall subject. So omit the diffs when creating
		# and then we'll run an immediate upload.
		# This has the effect that every CL begins with an empty "Patch set 1".
		if self.files and not creating:
			vcs = MercurialVCS(upload_options, ui, repo)
			data = vcs.GenerateDiff(self.files)
			files = vcs.GetBaseFiles(data)
			# Oversized diffs are sent as separate per-file patches below.
			if len(data) > MAX_UPLOAD_SIZE:
				uploaded_diff_file = []
				form_fields.append(("separate_patches", "1"))
			else:
				uploaded_diff_file = [("data", "data.diff", data)]
		else:
			uploaded_diff_file = [("data", "data.diff", emptydiff)]
		if vcs and self.name != "new":
			form_fields.append(("subject", "diff -r " + vcs.base_rev + " " + getremote(ui, repo, {}).path))
		else:
			# First upload sets the subject for the CL itself.
			form_fields.append(("subject", self.Subject()))
		ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
		response_body = MySend("/upload", body, content_type=ctype)
		patchset = None
		msg = response_body
		lines = msg.splitlines()
		if len(lines) >= 2:
			msg = lines[0]
			patchset = lines[1].strip()
			patches = [x.split(" ", 1) for x in lines[2:]]
		if response_body.startswith("Issue updated.") and quiet:
			pass
		else:
			ui.status(msg + "\n")
		set_status("uploaded CL metadata + diffs")
		if not response_body.startswith("Issue created.") and not response_body.startswith("Issue updated."):
			raise util.Abort("failed to update issue: " + response_body)
		# The server reply ends with the issue URL; its last path component
		# is the issue number, which becomes this CL's name.
		issue = msg[msg.rfind("/")+1:]
		self.name = issue
		if not self.url:
			self.url = server_url_base + self.name
		if not uploaded_diff_file:
			set_status("uploading patches")
			patches = UploadSeparatePatches(issue, rpc, patchset, data, upload_options)
		if vcs:
			set_status("uploading base files")
			vcs.UploadBaseFiles(issue, rpc, patches, patchset, upload_options, files)
		if send_mail:
			set_status("sending mail")
			MySend("/" + issue + "/mail", payload="")
		self.web = True
		set_status("flushing changes to disk")
		self.Flush(ui, repo)
		return
	def Mail(self, ui, repo):
		"""Send the review-request e-mail for this CL and mark it mailed."""
		pmsg = "Hello " + JoinComma(self.reviewer)
		if self.cc:
			pmsg += " (cc: %s)" % (', '.join(self.cc),)
		pmsg += ",\n"
		pmsg += "\n"
		repourl = getremote(ui, repo, {}).path
		if not self.mailed:
			pmsg += "I'd like you to review this change to\n" + repourl + "\n"
		else:
			pmsg += "Please take another look.\n"
		typecheck(pmsg, str)
		PostMessage(ui, self.name, pmsg, subject=self.Subject())
		self.mailed = True
		self.Flush(ui, repo)
def GoodCLName(name):
	"""Report whether name is a well-formed CL name (all decimal digits)."""
	typecheck(name, str)
	return re.match("^[0-9]+$", name)
def ParseCL(text, name):
	"""Parse on-disk/editor CL text into a CL object.

	Returns (cl, 0, '') on success, or (None, lineno, errmsg) on failure.
	"""
	typecheck(text, str)
	typecheck(name, str)
	sname = None
	lineno = 0
	# Accumulated raw text for each recognized section header.
	sections = {
		'Author': '',
		'Description': '',
		'Files': '',
		'URL': '',
		'Reviewer': '',
		'CC': '',
		'Mailed': '',
		'Private': '',
	}
	for line in text.split('\n'):
		lineno += 1
		line = line.rstrip()
		if line != '' and line[0] == '#':
			continue
		# Indented (or blank) lines are continuations of the current section.
		if line == '' or line[0] == ' ' or line[0] == '\t':
			if sname == None and line != '':
				return None, lineno, 'text outside section'
			if sname != None:
				sections[sname] += line + '\n'
			continue
		p = line.find(':')
		if p >= 0:
			s, val = line[:p].strip(), line[p+1:].strip()
			if s in sections:
				sname = s
				if val != '':
					sections[sname] += val + '\n'
				continue
		return None, lineno, 'malformed section header'
	for k in sections:
		sections[k] = StripCommon(sections[k]).rstrip()
	cl = CL(name)
	if sections['Author']:
		cl.copied_from = sections['Author']
	cl.desc = sections['Description']
	for line in sections['Files'].split('\n'):
		# Strip per-file trailing comments.
		i = line.find('#')
		if i >= 0:
			line = line[0:i].rstrip()
		line = line.strip()
		if line == '':
			continue
		cl.files.append(line)
	cl.reviewer = SplitCommaSpace(sections['Reviewer'])
	cl.cc = SplitCommaSpace(sections['CC'])
	cl.url = sections['URL']
	if sections['Mailed'] != 'False':
		# Odd default, but avoids spurious mailings when
		# reading old CLs that do not have a Mailed: line.
		# CLs created with this update will always have
		# Mailed: False on disk.
		cl.mailed = True
	if sections['Private'] in ('True', 'true', 'Yes', 'yes'):
		cl.private = True
	if cl.desc == '<enter description here>':
		cl.desc = ''
	return cl, 0, ''
def SplitCommaSpace(s):
	"""Split comma-separated s into a list of items; empty input yields []."""
	typecheck(s, str)
	trimmed = s.strip()
	if not trimmed:
		return []
	return re.split(", *", trimmed)
def CutDomain(s):
	"""Return the local part of an e-mail address (text before the first '@')."""
	typecheck(s, str)
	return s.partition('@')[0]
def JoinComma(l):
	"""Join the strings in l with ', ', verifying every element is a str."""
	for elem in l:
		typecheck(elem, str)
	return ", ".join(l)
def ExceptionDetail():
	"""Describe the exception currently being handled as 'TypeName: detail'."""
	name = str(sys.exc_info()[0])
	# str() of an exception class renders as "<type 'X'>" (old-style)
	# or "<class 'X'>"; strip the wrapper down to the bare name.
	if name.startswith("<type '") and name.endswith("'>"):
		name = name[7:-2]
	elif name.startswith("<class '") and name.endswith("'>"):
		name = name[8:-2]
	detail = str(sys.exc_info()[1])
	if detail:
		name += ": " + detail
	return name
def IsLocalCL(ui, repo, name):
	"""Report whether a cl.<name> file exists in this repository."""
	return GoodCLName(name) and os.access(CodeReviewDir(ui, repo) + "/cl." + name, 0)
# Load CL from disk and/or the web.
def LoadCL(ui, repo, name, web=True):
	"""Return (cl, err) for the named change list.

	Reads .hg/codereview/cl.<name> when present and, if web is True,
	merges in metadata fetched from the code review server.
	"""
	typecheck(name, str)
	set_status("loading CL " + name)
	if not GoodCLName(name):
		return None, "invalid CL name"
	dir = CodeReviewDir(ui, repo)
	path = dir + "cl." + name
	if os.access(path, 0):
		ff = open(path)
		text = ff.read()
		ff.close()
		cl, lineno, err = ParseCL(text, name)
		if err != "":
			return None, "malformed CL data: "+err
		cl.local = True
	else:
		cl = CL(name)
	if web:
		set_status("getting issue metadata from web")
		d = JSONGet(ui, "/api/" + name + "?messages=true")
		set_status(None)
		if d is None:
			return None, "cannot load CL %s from server" % (name,)
		# Sanity-check the server response before trusting it.
		if 'owner_email' not in d or 'issue' not in d or str(d['issue']) != name:
			return None, "malformed response loading CL data from code review server"
		cl.dict = d
		cl.reviewer = d.get('reviewers', [])
		cl.cc = d.get('cc', [])
		if cl.local and cl.copied_from and cl.desc:
			# local copy of CL written by someone else
			# and we saved a description. use that one,
			# so that committers can edit the description
			# before doing hg submit.
			pass
		else:
			cl.desc = d.get('description', "")
		cl.url = server_url_base + name
		cl.web = True
		cl.private = d.get('private', False) != False
		cl.lgtm = []
		# Collect approval ("LGTM") messages as (sender, first line) pairs.
		for m in d.get('messages', []):
			if m.get('approval', False) == True:
				who = re.sub('@.*', '', m.get('sender', ''))
				text = re.sub("\n(.|\n)*", '', m.get('text', ''))
				cl.lgtm.append((who, text))
	set_status("loaded CL " + name)
	return cl, ''
# Most recent status message, displayed periodically by StatusThread.
global_status = None
def set_status(s):
	"""Record s as the current long-running-operation status message."""
	# print >>sys.stderr, "\t", time.asctime(), s
	global global_status
	global_status = s
class StatusThread(threading.Thread):
	"""Daemon thread that periodically prints global_status to stderr."""
	def __init__(self):
		threading.Thread.__init__(self)
	def run(self):
		# pause a reasonable amount of time before
		# starting to display status messages, so that
		# most hg commands won't ever see them.
		time.sleep(30)
		# now show status every 15 seconds
		while True:
			# Align wake-ups to 15-second wall-clock boundaries.
			time.sleep(15 - time.time() % 15)
			s = global_status
			if s is None:
				continue
			if s == "":
				s = "(unknown status)"
			print >>sys.stderr, time.asctime(), s
def start_status_thread():
	"""Start the background StatusThread that reports progress to stderr."""
	t = StatusThread()
	t.setDaemon(True) # allowed to exit if t is still running
	t.start()
class LoadCLThread(threading.Thread):
	"""Thread that loads one CL file via LoadCL; result left in self.cl."""
	def __init__(self, ui, repo, dir, f, web):
		threading.Thread.__init__(self)
		self.ui = ui
		self.repo = repo
		self.dir = dir
		self.f = f
		self.web = web
		self.cl = None
	def run(self):
		# f is 'cl.<name>'; strip the 'cl.' prefix to get the CL name.
		cl, err = LoadCL(self.ui, self.repo, self.f[3:], web=self.web)
		if err != '':
			self.ui.warn("loading "+self.dir+self.f+": " + err + "\n")
			return
		self.cl = cl
# Load all the CLs from this repository.
def LoadAllCL(ui, repo, web=True):
	"""Return a dict mapping CL name to CL for every cl.* file in the repo."""
	dir = CodeReviewDir(ui, repo)
	m = {}
	files = [f for f in os.listdir(dir) if f.startswith('cl.')]
	if not files:
		return m
	active = []
	first = True
	# Load CLs in parallel threads; the first one runs alone so that any
	# authentication prompt happens once, not once per thread.
	for f in files:
		t = LoadCLThread(ui, repo, dir, f, web)
		t.start()
		if web and first:
			# first request: wait in case it needs to authenticate
			# otherwise we get lots of user/password prompts
			# running in parallel.
			t.join()
			if t.cl:
				m[t.cl.name] = t.cl
			first = False
		else:
			active.append(t)
	for t in active:
		t.join()
		if t.cl:
			m[t.cl.name] = t.cl
	return m
# Find repository root. On error, ui.warn and return None
def RepoDir(ui, repo):
	"""Return the local filesystem path of repo's root, or None with a warning."""
	url = repo.url();
	if not url.startswith('file:'):
		ui.warn("repository %s is not in local file system\n" % (url,))
		return None
	# Strip the 'file:' scheme prefix and any trailing slash.
	url = url[5:]
	if url.endswith('/'):
		url = url[:-1]
	typecheck(url, str)
	return url
# Find (or make) code review directory. On error, ui.warn and return None
def CodeReviewDir(ui, repo):
	"""Return the repo's .hg/codereview/ path (with trailing '/'), creating it if needed."""
	dir = RepoDir(ui, repo)
	if dir == None:
		return None
	dir += '/.hg/codereview/'
	if not os.path.isdir(dir):
		try:
			# 0700: the directory holds CL data; restrict it to the owner.
			os.mkdir(dir, 0700)
		except:
			ui.warn('cannot mkdir %s: %s\n' % (dir, ExceptionDetail()))
			return None
	typecheck(dir, str)
	return dir
# Turn leading tabs into spaces, so that the common white space
# prefix doesn't get confused when people's editors write out
# some lines with spaces, some with tabs. Only a heuristic
# (some editors don't use 8 spaces either) but a useful one.
def TabsToSpaces(line):
	"""Expand each leading tab of line into 8 spaces; the rest is untouched."""
	stripped = line.lstrip('\t')
	ntabs = len(line) - len(stripped)
	return ' '*(8*ntabs) + stripped
# Strip maximal common leading white space prefix from text
def StripCommon(text):
	"""Remove the longest whitespace prefix shared by all non-blank lines of text."""
	typecheck(text, str)
	ws = None # common whitespace prefix seen so far (None = no lines yet)
	# Pass 1: compute the common prefix across all non-blank lines.
	for line in text.split('\n'):
		line = line.rstrip()
		if line == '':
			continue
		line = TabsToSpaces(line)
		white = line[:len(line)-len(line.lstrip())]
		if ws == None:
			ws = white
		else:
			common = ''
			for i in range(min(len(white), len(ws))+1):
				if white[0:i] == ws[0:i]:
					common = white[0:i]
			ws = common
		if ws == '':
			break
	if ws == None:
		return text
	# Pass 2: rebuild the text with the shared prefix removed,
	# dropping leading blank lines and collapsing trailing newlines.
	t = ''
	for line in text.split('\n'):
		line = line.rstrip()
		line = TabsToSpaces(line)
		if line.startswith(ws):
			line = line[len(ws):]
		if line == '' and t == '':
			continue
		t += line + '\n'
	while len(t) >= 2 and t[-2:] == '\n\n':
		t = t[:-1]
	typecheck(t, str)
	return t
# Indent text with indent.
def Indent(text, indent):
	"""Prefix every line of text with indent; result always ends with a newline."""
	typecheck(text, str)
	typecheck(indent, str)
	out = ''.join(indent + line + '\n' for line in text.split('\n'))
	typecheck(out, str)
	return out
# Return the first line of l
def line1(text):
	"""Return the first line of text (everything before the first newline)."""
	typecheck(text, str)
	head, _, _ = text.partition('\n')
	return head
# Template text placed at the top of a new CL description when it is
# opened in the user's editor.
_change_prolog = """# Change list.
# Lines beginning with # are ignored.
# Multi-line values should be indented.
"""
#######################################################################
# Mercurial helper functions
# Get effective change nodes taking into account applied MQ patches
def effective_revpair(repo):
	# Resolve the effective revision pair, preferring the MQ 'qparent'
	# so that applied MQ patches are taken into account; fall back to the
	# default pair when there is no patch queue.
	try:
		return scmutil.revpair(repo, ['qparent'])
	except:
		return scmutil.revpair(repo, None)
# Return list of changed files in repository that match pats.
# Warn about patterns that did not match.
def matchpats(ui, repo, pats, opts):
	# Run repo.status over the effective revision pair for the given
	# patterns, returning the full 7-tuple:
	# (modified, added, removed, deleted, unknown, ignored, clean).
	matcher = scmutil.match(repo, pats, opts)
	node1, node2 = effective_revpair(repo)
	modified, added, removed, deleted, unknown, ignored, clean = repo.status(node1, node2, matcher, ignored=True, clean=True, unknown=True)
	return (modified, added, removed, deleted, unknown, ignored, clean)
# Return list of changed files in repository that match pats.
# The patterns came from the command line, so we warn
# if they have no effect or cannot be understood.
def ChangedFiles(ui, repo, pats, opts, taken=None):
	# Return the sorted list of changed files matching pats, excluding
	# files already claimed by other CLs (the taken map). Prompts to
	# 'hg add' unknown files and 'hg remove' deleted ones along the way.
	taken = taken or {}
	# Run each pattern separately so that we can warn about
	# patterns that didn't do anything useful.
	for p in pats:
		modified, added, removed, deleted, unknown, ignored, clean = matchpats(ui, repo, [p], opts)
		redo = False
		for f in unknown:
			promptadd(ui, repo, f)
			redo = True
		for f in deleted:
			promptremove(ui, repo, f)
			redo = True
		if redo:
			# Re-run status since the adds/removes above changed it.
			modified, added, removed, deleted, unknown, ignored, clean = matchpats(ui, repo, [p], opts)
		for f in modified + added + removed:
			if f in taken:
				ui.warn("warning: %s already in CL %s\n" % (f, taken[f].name))
		if not modified and not added and not removed:
			ui.warn("warning: %s did not match any modified files\n" % (p,))
	# Again, all at once (eliminates duplicates)
	modified, added, removed = matchpats(ui, repo, pats, opts)[:3]
	l = modified + added + removed
	l.sort()
	if taken:
		l = Sub(l, taken.keys())
	return l
# Return list of changed files in repository that match pats and still exist.
def ChangedExistingFiles(ui, repo, pats, opts):
	"""Return the sorted list of files matching pats that were modified
	or added — i.e. changed files still present in the working copy."""
	status = matchpats(ui, repo, pats, opts)
	existing = status[0] + status[1]  # modified + added
	existing.sort()
	return existing
# Return list of files claimed by existing CLs
def Taken(ui, repo):
	"""Return a dict mapping each file claimed by an existing CL to that CL."""
	claimed = {}
	for cl in LoadAllCL(ui, repo, web=False).values():
		for f in cl.files:
			claimed[f] = cl
	return claimed
# Return list of changed files that are not claimed by other CLs
def DefaultFiles(ui, repo, pats, opts):
	# ChangedFiles, excluding files already assigned to some CL.
	return ChangedFiles(ui, repo, pats, opts, taken=Taken(ui, repo))
def Sub(l1, l2):
	"""Return the elements of l1 that are not in l2, preserving l1's order."""
	return list(filter(lambda item: item not in l2, l1))
def Add(l1, l2):
	"""Return the sorted union of l1 and l2 (duplicates within l1 kept)."""
	merged = l1 + [item for item in l2 if item not in l1]
	merged.sort()
	return merged
def Intersect(l1, l2):
	"""Return the elements of l1 that also appear in l2, in l1's order."""
	result = []
	for item in l1:
		if item in l2:
			result.append(item)
	return result
def getremote(ui, repo, opts):
	# Open a connection to the default remote repository and return it.
	# save $http_proxy; creating the HTTP repo object will
	# delete it in an attempt to "help"
	proxy = os.environ.get('http_proxy')
	source = hg.parseurl(ui.expandpath("default"), None)[0]
	try:
		remoteui = hg.remoteui # hg 1.6
	except:
		# Older Mercurial keeps remoteui in cmdutil.
		remoteui = cmdutil.remoteui
	other = hg.repository(remoteui(repo, opts), source)
	if proxy is not None:
		# Restore the proxy variable that hg.repository may have removed.
		os.environ['http_proxy'] = proxy
	return other
desc_re = '^(.+: |(tag )?(release|weekly)\.|fix build|undo CL)'
desc_msg = '''Your CL description appears not to use the standard form.
The first line of your change description is conventionally a
one-line summary of the change, prefixed by the primary affected package,
and is used as the subject for code review mail; the rest of the description
elaborates.
Examples:
encoding/rot13: new package
math: add IsInf, IsNaN
net: fix cname in LookupHost
unicode: update to Unicode 5.0.2
'''
def promptremove(ui, repo, f):
	# Ask the user whether to 'hg remove' file f; warn if the removal fails.
	if promptyesno(ui, "hg remove %s (y/n)?" % (f,)):
		if commands.remove(ui, repo, 'path:'+f) != 0:
			ui.warn("error removing %s" % (f,))
def promptadd(ui, repo, f):
	# Ask the user whether to 'hg add' file f; warn if the add fails.
	if promptyesno(ui, "hg add %s (y/n)?" % (f,)):
		if commands.add(ui, repo, 'path:'+f) != 0:
			ui.warn("error adding %s" % (f,))
def EditCL(ui, repo, cl):
	# Open cl in the user's editor, validate the edited text, and copy
	# the accepted fields back into cl. Loops until the text parses and
	# passes the sanity checks (or the user gives up). Returns "" on
	# success, or a message if the CL was left unmodified.
	set_status(None) # do not show status
	s = cl.EditorText()
	while True:
		s = ui.edit(s, ui.username())
		# We can't trust Mercurial + Python not to die before making the change,
		# so, by popular demand, just scribble the most recent CL edit into
		# $(hg root)/last-change so that if Mercurial does die, people
		# can look there for their work.
		try:
			f = open(repo.root+"/last-change", "w")
			f.write(s)
			f.close()
		except:
			pass
		clx, line, err = ParseCL(s, cl.name)
		if err != '':
			if not promptyesno(ui, "error parsing change list: line %d: %s\nre-edit (y/n)?" % (line, err)):
				return "change list not modified"
			continue
		# Check description.
		if clx.desc == '':
			if promptyesno(ui, "change list should have a description\nre-edit (y/n)?"):
				continue
		elif re.search('<enter reason for undo>', clx.desc):
			# Undo template placeholder was never filled in.
			if promptyesno(ui, "change list description omits reason for undo\nre-edit (y/n)?"):
				continue
		elif not re.match(desc_re, clx.desc.split('\n')[0]):
			if promptyesno(ui, desc_msg + "re-edit (y/n)?"):
				continue
		# Check file list for files that need to be hg added or hg removed
		# or simply aren't understood.
		pats = ['path:'+f for f in clx.files]
		modified, added, removed, deleted, unknown, ignored, clean = matchpats(ui, repo, pats, {})
		files = []
		for f in clx.files:
			if f in modified or f in added or f in removed:
				files.append(f)
				continue
			if f in deleted:
				promptremove(ui, repo, f)
				files.append(f)
				continue
			if f in unknown:
				promptadd(ui, repo, f)
				files.append(f)
				continue
			if f in ignored:
				ui.warn("error: %s is excluded by .hgignore; omitting\n" % (f,))
				continue
			if f in clean:
				ui.warn("warning: %s is listed in the CL but unchanged\n" % (f,))
				files.append(f)
				continue
			# Not known to hg at all: keep plain files (with a warning),
			# reject directories and nonexistent paths.
			p = repo.root + '/' + f
			if os.path.isfile(p):
				ui.warn("warning: %s is a file but not known to hg\n" % (f,))
				files.append(f)
				continue
			if os.path.isdir(p):
				ui.warn("error: %s is a directory, not a file; omitting\n" % (f,))
				continue
			ui.warn("error: %s does not exist; omitting\n" % (f,))
		clx.files = files
		# Edited text accepted: copy fields into the live CL object.
		cl.desc = clx.desc
		cl.reviewer = clx.reviewer
		cl.cc = clx.cc
		cl.files = clx.files
		cl.private = clx.private
		break
	return ""
# For use by submit, etc. (NOT by change)
# Get change list number or list of files from command line.
# If files are given, make a new change list.
def CommandLineCL(ui, repo, pats, opts, defaultcc=None):
	# Build a CL from the command line: either load the named existing CL
	# (first pattern is a CL number) or create a new one from the matched
	# changed files. Returns (cl, "") or (None, errmsg).
	if len(pats) > 0 and GoodCLName(pats[0]):
		if len(pats) != 1:
			return None, "cannot specify change number and file names"
		if opts.get('message'):
			return None, "cannot use -m with existing CL"
		cl, err = LoadCL(ui, repo, pats[0], web=True)
		if err != "":
			return None, err
	else:
		cl = CL("new")
		cl.local = True
		cl.files = ChangedFiles(ui, repo, pats, opts, taken=Taken(ui, repo))
		if not cl.files:
			return None, "no files changed"
	# Fold in command-line reviewers/CCs and the default CC list.
	if opts.get('reviewer'):
		cl.reviewer = Add(cl.reviewer, SplitCommaSpace(opts.get('reviewer')))
	if opts.get('cc'):
		cl.cc = Add(cl.cc, SplitCommaSpace(opts.get('cc')))
	if defaultcc:
		cl.cc = Add(cl.cc, defaultcc)
	if cl.name == "new":
		# New CL: take the description from -m or open the editor.
		if opts.get('message'):
			cl.desc = opts.get('message')
		else:
			err = EditCL(ui, repo, cl)
			if err != '':
				return None, err
	return cl, ""
# reposetup replaces cmdutil.match with this wrapper,
# which expands the syntax @clnumber to mean the files
# in that CL.
# Saved copy of scmutil.match, installed by reposetup; the wrapper below
# falls through to it after expanding @clnumber patterns.
original_match = None
# Set by reposetup; ReplacementForCmdutilMatch has no repo/ui parameters
# of its own, so it reaches them through these globals.
global_repo = None
global_ui = None
def ReplacementForCmdutilMatch(ctx, pats=None, opts=None, globbed=False, default='relpath'):
	# Wrapper installed over scmutil.match: expands the '@clnumber'
	# pattern syntax into 'path:' patterns for the files in that CL,
	# then delegates to the saved original matcher.
	taken = []
	files = []
	pats = pats or []
	opts = opts or {}
	for p in pats:
		if p.startswith('@'):
			taken.append(p)
			clname = p[1:]
			if not GoodCLName(clname):
				raise util.Abort("invalid CL name " + clname)
			cl, err = LoadCL(global_repo.ui, global_repo, clname, web=False)
			if err != '':
				raise util.Abort("loading CL " + clname + ": " + err)
			if not cl.files:
				raise util.Abort("no files in CL " + clname)
			files = Add(files, cl.files)
	# Replace the @cl patterns with explicit path: patterns.
	pats = Sub(pats, taken) + ['path:'+f for f in files]
	# work-around for http://selenic.com/hg/rev/785bbc8634f8
	if hgversion >= '1.9' and not hasattr(ctx, 'match'):
		ctx = ctx[None]
	return original_match(ctx, pats=pats, opts=opts, globbed=globbed, default=default)
def RelativePath(path, cwd):
	"""Return path relative to cwd if path lies strictly inside cwd;
	otherwise return path unchanged.

	Uses path[n:n+1] instead of path[n] so that path == cwd (or any path
	exactly as long as cwd) does not raise IndexError; such a path is
	simply returned unchanged.
	"""
	n = len(cwd)
	if path.startswith(cwd) and path[n:n+1] == '/':
		return path[n+1:]
	return path
def CheckFormat(ui, repo, files, just_warn=False):
	# Run both formatting checks (gofmt for Go, tab indentation for
	# C/asm) over files; each check aborts or warns per just_warn.
	set_status("running gofmt")
	CheckGofmt(ui, repo, files, just_warn)
	CheckTabfmt(ui, repo, files, just_warn)
# Check that gofmt run on the list of files does not change them
def CheckGofmt(ui, repo, files, just_warn):
	# Run 'gofmt -l' on the tracked .go files and abort (or warn, if
	# just_warn) when any file would be reformatted.
	files = [f for f in files if (f.startswith('src/') or f.startswith('test/bench/')) and f.endswith('.go')]
	if not files:
		return
	cwd = os.getcwd()
	files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
	# Skip files that no longer exist (e.g. pending removals).
	files = [f for f in files if os.access(f, 0)]
	if not files:
		return
	try:
		cmd = subprocess.Popen(["gofmt", "-l"] + files, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=sys.platform != "win32")
		cmd.stdin.close()
	except:
		raise util.Abort("gofmt: " + ExceptionDetail())
	data = cmd.stdout.read()
	errors = cmd.stderr.read()
	cmd.wait()
	set_status("done with gofmt")
	if len(errors) > 0:
		# gofmt itself failed; report and do not judge the files.
		ui.warn("gofmt errors:\n" + errors.rstrip() + "\n")
		return
	if len(data) > 0:
		# 'gofmt -l' prints the names of files needing reformatting.
		msg = "gofmt needs to format these files (run hg gofmt):\n" + Indent(data, "\t").rstrip()
		if just_warn:
			ui.warn("warning: " + msg + "\n")
		else:
			raise util.Abort(msg)
	return
# Check that *.[chys] files indent using tabs.
def CheckTabfmt(ui, repo, files, just_warn):
	"""Check that modified *.[chys] files under src/ indent with tabs.

	A file is flagged when any line starts with four spaces, except the
	Plan 9 four-space label-indent form '    name:'. On violations,
	aborts, or only warns when just_warn is true.
	"""
	files = [f for f in files if f.startswith('src/') and re.search(r"\.[chys]$", f)]
	if not files:
		return
	cwd = os.getcwd()
	files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
	# Skip files that no longer exist (e.g. pending removals).
	files = [f for f in files if os.access(f, 0)]
	badfiles = []
	for f in files:
		try:
			fp = open(f, 'r')
			try:
				for line in fp:
					# Four leading spaces is enough to complain about,
					# except that some Plan 9 code uses four spaces as the label indent,
					# so allow that.
					if line.startswith('    ') and not re.match('    [A-Za-z0-9_]+:', line):
						badfiles.append(f)
						break
			finally:
				# Close explicitly; the old code leaked the handle to the GC.
				fp.close()
		except:
			# ignore cannot open file, etc.
			pass
	if len(badfiles) > 0:
		msg = "these files use spaces for indentation (use tabs instead):\n\t" + "\n\t".join(badfiles)
		if just_warn:
			ui.warn("warning: " + msg + "\n")
		else:
			raise util.Abort(msg)
	return
#######################################################################
# Mercurial commands
# every command must take a ui and and repo as arguments.
# opts is a dict where you can find other command line flags
#
# Other parameters are taken in order from items on the command line that
# don't start with a dash. If no default value is given in the parameter list,
# they are required.
#
def change(ui, repo, *pats, **opts):
	"""create, edit or delete a change list
	Create, edit or delete a change list.
	A change list is a group of files to be reviewed and submitted together,
	plus a textual description of the change.
	Change lists are referred to by simple alphanumeric names.
	Changes must be reviewed before they can be submitted.
	In the absence of options, the change command opens the
	change list for editing in the default editor.
	Deleting a change with the -d or -D flag does not affect
	the contents of the files listed in that change. To revert
	the files listed in a change, use
	hg revert @123456
	before running hg change -d 123456.
	"""
	if missing_codereview:
		return missing_codereview
	# dirty maps CL objects to True when they need to be flushed to disk.
	dirty = {}
	if len(pats) > 0 and GoodCLName(pats[0]):
		# Editing an existing CL named on the command line.
		name = pats[0]
		if len(pats) != 1:
			return "cannot specify CL name and file patterns"
		pats = pats[1:]
		cl, err = LoadCL(ui, repo, name, web=True)
		if err != '':
			return err
		if not cl.local and (opts["stdin"] or not opts["stdout"]):
			return "cannot change non-local CL " + name
	else:
		# Creating a new CL from the matched changed files.
		if repo[None].branch() != "default":
			return "cannot run hg change outside default branch"
		name = "new"
		cl = CL("new")
		dirty[cl] = True
		files = ChangedFiles(ui, repo, pats, opts, taken=Taken(ui, repo))
	if opts["delete"] or opts["deletelocal"]:
		# -d abandons the CL on the server too; -D only deletes locally.
		if opts["delete"] and opts["deletelocal"]:
			return "cannot use -d and -D together"
		flag = "-d"
		if opts["deletelocal"]:
			flag = "-D"
		if name == "new":
			return "cannot use "+flag+" with file patterns"
		if opts["stdin"] or opts["stdout"]:
			return "cannot use "+flag+" with -i or -o"
		if not cl.local:
			return "cannot change non-local CL " + name
		if opts["delete"]:
			if cl.copied_from:
				return "original author must delete CL; hg change -D will remove locally"
			PostMessage(ui, cl.name, "*** Abandoned ***", send_mail=cl.mailed)
			EditDesc(cl.name, closed=True, private=cl.private)
		cl.Delete(ui, repo)
		return
	if opts["stdin"]:
		# Non-interactive edit: read the CL text from stdin.
		s = sys.stdin.read()
		clx, line, err = ParseCL(s, name)
		if err != '':
			return "error parsing change list: line %d: %s" % (line, err)
		if clx.desc is not None:
			cl.desc = clx.desc;
			dirty[cl] = True
		if clx.reviewer is not None:
			cl.reviewer = clx.reviewer
			dirty[cl] = True
		if clx.cc is not None:
			cl.cc = clx.cc
			dirty[cl] = True
		if clx.files is not None:
			cl.files = clx.files
			dirty[cl] = True
		if clx.private != cl.private:
			cl.private = clx.private
			dirty[cl] = True
	if not opts["stdin"] and not opts["stdout"]:
		# Interactive edit in the user's editor.
		if name == "new":
			cl.files = files
		err = EditCL(ui, repo, cl)
		if err != "":
			return err
		dirty[cl] = True
	for d, _ in dirty.items():
		name = d.name
		d.Flush(ui, repo)
		if name == "new":
			# Upload immediately so the new CL gets a server-assigned number.
			d.Upload(ui, repo, quiet=True)
	if opts["stdout"]:
		ui.write(cl.EditorText())
	elif opts["pending"]:
		ui.write(cl.PendingText())
	elif name == "new":
		if ui.quiet:
			ui.write(cl.name)
		else:
			ui.write("CL created: " + cl.url + "\n")
	return
def code_login(ui, repo, **opts):
	"""log in to code review server
	Logs in to the code review server, saving a cookie in
	a file in your home directory.
	"""
	if missing_codereview:
		return missing_codereview
	# MySend(None) performs the login handshake without sending a request.
	MySend(None)
def clpatch(ui, repo, clname, **opts):
	"""import a patch from the code review server
	Imports a patch from the code review server into the local client.
	If the local client has already modified any of the files that the
	patch modifies, this command will refuse to apply the patch.
	Submitting an imported patch will keep the original author's
	name as the Author: line but add your own name to a Committer: line.
	"""
	if repo[None].branch() != "default":
		return "cannot run hg clpatch outside default branch"
	# Shared implementation with undo/backport.
	return clpatch_or_undo(ui, repo, clname, opts, mode="clpatch")
def undo(ui, repo, clname, **opts):
	"""undo the effect of a CL
	Creates a new CL that undoes an earlier CL.
	After creating the CL, opens the CL text for editing so that
	you can add the reason for the undo to the description.
	"""
	if repo[None].branch() != "default":
		return "cannot run hg undo outside default branch"
	# Shared implementation with clpatch/backport.
	return clpatch_or_undo(ui, repo, clname, opts, mode="undo")
def release_apply(ui, repo, clname, **opts):
	"""apply a CL to the release branch
	Creates a new CL copying a previously committed change
	from the main branch to the release branch.
	The current client must either be clean or already be in
	the release branch.
	The release branch must be created by starting with a
	clean client, disabling the code review plugin, and running:
	hg update weekly.YYYY-MM-DD
	hg branch release-branch.rNN
	hg commit -m 'create release-branch.rNN'
	hg push --new-branch
	Then re-enable the code review plugin.
	People can test the release branch by running
	hg update release-branch.rNN
	in a clean client. To return to the normal tree,
	hg update default
	Move changes since the weekly into the release branch
	using hg release-apply followed by the usual code review
	process and hg submit.
	When it comes time to tag the release, record the
	final long-form tag of the release-branch.rNN
	in the *default* branch's .hgtags file. That is, run
	hg update default
	and then edit .hgtags as you would for a weekly.
	"""
	c = repo[None]
	if not releaseBranch:
		return "no active release branches"
	if c.branch() != releaseBranch:
		# Switch onto the release branch, but only from a clean client.
		if c.modified() or c.added() or c.removed():
			raise util.Abort("uncommitted local changes - cannot switch branches")
		err = hg.clean(repo, releaseBranch)
		if err:
			return err
	try:
		err = clpatch_or_undo(ui, repo, clname, opts, mode="backport")
		if err:
			raise util.Abort(err)
	except Exception, e:
		# On any failure, return the client to the default branch
		# before propagating the error.
		hg.clean(repo, "default")
		raise e
	return None
def rev2clname(rev):
	"""Extract the CL number from a revision's description.

	The CL name is the last line of the description that is a codereview
	URL; earlier matches may be part of the user-written text. Returns
	"" when no such line exists.
	"""
	matches = re.findall('(?m)^http://codereview.appspot.com/([0-9]+)$', rev.description())
	if not matches:
		return ""
	return matches[-1]
# Description templates for undo and backport CLs; the original CL's
# description is wrapped between the ««« and »»» markers.
undoHeader = """undo CL %s / %s
<enter reason for undo>
««« original CL description
"""
undoFooter = """
»»»
"""
backportHeader = """[%s] %s
««« CL %s / %s
"""
backportFooter = """
»»»
"""
# Implementation of clpatch/undo.
def clpatch_or_undo(ui, repo, clname, opts, mode):
	# Shared implementation of clpatch, undo, and release_apply
	# (mode is "clpatch", "undo", or "backport"). Builds a patch, adjusts
	# it for local changes, applies it with hgpatch, and records a CL.
	if missing_codereview:
		return missing_codereview
	if mode == "undo" or mode == "backport":
		if hgversion < '1.4':
			# Don't have cmdutil.match (see implementation of sync command).
			return "hg is too old to run hg %s - update to 1.4 or newer" % mode
		# Find revision in Mercurial repository.
		# Assume CL number is 7+ decimal digits.
		# Otherwise is either change log sequence number (fewer decimal digits),
		# hexadecimal hash, or tag name.
		# Mercurial will fall over long before the change log
		# sequence numbers get to be 7 digits long.
		if re.match('^[0-9]{7,}$', clname):
			found = False
			matchfn = scmutil.match(repo, [], {'rev': None})
			def prep(ctx, fns):
				pass
			for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev': None}, prep):
				rev = repo[ctx.rev()]
				# Last line with a code review URL is the actual review URL.
				# Earlier ones might be part of the CL description.
				n = rev2clname(rev)
				if n == clname:
					found = True
					break
			if not found:
				return "cannot find CL %s in local repository" % clname
		else:
			rev = repo[clname]
			if not rev:
				return "unknown revision %s" % clname
			clname = rev2clname(rev)
			if clname == "":
				return "cannot find CL name in revision description"
		# Create fresh CL and start with patch that would reverse the change.
		vers = short(rev.node())
		cl = CL("new")
		desc = str(rev.description())
		if mode == "undo":
			cl.desc = (undoHeader % (clname, vers)) + desc + undoFooter
		else:
			# NOTE(review): uses undoFooter where backportFooter looks
			# intended; the two strings are currently identical, so
			# behavior is unaffected — confirm before changing.
			cl.desc = (backportHeader % (releaseBranch, line1(desc), clname, vers)) + desc + undoFooter
		v1 = vers
		v0 = short(rev.parents()[0].node())
		if mode == "undo":
			# Reverse diff: new -> old.
			arg = v1 + ":" + v0
		else:
			# Forward diff: old -> new, applied onto the release branch.
			vers = v0
			arg = v0 + ":" + v1
		patch = RunShell(["hg", "diff", "--git", "-r", arg])
	else: # clpatch
		cl, vers, patch, err = DownloadCL(ui, repo, clname)
		if err != "":
			return err
		if patch == emptydiff:
			return "codereview issue %s has no diff" % clname
	# find current hg version (hg identify)
	ctx = repo[None]
	parents = ctx.parents()
	id = '+'.join([short(p.node()) for p in parents])
	# if version does not match the patch version,
	# try to update the patch line numbers.
	if vers != "" and id != vers:
		# "vers in repo" gives the wrong answer
		# on some versions of Mercurial. Instead, do the actual
		# lookup and catch the exception.
		try:
			repo[vers].description()
		except:
			return "local repository is out of date; sync to get %s" % (vers)
		patch1, err = portPatch(repo, patch, vers, id)
		if err != "":
			if not opts["ignore_hgpatch_failure"]:
				return "codereview issue %s is out of date: %s (%s->%s)" % (clname, err, vers, id)
		else:
			patch = patch1
	argv = ["hgpatch"]
	if opts["no_incoming"] or mode == "backport":
		argv += ["--checksync=false"]
	try:
		cmd = subprocess.Popen(argv, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, close_fds=sys.platform != "win32")
	except:
		return "hgpatch: " + ExceptionDetail()
	out, err = cmd.communicate(patch)
	if cmd.returncode != 0 and not opts["ignore_hgpatch_failure"]:
		return "hgpatch failed"
	cl.local = True
	# hgpatch prints the files it touched, one per line.
	cl.files = out.strip().split()
	if not cl.files and not opts["ignore_hgpatch_failure"]:
		return "codereview issue %s has no changed files" % clname
	files = ChangedFiles(ui, repo, [], opts)
	extra = Sub(cl.files, files)
	if extra:
		ui.warn("warning: these files were listed in the patch but not changed:\n\t" + "\n\t".join(extra) + "\n")
	cl.Flush(ui, repo)
	if mode == "undo":
		# Let the user fill in the reason for the undo.
		err = EditCL(ui, repo, cl)
		if err != "":
			return "CL created, but error editing: " + err
		cl.Flush(ui, repo)
	else:
		ui.write(cl.PendingText() + "\n")
# portPatch rewrites patch from being a patch against
# oldver to being a patch against newver.
def portPatch(repo, patch, oldver, newver):
	# Rewrite patch (text of a git-style diff against oldver) so that its
	# hunk line numbers apply against newver. Returns (newpatch, "") or
	# (None/"", errmsg) on failure.
	lines = patch.splitlines(True) # True = keep \n
	delta = None
	for i in range(len(lines)):
		line = lines[i]
		if line.startswith('--- a/'):
			# New file section: compute that file's line-number deltas.
			file = line[6:-1]
			delta = fileDeltas(repo, file, oldver, newver)
		if not delta or not line.startswith('@@ '):
			continue
		# @@ -x,y +z,w @@ means the patch chunk replaces
		# the original file's line numbers x up to x+y with the
		# line numbers z up to z+w in the new file.
		# Find the delta from x in the original to the same
		# line in the current version and add that delta to both
		# x and z.
		m = re.match('@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@', line)
		if not m:
			return None, "error parsing patch line numbers"
		n1, len1, n2, len2 = int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
		d, err = lineDelta(delta, n1, len1)
		if err != "":
			return "", err
		n1 += d
		n2 += d
		lines[i] = "@@ -%d,%d +%d,%d @@\n" % (n1, len1, n2, len2)
	newpatch = ''.join(lines)
	return newpatch, ""
# fileDelta returns the line number deltas for the given file's
# changes from oldver to newver.
# The deltas are a list of (n, len, newdelta) triples that say
# lines [n, n+len) were modified, and after that range the
# line numbers are +newdelta from what they were before.
def fileDeltas(repo, file, oldver, newver):
	# Return the line-number deltas for file's changes from oldver to
	# newver, as (n, len, newdelta) triples parsed from 'hg diff' hunk
	# headers: lines [n, n+len) were modified, and after that range
	# line numbers shift by newdelta.
	cmd = ["hg", "diff", "--git", "-r", oldver + ":" + newver, "path:" + file]
	data = RunShell(cmd, silent_ok=True)
	deltas = []
	for line in data.splitlines():
		m = re.match('@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@', line)
		if not m:
			continue
		n1, len1, n2, len2 = int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
		# Cumulative shift after this hunk: new end minus old end.
		deltas.append((n1, len1, n2+len2-(n1+len1)))
	return deltas
# lineDelta finds the appropriate line number delta to apply to the lines [n, n+len).
# It returns an error if those lines were rewritten by the patch.
def lineDelta(deltas, n, len):
	"""Return (delta, err): the line-number delta to apply to lines [n, n+len).

	deltas is a list of (old, oldlen, newdelta) triples as produced by
	fileDeltas, sorted by position. Returns an error if the range
	[n, n+len) overlaps a changed region — those lines were rewritten,
	so porting the patch would be unsafe.
	"""
	d = 0
	for (old, oldlen, newdelta) in deltas:
		if old >= n+len:
			# Change lies entirely after our range; earlier delta applies.
			break
		if old+oldlen > n:
			# [old, old+oldlen) overlaps [n, n+len). (Previously this
			# compared old+len, i.e. the query range's length, which
			# falsely flagged conflicts for edits just before the range.)
			return 0, "patch and recent changes conflict"
		d = newdelta
	return d, ""
def download(ui, repo, clname, **opts):
	"""download a change from the code review server
	Download prints a description of the given change list
	followed by its diff, downloaded from the code review server.
	"""
	if missing_codereview:
		return missing_codereview
	cl, vers, patch, err = DownloadCL(ui, repo, clname)
	if err != "":
		return err
	# Print description first, then the raw diff.
	ui.write(cl.EditorText() + "\n")
	ui.write(patch + "\n")
	return
def file(ui, repo, clname, pat, *pats, **opts):
	"""assign files to or remove files from a change list
	Assign files to or (with -d) remove files from a change list.
	The -d option only removes files from the change list.
	It does not edit them or remove them from the repository.
	"""
	if missing_codereview:
		return missing_codereview
	pats = tuple([pat] + list(pats))
	if not GoodCLName(clname):
		return "invalid CL name " + clname
	# dirty maps CL objects that need flushing back to disk.
	dirty = {}
	cl, err = LoadCL(ui, repo, clname, web=False)
	if err != '':
		return err
	if not cl.local:
		return "cannot change non-local CL " + clname
	files = ChangedFiles(ui, repo, pats, opts)
	if opts["delete"]:
		# -d: drop the matched files from this CL only.
		oldfiles = Intersect(files, cl.files)
		if oldfiles:
			if not ui.quiet:
				# Print the commands that would undo this removal.
				ui.status("# Removing files from CL. To undo:\n")
				ui.status("# cd %s\n" % (repo.root))
				for f in oldfiles:
					ui.status("# hg file %s %s\n" % (cl.name, f))
			cl.files = Sub(cl.files, oldfiles)
			cl.Flush(ui, repo)
		else:
			ui.status("no such files in CL")
		return
	if not files:
		return "no such modified files"
	files = Sub(files, cl.files)
	taken = Taken(ui, repo)
	warned = False
	for f in files:
		if f in taken:
			# File belongs to another CL: steal it, printing undo hints.
			if not warned and not ui.quiet:
				ui.status("# Taking files from other CLs. To undo:\n")
				ui.status("# cd %s\n" % (repo.root))
				warned = True
			ocl = taken[f]
			if not ui.quiet:
				ui.status("# hg file %s %s\n" % (ocl.name, f))
			if ocl not in dirty:
				ocl.files = Sub(ocl.files, files)
				dirty[ocl] = True
	cl.files = Add(cl.files, files)
	dirty[cl] = True
	for d, _ in dirty.items():
		d.Flush(ui, repo)
	return
def gofmt(ui, repo, *pats, **opts):
	"""apply gofmt to modified files
	Applies gofmt to the modified files in the repository that match
	the given patterns.
	"""
	if missing_codereview:
		return missing_codereview
	files = ChangedExistingFiles(ui, repo, pats, opts)
	files = [f for f in files if f.endswith(".go")]
	if not files:
		return "no modified go files"
	cwd = os.getcwd()
	files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
	try:
		cmd = ["gofmt", "-l"]
		if not opts["list"]:
			# Without --list, rewrite the files in place.
			cmd += ["-w"]
		if os.spawnvp(os.P_WAIT, "gofmt", cmd + files) != 0:
			raise util.Abort("gofmt did not exit cleanly")
	except error.Abort, e:
		# Re-raise our own aborts unchanged.
		raise
	except:
		raise util.Abort("gofmt: " + ExceptionDetail())
	return
def mail(ui, repo, *pats, **opts):
	"""mail a change for review
	Uploads a patch to the code review server and then sends mail
	to the reviewer and CC list asking for a review.
	"""
	if missing_codereview:
		return missing_codereview
	cl, err = CommandLineCL(ui, repo, pats, opts, defaultcc=defaultcc)
	if err != "":
		return err
	cl.Upload(ui, repo, gofmt_just_warn=True)
	if not cl.reviewer:
		# If no reviewer is listed, assign the review to defaultcc.
		# This makes sure that it appears in the
		# codereview.appspot.com/user/defaultcc
		# page, so that it doesn't get dropped on the floor.
		if not defaultcc:
			return "no reviewers listed in CL"
		# Avoid listing defaultcc as both reviewer and CC.
		cl.cc = Sub(cl.cc, defaultcc)
		cl.reviewer = defaultcc
		cl.Flush(ui, repo)
	if cl.files == []:
		return "no changed files, not sending mail"
	cl.Mail(ui, repo)
def pending(ui, repo, *pats, **opts):
	"""show pending changes
	Lists pending changes followed by a list of unassigned but modified files.
	"""
	if missing_codereview:
		return missing_codereview
	m = LoadAllCL(ui, repo, web=True)
	# Print CLs in sorted name order.
	names = m.keys()
	names.sort()
	for name in names:
		cl = m[name]
		ui.write(cl.PendingText() + "\n")
	# Then list changed files not assigned to any CL.
	files = DefaultFiles(ui, repo, [], opts)
	if len(files) > 0:
		s = "Changed files not in any CL:\n"
		for f in files:
			s += "\t" + f + "\n"
		ui.write(s)
def reposetup(ui, repo):
	# Mercurial extension hook: install our scmutil.match wrapper (once)
	# and record the ui/repo globals that the wrapper needs.
	global original_match
	if original_match is None:
		global global_repo, global_ui
		global_repo = repo
		global_ui = ui
		start_status_thread()
		# Save the real matcher before replacing it.
		original_match = scmutil.match
		scmutil.match = ReplacementForCmdutilMatch
		RietveldSetup(ui, repo)
def CheckContributor(ui, repo, user=None):
	# Like FindContributor, but aborts when the user is not listed in the
	# CONTRIBUTORS file. Returns the canonical "Name <email>" line.
	set_status("checking CONTRIBUTORS file")
	user, userline = FindContributor(ui, repo, user, warn=False)
	if not userline:
		raise util.Abort("cannot find %s in CONTRIBUTORS" % (user,))
	return userline
def FindContributor(ui, repo, user=None, warn=True):
	# Look up user (default: the configured hg username) in the
	# contributors table. Returns (email, "Name <email>") when found, or
	# (normalized_user, None) — warning unless warn is False.
	if not user:
		user = ui.config("ui", "username")
		if not user:
			raise util.Abort("[ui] username is not configured in .hgrc")
	user = user.lower()
	# Reduce "Name <addr>" to just the address for the table lookup.
	m = re.match(r".*<(.*)>", user)
	if m:
		user = m.group(1)
	if user not in contributors:
		if warn:
			ui.warn("warning: cannot find %s in CONTRIBUTORS\n" % (user,))
		return user, None
	user, email = contributors[user]
	return email, "%s <%s>" % (user, email)
def submit(ui, repo, *pats, **opts):
	"""submit change to remote repository
	Submits change to remote repository.
	Bails out if the local repository is not in sync with the remote one.
	"""
	if missing_codereview:
		return missing_codereview
	# We already called this on startup but sometimes Mercurial forgets.
	set_mercurial_encoding_to_utf8()
	other = getremote(ui, repo, opts)
	repo.ui.quiet = True
	if not opts["no_incoming"] and incoming(repo, other):
		return "local repository out of date; must sync before submit"
	cl, err = CommandLineCL(ui, repo, pats, opts, defaultcc=defaultcc)
	if err != "":
		return err
	# For a clpatch'ed CL, credit the original author; the submitter is
	# checked separately and recorded on a Committer: line below.
	user = None
	if cl.copied_from:
		user = cl.copied_from
	userline = CheckContributor(ui, repo, user)
	typecheck(userline, str)
	# Build the R=/TBR=/CC= trailer appended to the commit message.
	about = ""
	if cl.reviewer:
		about += "R=" + JoinComma([CutDomain(s) for s in cl.reviewer]) + "\n"
	if opts.get('tbr'):
		tbr = SplitCommaSpace(opts.get('tbr'))
		cl.reviewer = Add(cl.reviewer, tbr)
		about += "TBR=" + JoinComma([CutDomain(s) for s in tbr]) + "\n"
	if cl.cc:
		about += "CC=" + JoinComma([CutDomain(s) for s in cl.cc]) + "\n"
	if not cl.reviewer:
		return "no reviewers listed in CL"
	if not cl.local:
		return "cannot submit non-local CL"
	# upload, to sync current patch and also get change number if CL is new.
	if not cl.copied_from:
		cl.Upload(ui, repo, gofmt_just_warn=True)
	# check gofmt for real; allowed upload to warn in order to save CL.
	cl.Flush(ui, repo)
	CheckFormat(ui, repo, cl.files)
	about += "%s%s\n" % (server_url_base, cl.name)
	if cl.copied_from:
		about += "\nCommitter: " + CheckContributor(ui, repo, None) + "\n"
	typecheck(about, str)
	if not cl.mailed and not cl.copied_from: # in case this is TBR
		cl.Mail(ui, repo)
	# submit changes locally
	date = opts.get('date')
	if date:
		opts['date'] = util.parsedate(date)
		typecheck(opts['date'], str)
	opts['message'] = cl.desc.rstrip() + "\n\n" + about
	typecheck(opts['message'], str)
	if opts['dryrun']:
		print "NOT SUBMITTING:"
		print "User: ", userline
		print "Message:"
		print Indent(opts['message'], "\t")
		print "Files:"
		print Indent('\n'.join(cl.files), "\t")
		return "dry run; not submitted"
	set_status("pushing " + cl.name + " to remote server")
	other = getremote(ui, repo, opts)
	if outgoing(repo):
		raise util.Abort("local repository corrupt or out-of-phase with remote: found outgoing changes")
	# Commit exactly the CL's files with the composed message.
	m = match.exact(repo.root, repo.getcwd(), cl.files)
	node = repo.commit(ustr(opts['message']), ustr(userline), opts.get('date'), m)
	if not node:
		return "nothing changed"
	# push to remote; if it fails for any reason, roll back
	try:
		log = repo.changelog
		rev = log.rev(node)
		parents = log.parentrevs(rev)
		# Refuse to push a commit that creates a new head.
		if (rev-1 not in parents and
				(parents == (nullrev, nullrev) or
				len(log.heads(log.node(parents[0]))) > 1 and
				(parents[1] == nullrev or len(log.heads(log.node(parents[1]))) > 1))):
			# created new head
			raise util.Abort("local repository out of date; must sync before submit")
		# push changes to remote.
		# if it works, we're committed.
		# if not, roll back
		r = repo.push(other, False, None)
		if r == 0:
			raise util.Abort("local repository out of date; must sync before submit")
	except:
		# Undo the local commit so the client stays consistent.
		real_rollback()
		raise
	# we're committed. upload final patch, close review, add commit message
	changeURL = short(node)
	url = other.url()
	m = re.match("^https?://([^@/]+@)?([^.]+)\.googlecode\.com/hg/?", url)
	if m:
		changeURL = "http://code.google.com/p/%s/source/detail?r=%s" % (m.group(2), changeURL)
	else:
		print >>sys.stderr, "URL: ", url
	pmsg = "*** Submitted as " + changeURL + " ***\n\n" + opts['message']
	# When posting, move reviewers to CC line,
	# so that the issue stops showing up in their "My Issues" page.
	PostMessage(ui, cl.name, pmsg, reviewers="", cc=JoinComma(cl.reviewer+cl.cc))
	if not cl.copied_from:
		EditDesc(cl.name, closed=True, private=cl.private)
	cl.Delete(ui, repo)
	# After a release-branch submit from a clean client, return to default.
	c = repo[None]
	if c.branch() == releaseBranch and not c.modified() and not c.added() and not c.removed():
		ui.write("switching from %s to default branch.\n" % releaseBranch)
		err = hg.clean(repo, "default")
		if err:
			return err
	return None
def sync(ui, repo, **opts):
	"""synchronize with remote repository
	Incorporates recent changes from the remote repository
	into the local repository.
	"""
	if missing_codereview:
		return missing_codereview
	if not opts["local"]:
		# Filter hg's verbose output through sync_note while pulling.
		ui.status = sync_note
		ui.note = sync_note
		other = getremote(ui, repo, opts)
		modheads = repo.pull(other)
		err = commands.postincoming(ui, repo, modheads, True, "tip")
		if err:
			return err
	commands.update(ui, repo, rev="default")
	# Close out any local CLs that were submitted remotely.
	sync_changes(ui, repo)
def sync_note(msg):
	"""Output filter used while syncing (pull -u in verbose mode).

	Verbose mode gives us the list of files being updated, but drags
	along noise we don't care about; swallow those messages and pass
	everything else through to stdout.
	"""
	noise = (
		'resolving manifests\n',
		'searching for changes\n',
		"couldn't find merge tool hgmerge\n",
	)
	if msg in noise:
		return
	sys.stdout.write(msg)
def sync_changes(ui, repo):
	# Look through recent change log descriptions to find
	# potential references to http://.*/our-CL-number.
	# Double-check them by looking at the Rietveld log.
	def Rev(rev):
		# Close any local CL whose number appears in this revision's
		# description and that Rietveld confirms as submitted.
		desc = repo[rev].description().strip()
		for clname in re.findall('(?m)^http://(?:[^\n]+)/([0-9]+)$', desc):
			if IsLocalCL(ui, repo, clname) and IsRietveldSubmitted(ui, clname, repo[rev].hex()):
				ui.warn("CL %s submitted as %s; closing\n" % (clname, repo[rev]))
				cl, err = LoadCL(ui, repo, clname, web=False)
				if err != "":
					ui.warn("loading CL %s: %s\n" % (clname, err))
					continue
				if not cl.copied_from:
					EditDesc(cl.name, closed=True, private=cl.private)
				cl.Delete(ui, repo)
	if hgversion < '1.4':
		# Old walkchangerevs API: iterate (state, rev, fns) tuples,
		# scanning at most the 100 most recent revisions.
		get = util.cachefunc(lambda r: repo[r].changeset())
		changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, [], get, {'rev': None})
		n = 0
		for st, rev, fns in changeiter:
			if st != 'iter':
				continue
			n += 1
			if n > 100:
				break
			Rev(rev)
	else:
		# Modern walkchangerevs API.
		matchfn = scmutil.match(repo, [], {'rev': None})
		def prep(ctx, fns):
			pass
		for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev': None}, prep):
			Rev(ctx.rev())
	# Remove files that are not modified from the CLs in which they appear.
	all = LoadAllCL(ui, repo, web=False)
	changed = ChangedFiles(ui, repo, [], {})
	for _, cl in all.items():
		extra = Sub(cl.files, changed)
		if extra:
			ui.warn("Removing unmodified files from CL %s:\n" % (cl.name,))
			for f in extra:
				ui.warn("\t%s\n" % (f,))
			cl.files = Sub(cl.files, extra)
			cl.Flush(ui, repo)
		if not cl.files:
			if not cl.copied_from:
				ui.warn("CL %s has no files; delete (abandon) with hg change -d %s\n" % (cl.name, cl.name))
			else:
				ui.warn("CL %s has no files; delete locally with hg change -D %s\n" % (cl.name, cl.name))
	return
def upload(ui, repo, name, **opts):
	"""upload diffs to the code review server
	Uploads the current modifications for a given change to the server.
	"""
	if missing_codereview:
		return missing_codereview
	# Quiet the repository's own ui so only the issue URL below is printed.
	repo.ui.quiet = True
	cl, err = LoadCL(ui, repo, name, web=True)
	if err != "":
		return err
	if not cl.local:
		return "cannot upload non-local change"
	cl.Upload(ui, repo)
	# Print the issue URL for easy copy/paste.
	print "%s%s\n" % (server_url_base, cl.name)
	return
# Command-line options shared by the commands that create or mail a
# change (mail, submit); merged into their cmdtable entries below.
review_opts = [
	('r', 'reviewer', '', 'add reviewer'),
	('', 'cc', '', 'add cc'),
	('', 'tbr', '', 'add future reviewer'),
	('m', 'message', '', 'change description (for new change)'),
]
# Mercurial command table: maps command name to
# (function, option list, synopsis string).
cmdtable = {
	# The ^ means to show this command in the help text that
	# is printed when running hg with no arguments.
	"^change": (
		change,
		[
			('d', 'delete', None, 'delete existing change list'),
			('D', 'deletelocal', None, 'delete locally, but do not change CL on server'),
			('i', 'stdin', None, 'read change list from standard input'),
			('o', 'stdout', None, 'print change list to standard output'),
			('p', 'pending', None, 'print pending summary to standard output'),
		],
		"[-d | -D] [-i] [-o] change# or FILE ..."
	),
	"^clpatch": (
		clpatch,
		[
			('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
			('', 'no_incoming', None, 'disable check for incoming changes'),
		],
		"change#"
	),
	# Would prefer to call this codereview-login, but then
	# hg help codereview prints the help for this command
	# instead of the help for the extension.
	"code-login": (
		code_login,
		[],
		"",
	),
	"^download": (
		download,
		[],
		"change#"
	),
	"^file": (
		file,
		[
			('d', 'delete', None, 'delete files from change list (but not repository)'),
		],
		"[-d] change# FILE ..."
	),
	"^gofmt": (
		gofmt,
		[
			('l', 'list', None, 'list files that would change, but do not edit them'),
		],
		"FILE ..."
	),
	"^pending|p": (
		pending,
		[],
		"[FILE ...]"
	),
	"^mail": (
		mail,
		review_opts + [
		] + commands.walkopts,
		"[-r reviewer] [--cc cc] [change# | file ...]"
	),
	"^release-apply": (
		release_apply,
		[
			('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
			('', 'no_incoming', None, 'disable check for incoming changes'),
		],
		"change#"
	),
	# TODO: release-start, release-tag, weekly-tag
	"^submit": (
		submit,
		review_opts + [
			('', 'no_incoming', None, 'disable initial incoming check (for testing)'),
			('n', 'dryrun', None, 'make change only locally (for testing)'),
		] + commands.walkopts + commands.commitopts + commands.commitopts2,
		"[-r reviewer] [--cc cc] [change# | file ...]"
	),
	"^sync": (
		sync,
		[
			('', 'local', None, 'do not pull changes from remote repository')
		],
		"[--local]",
	),
	"^undo": (
		undo,
		[
			('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
			('', 'no_incoming', None, 'disable check for incoming changes'),
		],
		"change#"
	),
	"^upload": (
		upload,
		[],
		"change#"
	),
}
#######################################################################
# Wrappers around upload.py for interacting with Rietveld
# HTML form parser
class FormParser(HTMLParser):
	"""Collects the fields of an HTML form into self.map.
	<input> name/value pairs are recorded directly; <textarea> bodies
	are accumulated between the start and end tags.
	"""
	def __init__(self):
		self.map = {}
		self.curtag = None	# name of the textarea being accumulated, if any
		self.curdata = None	# accumulated textarea content
		HTMLParser.__init__(self)
	def handle_starttag(self, tag, attrs):
		if tag == "input":
			key = None
			value = ''
			for a in attrs:
				if a[0] == 'name':
					key = a[1]
				if a[0] == 'value':
					value = a[1]
			if key is not None:
				self.map[key] = value
		if tag == "textarea":
			key = None
			for a in attrs:
				if a[0] == 'name':
					key = a[1]
			if key is not None:
				self.curtag = key
				self.curdata = ''
	def handle_endtag(self, tag):
		if tag == "textarea" and self.curtag is not None:
			self.map[self.curtag] = self.curdata
			self.curtag = None
			self.curdata = None
	def handle_charref(self, name):
		# Numeric character reference, e.g. &#65;
		self.handle_data(unichr(int(name)))
	def handle_entityref(self, name):
		# Named entity, e.g. &amp;; unknown names pass through literally.
		import htmlentitydefs
		if name in htmlentitydefs.entitydefs:
			self.handle_data(htmlentitydefs.entitydefs[name])
		else:
			self.handle_data("&" + name + ";")
	def handle_data(self, data):
		# Only accumulate while inside a <textarea>.
		if self.curdata is not None:
			self.curdata += data
def JSONGet(ui, path):
	"""Fetch path from the code review server and return the decoded,
	normalized JSON value, or None (after warning) if the request or
	decode fails."""
	try:
		data = MySend(path, force_auth=False)
		typecheck(data, str)
		d = fix_json(json.loads(data))
	except Exception:
		# Narrowed from a bare except: so that KeyboardInterrupt and
		# SystemExit still propagate instead of being reported as a
		# fetch failure.
		ui.warn("JSONGet %s: %s\n" % (path, ExceptionDetail()))
		return None
	return d
# Clean up json parser output to match our expectations:
# * all strings are UTF-8-encoded str, not unicode.
# * missing fields are missing, not None,
# so that d.get("foo", defaultvalue) works.
def fix_json(x):
if type(x) in [str, int, float, bool, type(None)]:
pass
elif type(x) is unicode:
x = x.encode("utf-8")
elif type(x) is list:
for i in range(len(x)):
x[i] = fix_json(x[i])
elif type(x) is dict:
todel = []
for k in x:
if x[k] is None:
todel.append(k)
else:
x[k] = fix_json(x[k])
for k in todel:
del x[k]
else:
raise util.Abort("unknown type " + str(type(x)) + " in fix_json")
if type(x) is str:
x = x.replace('\r\n', '\n')
return x
def IsRietveldSubmitted(ui, clname, hex):
	"""Report whether Rietveld shows CL clname as submitted at a
	revision whose hash (at least 8 hex digits) is a prefix of hex."""
	issue = JSONGet(ui, "/api/" + clname + "?messages=true")
	if issue is None:
		return False
	pattern = re.compile('\*\*\* Submitted as [^*]*?([0-9a-f]+) \*\*\*')
	for msg in issue.get("messages", []):
		m = pattern.match(msg.get("text", ""))
		if m and len(m.group(1)) >= 8 and hex.startswith(m.group(1)):
			return True
	return False
def IsRietveldMailed(cl):
	"""Report whether the Rietveld issue for cl shows that a review
	request mail was sent."""
	needle = "I'd like you to review this change"
	return any(needle in m.get("text", "") for m in cl.dict.get("messages", []))
def DownloadCL(ui, repo, clname):
	"""Fetch CL clname and its most recent patch set from Rietveld.
	Returns (cl, vers, diffdata, err): the CL object, the revision the
	diff was made against (if recorded in the patchset message), the raw
	diff text, and an error string ("" on success)."""
	set_status("downloading CL " + clname)
	cl, err = LoadCL(ui, repo, clname, web=True)
	if err != "":
		return None, None, None, "error loading CL %s: %s" % (clname, err)
	# Find most recent diff
	diffs = cl.dict.get("patchsets", [])
	if not diffs:
		return None, None, None, "CL has no patch sets"
	patchid = diffs[-1]
	patchset = JSONGet(ui, "/api/" + clname + "/" + str(patchid))
	if patchset is None:
		return None, None, None, "error loading CL patchset %s/%d" % (clname, patchid)
	if patchset.get("patchset", 0) != patchid:
		return None, None, None, "malformed patchset information"
	vers = ""
	# A patchset message of the form "diff -r <rev> ..." records the
	# base revision the diff was taken against.
	msg = patchset.get("message", "").split()
	if len(msg) >= 3 and msg[0] == "diff" and msg[1] == "-r":
		vers = msg[2]
	diff = "/download/issue" + clname + "_" + str(patchid) + ".diff"
	diffdata = MySend(diff, force_auth=False)
	# Print warning if email is not in CONTRIBUTORS file.
	email = cl.dict.get("owner_email", "")
	if not email:
		return None, None, None, "cannot find owner for %s" % (clname)
	him = FindContributor(ui, repo, email)
	me = FindContributor(ui, repo, None)
	if him == me:
		cl.mailed = IsRietveldMailed(cl)
	else:
		# Someone else's CL: remember whose, so local edits are flagged.
		cl.copied_from = email
	return cl, vers, diffdata, ""
def MySend(request_path, payload=None,
		content_type="application/octet-stream",
		timeout=None, force_auth=True,
		**kwargs):
	"""Run MySend1 maybe twice, because Rietveld is unreliable."""
	try:
		return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
	except Exception, e:
		# Retry exactly once, and only for a server-side HTTP 500;
		# anything else propagates to the caller.
		if type(e) != urllib2.HTTPError or e.code != 500: # only retry on HTTP 500 error
			raise
		print >>sys.stderr, "Loading "+request_path+": "+ExceptionDetail()+"; trying again in 2 seconds."
		time.sleep(2)
		return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
# Like upload.py Send but only authenticates when the
# redirect is to www.google.com/accounts. This keeps
# unnecessary redirects from happening during testing.
def MySend1(request_path, payload=None,
		content_type="application/octet-stream",
		timeout=None, force_auth=True,
		**kwargs):
	"""Sends an RPC and returns the response.
	Args:
		request_path: The path to send the request to, eg /api/appversion/create.
		payload: The body of the request, or None to send an empty request.
		content_type: The Content-Type header to use.
		timeout: timeout in seconds; default None i.e. no timeout.
			(Note: for large requests on OS X, the timeout doesn't work right.)
		kwargs: Any keyword arguments are converted into query string parameters.
	Returns:
		The response body, as a string.
	"""
	# TODO: Don't require authentication. Let the server say
	# whether it is necessary.
	global rpc
	# Lazily create the shared RPC server on first use.
	if rpc == None:
		rpc = GetRpcServer(upload_options)
	self = rpc
	if not self.authenticated and force_auth:
		self._Authenticate()
	if request_path is None:
		return
	old_timeout = socket.getdefaulttimeout()
	socket.setdefaulttimeout(timeout)
	try:
		tries = 0
		while True:
			tries += 1
			args = dict(kwargs)
			url = "http://%s%s" % (self.host, request_path)
			if args:
				url += "?" + urllib.urlencode(args)
			req = self._CreateRequest(url=url, data=payload)
			req.add_header("Content-Type", content_type)
			try:
				f = self.opener.open(req)
				response = f.read()
				f.close()
				# Translate \r\n into \n, because Rietveld doesn't.
				response = response.replace('\r\n', '\n')
				# who knows what urllib will give us
				if type(response) == unicode:
					response = response.encode("utf-8")
				typecheck(response, str)
				return response
			except urllib2.HTTPError, e:
				if tries > 3:
					raise
				elif e.code == 401:
					self._Authenticate()
				elif e.code == 302:
					# Only authenticate when redirected to the Google
					# accounts login page; other redirects are treated
					# as an empty response (keeps testing quiet).
					loc = e.info()["location"]
					if not loc.startswith('https://www.google.com/a') or loc.find('/ServiceLogin') < 0:
						return ''
					self._Authenticate()
				else:
					raise
	finally:
		socket.setdefaulttimeout(old_timeout)
def GetForm(url):
	"""Fetch url from the review server and return its HTML form
	fields as a str -> str map, with CRLF normalized to LF."""
	parser = FormParser()
	parser.feed(ustr(MySend(url)))	# FormParser.feed wants unicode
	parser.close()
	# The parser accumulates unicode; convert back to utf-8 to restore sanity.
	fields = {}
	for name, value in parser.map.items():
		fields[name.encode("utf-8")] = value.replace("\r\n", "\n").encode("utf-8")
	return fields
def EditDesc(issue, subject=None, desc=None, reviewers=None, cc=None, closed=False, private=False):
	"""Update the given fields of the Rietveld issue's edit form.
	None arguments leave the corresponding field untouched; closed and
	private tick the matching checkboxes. Exits the process on server error."""
	set_status("uploading change to description")
	# Start from the server's current form so unspecified fields keep
	# their existing values.
	form_fields = GetForm("/" + issue + "/edit")
	if subject is not None:
		form_fields['subject'] = subject
	if desc is not None:
		form_fields['description'] = desc
	if reviewers is not None:
		form_fields['reviewers'] = reviewers
	if cc is not None:
		form_fields['cc'] = cc
	if closed:
		form_fields['closed'] = "checked"
	if private:
		form_fields['private'] = "checked"
	ctype, body = EncodeMultipartFormData(form_fields.items(), [])
	response = MySend("/" + issue + "/edit", body, content_type=ctype)
	if response != "":
		print >>sys.stderr, "Error editing description:\n" + "Sent form: \n", form_fields, "\n", response
		sys.exit(2)
def PostMessage(ui, issue, message, reviewers=None, cc=None, send_mail=True, subject=None):
set_status("uploading message")
form_fields = GetForm("/" + issue + "/publish")
if reviewers is not None:
form_fields['reviewers'] = reviewers
if cc is not None:
form_fields['cc'] = cc
if send_mail:
form_fields['send_mail'] = "checked"
else:
del form_fields['send_mail']
if subject is not None:
form_fields['subject'] = subject
form_fields['message'] = message
form_fields['message_only'] = '1' # Don't include draft comments
if reviewers is not None or cc is not None:
form_fields['message_only'] = '' # Must set '' in order to override cc/reviewer
ctype = "applications/x-www-form-urlencoded"
body = urllib.urlencode(form_fields)
response = MySend("/" + issue + "/publish", body, content_type=ctype)
if response != "":
print response
sys.exit(2)
class opt(object):
	# Empty attribute bag; RietveldSetup fills an instance in as the
	# upload.py options object.
	pass
def nocommit(*pats, **opts):
	"""(disabled when using this extension)"""
	# Installed over cmdutil.commit in RietveldSetup; the docstring above
	# is user-visible hg help text, so keep it short.
	raise util.Abort("codereview extension enabled; use mail, upload, or submit instead of commit")
def nobackout(*pats, **opts):
	"""(disabled when using this extension)"""
	# Defined for symmetry with nocommit/norollback; see the
	# "would install nobackout if we could" note in RietveldSetup.
	raise util.Abort("codereview extension enabled; use undo instead of backout")
def norollback(*pats, **opts):
	"""(disabled when using this extension)"""
	# Installed over repo.rollback in RietveldSetup; the real rollback
	# is kept in real_rollback for internal use.
	raise util.Abort("codereview extension enabled; use undo instead of rollback")
def RietveldSetup(ui, repo):
	"""Initialize the extension's global state for this repository:
	server/upload options, contributors list, release branch; disables
	the built-in hg commands that would bypass the review flow."""
	global defaultcc, upload_options, rpc, server, server_url_base, force_google_account, verbosity, contributors
	global missing_codereview
	repo_config_path = ''
	# Read repository-specific options from lib/codereview/codereview.cfg
	try:
		repo_config_path = repo.root + '/lib/codereview/codereview.cfg'
		f = open(repo_config_path)
		for line in f:
			if line.startswith('defaultcc: '):
				# NOTE(review): 'defaultcc: ' is 11 characters, so
				# line[10:] keeps the leading space; presumably
				# SplitCommaSpace tolerates it -- confirm.
				defaultcc = SplitCommaSpace(line[10:])
	except:
		# If there are no options, chances are good this is not
		# a code review repository; stop now before we foul
		# things up even worse. Might also be that repo doesn't
		# even have a root. See issue 959.
		if repo_config_path == '':
			missing_codereview = 'codereview disabled: repository has no root'
		else:
			missing_codereview = 'codereview disabled: cannot open ' + repo_config_path
		return
	# Should only modify repository with hg submit.
	# Disable the built-in Mercurial commands that might
	# trip things up.
	cmdutil.commit = nocommit
	global real_rollback
	real_rollback = repo.rollback
	repo.rollback = norollback
	# would install nobackout if we could; oh well
	try:
		f = open(repo.root + '/CONTRIBUTORS', 'r')
	except:
		raise util.Abort("cannot open %s: %s" % (repo.root+'/CONTRIBUTORS', ExceptionDetail()))
	for line in f:
		# CONTRIBUTORS is a list of lines like:
		# Person <email>
		# Person <email> <alt-email>
		# The first email address is the one used in commit logs.
		if line.startswith('#'):
			continue
		m = re.match(r"([^<>]+\S)\s+(<[^<>\s]+>)((\s+<[^<>\s]+>)*)\s*$", line)
		if m:
			name = m.group(1)
			email = m.group(2)[1:-1]
			contributors[email.lower()] = (name, email)
			# Alternate addresses map to the same canonical identity.
			for extra in m.group(3).split():
				contributors[extra[1:-1].lower()] = (name, email)
	if not ui.verbose:
		verbosity = 0
	# Config options.
	x = ui.config("codereview", "server")
	if x is not None:
		server = x
	# TODO(rsc): Take from ui.username?
	email = None
	x = ui.config("codereview", "email")
	if x is not None:
		email = x
	server_url_base = "http://" + server + "/"
	testing = ui.config("codereview", "testing")
	force_google_account = ui.configbool("codereview", "force_google_account", False)
	# Build the options object consumed by the embedded upload.py code.
	upload_options = opt()
	upload_options.email = email
	upload_options.host = None
	upload_options.verbose = 0
	upload_options.description = None
	upload_options.description_file = None
	upload_options.reviewers = None
	upload_options.cc = None
	upload_options.message = None
	upload_options.issue = None
	upload_options.download_base = False
	upload_options.revision = None
	upload_options.send_mail = False
	upload_options.vcs = None
	upload_options.server = server
	upload_options.save_cookies = True
	if testing:
		upload_options.save_cookies = False
		upload_options.email = "test@example.com"
	rpc = None
	global releaseBranch
	tags = repo.branchtags().keys()
	if 'release-branch.r100' in tags:
		# NOTE(rsc): This tags.sort is going to get the wrong
		# answer when comparing release-branch.r99 with
		# release-branch.r100. If we do ten releases a year
		# that gives us 4 years before we have to worry about this.
		raise util.Abort('tags.sort needs to be fixed for release-branch.r100')
	tags.sort()
	# The lexically largest release-branch.* tag is the current one.
	for t in tags:
		if t.startswith('release-branch.'):
			releaseBranch = t
#######################################################################
# http://codereview.appspot.com/static/upload.py, heavily edited.
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
# (consulted by StatusUpdate below; RietveldSetup lowers it to 0
# unless hg runs in verbose mode)
verbosity = 1
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
# whitelist for non-binary filetypes which do not start with "text/"
# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
TEXT_MIMETYPES = [
	'application/javascript',
	'application/x-javascript',
	'application/x-freemind'
]
def GetEmail(prompt):
	"""Prompts the user for their email address and returns it.
	The last used email address is saved to a file and offered up as a suggestion
	to the user. If the user presses enter without typing in anything the last
	used email address is used. If the user enters a new address, it is saved
	for next time we prompt.
	"""
	last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
	last_email = ""
	if os.path.exists(last_email_file_name):
		try:
			last_email_file = open(last_email_file_name, "r")
			last_email = last_email_file.readline().strip("\n")
			last_email_file.close()
			prompt += " [%s]" % last_email
		except IOError, e:
			# Best effort: prompt without a suggestion.
			pass
	email = raw_input(prompt + ": ").strip()
	if email:
		try:
			last_email_file = open(last_email_file_name, "w")
			last_email_file.write(email)
			last_email_file.close()
		except IOError, e:
			# Best effort: remembering the address is optional.
			pass
	else:
		email = last_email
	return email
def StatusUpdate(msg):
	"""Print a status message to stdout.
	If 'verbosity' is greater than 0, print the message.
	Args:
		msg: The string to print.
	"""
	# verbosity is the module-level setting defined above.
	if verbosity > 0:
		print msg
def ErrorExit(msg):
	"""Print an error message to stderr and exit."""
	print >>sys.stderr, msg
	# Exit status 1 signals failure to the calling shell.
	sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
	"""Raised to indicate there was an error authenticating with ClientLogin."""
	def __init__(self, url, code, msg, headers, args):
		urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
		# args is the parsed ClientLogin response body; callers look at
		# self.reason (the server-reported Error value) to decide what
		# to tell the user.
		self.args = args
		self.reason = args["Error"]
class AbstractRpcServer(object):
	"""Provides a common interface for a simple RPC server."""
	def __init__(self, host, auth_function, host_override=None, extra_headers={}, save_cookies=False):
		"""Creates a new HttpRpcServer.
		Args:
			host: The host to send requests to.
			auth_function: A function that takes no arguments and returns an
				(email, password) tuple when called. Will be called if authentication
				is required.
			host_override: The host header to send to the server (defaults to host).
			extra_headers: A dict of extra headers to append to every request.
			save_cookies: If True, save the authentication cookies to local disk.
				If False, use an in-memory cookiejar instead. Subclasses must
				implement this functionality. Defaults to False.
		"""
		# NOTE(review): extra_headers={} is a shared mutable default
		# argument; nothing in this class mutates it, so it is safe as
		# used, but callers should not modify the dict they pass in.
		self.host = host
		self.host_override = host_override
		self.auth_function = auth_function
		self.authenticated = False
		self.extra_headers = extra_headers
		self.save_cookies = save_cookies
		self.opener = self._GetOpener()
		if self.host_override:
			logging.info("Server: %s; Host: %s", self.host, self.host_override)
		else:
			logging.info("Server: %s", self.host)
	def _GetOpener(self):
		"""Returns an OpenerDirector for making HTTP requests.
		Returns:
			A urllib2.OpenerDirector object.
		"""
		raise NotImplementedError()
	def _CreateRequest(self, url, data=None):
		"""Creates a new urllib request."""
		logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
		req = urllib2.Request(url, data=data)
		if self.host_override:
			req.add_header("Host", self.host_override)
		for key, value in self.extra_headers.iteritems():
			req.add_header(key, value)
		return req
	def _GetAuthToken(self, email, password):
		"""Uses ClientLogin to authenticate the user, returning an auth token.
		Args:
			email: The user's email address
			password: The user's password
		Raises:
			ClientLoginError: If there was an error authenticating with ClientLogin.
			HTTPError: If there was some other form of HTTP error.
		Returns:
			The authentication token returned by ClientLogin.
		"""
		account_type = "GOOGLE"
		if self.host.endswith(".google.com") and not force_google_account:
			# Needed for use inside Google.
			account_type = "HOSTED"
		req = self._CreateRequest(
			url="https://www.google.com/accounts/ClientLogin",
			data=urllib.urlencode({
				"Email": email,
				"Passwd": password,
				"service": "ah",
				"source": "rietveld-codereview-upload",
				"accountType": account_type,
			}),
		)
		try:
			response = self.opener.open(req)
			response_body = response.read()
			# Response body is key=value lines; we need the Auth token.
			response_dict = dict(x.split("=") for x in response_body.split("\n") if x)
			return response_dict["Auth"]
		except urllib2.HTTPError, e:
			if e.code == 403:
				# ClientLogin reports auth failures as 403 with an
				# Error=... body; surface that as ClientLoginError.
				body = e.read()
				response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
				raise ClientLoginError(req.get_full_url(), e.code, e.msg, e.headers, response_dict)
			else:
				raise
	def _GetAuthCookie(self, auth_token):
		"""Fetches authentication cookies for an authentication token.
		Args:
			auth_token: The authentication token returned by ClientLogin.
		Raises:
			HTTPError: If there was an error fetching the authentication cookies.
		"""
		# This is a dummy value to allow us to identify when we're successful.
		continue_location = "http://localhost/"
		args = {"continue": continue_location, "auth": auth_token}
		req = self._CreateRequest("http://%s/_ah/login?%s" % (self.host, urllib.urlencode(args)))
		try:
			response = self.opener.open(req)
		except urllib2.HTTPError, e:
			response = e
		# Success is the 302 redirect back to our dummy location.
		if (response.code != 302 or
			response.info()["location"] != continue_location):
			raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg, response.headers, response.fp)
		self.authenticated = True
	def _Authenticate(self):
		"""Authenticates the user.
		The authentication process works as follows:
		1) We get a username and password from the user
		2) We use ClientLogin to obtain an AUTH token for the user
		(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
		3) We pass the auth token to /_ah/login on the server to obtain an
		authentication cookie. If login was successful, it tries to redirect
		us to the URL we provided.
		If we attempt to access the upload API without first obtaining an
		authentication cookie, it returns a 401 response (or a 302) and
		directs us to authenticate ourselves with ClientLogin.
		"""
		# Up to three attempts: a bad password re-prompts; every other
		# ClientLogin failure prints a specific message and gives up.
		for i in range(3):
			credentials = self.auth_function()
			try:
				auth_token = self._GetAuthToken(credentials[0], credentials[1])
			except ClientLoginError, e:
				if e.reason == "BadAuthentication":
					print >>sys.stderr, "Invalid username or password."
					continue
				if e.reason == "CaptchaRequired":
					print >>sys.stderr, (
						"Please go to\n"
						"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
						"and verify you are a human. Then try again.")
					break
				if e.reason == "NotVerified":
					print >>sys.stderr, "Account not verified."
					break
				if e.reason == "TermsNotAgreed":
					print >>sys.stderr, "User has not agreed to TOS."
					break
				if e.reason == "AccountDeleted":
					print >>sys.stderr, "The user account has been deleted."
					break
				if e.reason == "AccountDisabled":
					print >>sys.stderr, "The user account has been disabled."
					break
				if e.reason == "ServiceDisabled":
					print >>sys.stderr, "The user's access to the service has been disabled."
					break
				if e.reason == "ServiceUnavailable":
					print >>sys.stderr, "The service is not available; try again later."
					break
				raise
			self._GetAuthCookie(auth_token)
			return
	def Send(self, request_path, payload=None,
			content_type="application/octet-stream",
			timeout=None,
			**kwargs):
		"""Sends an RPC and returns the response.
		Args:
			request_path: The path to send the request to, eg /api/appversion/create.
			payload: The body of the request, or None to send an empty request.
			content_type: The Content-Type header to use.
			timeout: timeout in seconds; default None i.e. no timeout.
				(Note: for large requests on OS X, the timeout doesn't work right.)
			kwargs: Any keyword arguments are converted into query string parameters.
		Returns:
			The response body, as a string.
		"""
		# TODO: Don't require authentication. Let the server say
		# whether it is necessary.
		if not self.authenticated:
			self._Authenticate()
		old_timeout = socket.getdefaulttimeout()
		socket.setdefaulttimeout(timeout)
		try:
			tries = 0
			while True:
				tries += 1
				args = dict(kwargs)
				url = "http://%s%s" % (self.host, request_path)
				if args:
					url += "?" + urllib.urlencode(args)
				req = self._CreateRequest(url=url, data=payload)
				req.add_header("Content-Type", content_type)
				try:
					f = self.opener.open(req)
					response = f.read()
					f.close()
					return response
				except urllib2.HTTPError, e:
					if tries > 3:
						raise
					elif e.code == 401 or e.code == 302:
						# Not (or no longer) authenticated; log in and retry.
						self._Authenticate()
					else:
						raise
		finally:
			socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
	"""Provides a simplified RPC-style interface for HTTP requests."""
	def _Authenticate(self):
		"""Save the cookie jar after authentication."""
		super(HttpRpcServer, self)._Authenticate()
		if self.save_cookies:
			StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
			self.cookie_jar.save()
	def _GetOpener(self):
		"""Returns an OpenerDirector that supports cookies and ignores redirects.
		Returns:
			A urllib2.OpenerDirector object.
		"""
		opener = urllib2.OpenerDirector()
		opener.add_handler(urllib2.ProxyHandler())
		opener.add_handler(urllib2.UnknownHandler())
		opener.add_handler(urllib2.HTTPHandler())
		opener.add_handler(urllib2.HTTPDefaultErrorHandler())
		opener.add_handler(urllib2.HTTPSHandler())
		opener.add_handler(urllib2.HTTPErrorProcessor())
		if self.save_cookies:
			# Cookies persist in a per-server Mozilla-format file so
			# repeated runs don't have to log in again.
			self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies_" + server)
			self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
			if os.path.exists(self.cookie_file):
				try:
					self.cookie_jar.load()
					self.authenticated = True
					StatusUpdate("Loaded authentication cookies from %s" % self.cookie_file)
				except (cookielib.LoadError, IOError):
					# Failed to load cookies - just ignore them.
					pass
			else:
				# Create an empty cookie file with mode 600
				fd = os.open(self.cookie_file, os.O_CREAT, 0600)
				os.close(fd)
			# Always chmod the cookie file
			os.chmod(self.cookie_file, 0600)
		else:
			# Don't save cookies across runs of update.py.
			self.cookie_jar = cookielib.CookieJar()
		opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
		return opener
def GetRpcServer(options):
	"""Returns an instance of an AbstractRpcServer.
	Returns:
		A new AbstractRpcServer, on which RPC calls can be made.
	"""
	rpc_server_class = HttpRpcServer
	def GetUserCredentials():
		"""Prompts the user for a username and password."""
		# Disable status prints so they don't obscure the password prompt.
		global global_status
		st = global_status
		global_status = None
		email = options.email
		if email is None:
			email = GetEmail("Email (login for uploading to %s)" % options.server)
		password = getpass.getpass("Password for %s: " % email)
		# Put status back.
		global_status = st
		return (email, password)
	# If this is the dev_appserver, use fake authentication.
	host = (options.host or options.server).lower()
	if host == "localhost" or host.startswith("localhost:"):
		email = options.email
		if email is None:
			email = "test@example.com"
			logging.info("Using debug user %s. Override with --email" % email)
		server = rpc_server_class(
			options.server,
			lambda: (email, "password"),
			host_override=options.host,
			extra_headers={"Cookie": 'dev_appserver_login="%s:False"' % email},
			save_cookies=options.save_cookies)
		# Don't try to talk to ClientLogin.
		server.authenticated = True
		return server
	return rpc_server_class(options.server, GetUserCredentials,
		host_override=options.host, save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
	"""Encode form fields for multipart/form-data.
	Args:
		fields: A sequence of (name, value) elements for regular form fields.
		files: A sequence of (name, filename, value) elements for data to be
			uploaded as files.
	Returns:
		(content_type, body) ready for httplib.HTTP instance.
	Source:
		http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
	"""
	boundary = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
	parts = []
	for (name, value) in fields:
		typecheck(name, str)
		typecheck(value, str)
		parts.extend([
			'--' + boundary,
			'Content-Disposition: form-data; name="%s"' % name,
			'',
			value,
		])
	for (name, filename, value) in files:
		typecheck(name, str)
		typecheck(filename, str)
		typecheck(value, str)
		parts.extend([
			'--' + boundary,
			'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
			'Content-Type: %s' % GetContentType(filename),
			'',
			value,
		])
	parts.append('--' + boundary + '--')
	parts.append('')
	content_type = 'multipart/form-data; boundary=%s' % boundary
	return content_type, '\r\n'.join(parts)
def GetContentType(filename):
	"""Guess the MIME content-type from filename's extension, falling
	back to application/octet-stream when nothing matches."""
	guessed = mimetypes.guess_type(filename)[0]
	if not guessed:
		return 'application/octet-stream'
	return guessed
# Use a shell for subcommands on Windows to get a PATH search.
# (subprocess only searches PATH itself there when shell=True.)
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
		universal_newlines=True, env=os.environ):
	"""Executes a command and returns the output from stdout and the return code.
	Args:
		command: Command to execute.
		print_output: If True, the output is printed to stdout.
			If False, both stdout and stderr are ignored.
		universal_newlines: Use universal_newlines flag (default: True).
	Returns:
		Tuple (output, return code)
	"""
	logging.info("Running %s", command)
	p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
		shell=use_shell, universal_newlines=universal_newlines, env=env)
	if print_output:
		# Echo stdout line by line as it arrives while also capturing it.
		output_array = []
		while True:
			line = p.stdout.readline()
			if not line:
				break
			print line.strip("\n")
			output_array.append(line)
		output = "".join(output_array)
	else:
		output = p.stdout.read()
	p.wait()
	# stderr is read only after the process exits; it is echoed only
	# in print_output mode.
	errout = p.stderr.read()
	if print_output and errout:
		print >>sys.stderr, errout
	p.stdout.close()
	p.stderr.close()
	return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
		print_output=False, env=os.environ):
	"""Run command via RunShellWithReturnCode, aborting the program on a
	nonzero exit status or (unless silent_ok) on empty output."""
	output, status = RunShellWithReturnCode(command, print_output, universal_newlines, env)
	if status:
		ErrorExit("Got error status from %s:\n%s" % (command, output))
	if not silent_ok and not output:
		ErrorExit("No output from %s" % command)
	return output
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
	def __init__(self, options):
		"""Constructor.
		Args:
			options: Command line options.
		"""
		# Subclasses read self.options for things like the revision to diff.
		self.options = options
	def GenerateDiff(self, args):
		"""Return the current diff as a string.
		Args:
			args: Extra arguments to pass to the diff command.
		"""
		raise NotImplementedError(
			"abstract method -- subclass %s must override" % self.__class__)
	def GetUnknownFiles(self):
		"""Return a list of files unknown to the VCS."""
		raise NotImplementedError(
			"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
prompt = "Are you sure to continue?(y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
set_status("uploading " + filename)
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [
("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields, [("data", filename, content)])
response_body = rpc_server.Send(url, body, content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
# Don't want to spawn too many threads, nor do we want to
# hit Rietveld too hard, or it will start serving 500 errors.
# When 8 works, it's no better than 4, and sometimes 8 is
# too many for Rietveld to handle.
MAX_PARALLEL_UPLOADS = 4
sema = threading.BoundedSemaphore(MAX_PARALLEL_UPLOADS)
upload_threads = []
finished_upload_threads = []
class UploadFileThread(threading.Thread):
def __init__(self, args):
threading.Thread.__init__(self)
self.args = args
def run(self):
UploadFile(*self.args)
finished_upload_threads.append(self)
sema.release()
def StartUploadFile(*args):
sema.acquire()
while len(finished_upload_threads) > 0:
t = finished_upload_threads.pop()
upload_threads.remove(t)
t.join()
t = UploadFileThread(args)
upload_threads.append(t)
t.start()
def WaitForUploads():
for t in upload_threads:
t.join()
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
StartUploadFile(filename, file_id, base_content, is_binary, status, True)
if new_content != None:
StartUploadFile(filename, file_id, new_content, is_binary, status, False)
WaitForUploads()
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
def IsBinary(self, filename):
"""Returns true if the guessed mimetyped isnt't in text group."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False # e.g. README, "real" binaries usually have an extension
# special case for text files which don't start with text/
if mimetype in TEXT_MIMETYPES:
return False
return not mimetype.startswith("text/")
class FakeMercurialUI(object):
  """Minimal stand-in for Mercurial's ui object.

  Buffers everything written through it in ``self.output``, silences status
  and config reads, and delegates path/config lookups to the real
  ``global_ui``.
  """

  def __init__(self):
    self.quiet = True
    self.output = ''

  def write(self, *args, **opts):
    # Mirror ui.write(): space-join the positional args onto the buffer.
    self.output = self.output + ' '.join(args)

  def copy(self):
    # Mercurial copies the ui around; sharing this one instance keeps all
    # output in a single buffer.
    return self

  def status(self, *args, **opts):
    pass

  def readconfig(self, *args, **opts):
    pass

  def expandpath(self, *args, **opts):
    return global_ui.expandpath(*args, **opts)

  def configitems(self, *args, **opts):
    return global_ui.configitems(*args, **opts)

  def config(self, *args, **opts):
    return global_ui.config(*args, **opts)
use_hg_shell = False  # set to True to shell out to hg always; slower
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""

  def __init__(self, options, ui, repo):
    super(MercurialVCS, self).__init__(options)
    self.ui = ui
    self.repo = repo
    self.status = None  # lazily-filled cache of `hg status -C` lines
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo.root)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # Prefer the mq parent (qparent) if patch queues are in use; otherwise
      # fall back to the working directory's parent revision.
      mqparent, err = RunShellWithReturnCode(['hg', 'log', '--rev', 'qparent', '--template={node}'])
      if not err and mqparent != "":
        self.base_rev = mqparent
      else:
        self.base_rev = RunShell(["hg", "parents", "-q"]).split(':')[1].strip()

  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), (filename, self.subdir)
    return filename[len(self.subdir):].lstrip(r"\/")

  def GenerateDiff(self, extra_args):
    """Return an svn-style diff of the working dir against base_rev."""
    # If no file specified, restrict to the current subdir
    extra_args = extra_args or ["."]
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
        silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files

  def get_hg_status(self, rev, path):
    """Return the 1-2 `hg status -C` lines describing `path` at `rev`."""
    # We'd like to use 'hg status -C path', but that is buggy
    # (see http://mercurial.selenic.com/bts/issue3023).
    # Instead, run 'hg status -C' without a path
    # and skim the output for the path we want.
    if self.status is None:
      if use_hg_shell:
        out = RunShell(["hg", "status", "-C", "--rev", rev])
      else:
        fui = FakeMercurialUI()
        ret = commands.status(fui, self.repo, *[], **{'rev': [rev], 'copies': True})
        if ret:
          raise util.Abort(ret)
        out = fui.output
      self.status = out.splitlines()
    for i in range(len(self.status)):
      # line is
      #   A path
      #   M path
      # etc
      line = self.status[i].replace('\\', '/')
      if line[2:] == path:
        # A copy source follows on the next line, indented two spaces.
        if i+1 < len(self.status) and self.status[i+1][:2] == '  ':
          return self.status[i:i+2]
        return self.status[i:i+1]
    raise util.Abort("no status for " + path)

  def GetBaseFile(self, filename):
    set_status("inspecting " + filename)
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    out = self.get_hg_status(self.base_rev, relpath)
    status, what = out[0].split(' ', 1)
    # An "A"dd with a copy source is really a "M"ove/copy of the old path.
    if len(out) > 1 and status == "A" and what == relpath:
      oldrelpath = out[1].strip()
      status = "M"
    if ":" in self.base_rev:
      base_rev = self.base_rev.split(":", 1)[0]
    else:
      base_rev = self.base_rev
    if status != "A":
      if use_hg_shell:
        base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath], silent_ok=True)
      else:
        base_content = str(self.repo[base_rev][oldrelpath].data())
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content and use_hg_shell:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
          silent_ok=True, universal_newlines=False)
    # Only binary images keep their new content; for text the diff suffices.
    if not is_binary or not self.IsImage(relpath):
      new_content = None
    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
    pertaining to filename.
  """
  patches = []
  current_name = None
  current_lines = []
  for line in data.splitlines(True):
    started_name = None
    if line.startswith('Index:'):
      started_name = line.split(':', 1)[1].strip()
    elif line.startswith('Property changes on:'):
      # When a file is modified, paths use '/' between directories, however
      # when a property is modified '\' is used on Windows.  Make them the
      # same, otherwise the file shows up twice.
      prop_name = line.split(':', 1)[1].strip().replace('\\', '/')
      if prop_name != current_name:
        # File has property changes but no modifications: start a new diff.
        started_name = prop_name
    if started_name:
      # Flush the diff collected so far before starting the next file.
      if current_name and current_lines:
        patches.append((current_name, ''.join(current_lines)))
      current_name = started_name
      current_lines = [line]
      continue
    current_lines.append(line)
  if current_name and current_lines:
    patches.append((current_name, ''.join(current_lines)))
  return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
patches = SplitPatch(data)
rv = []
for patch in patches:
set_status("uploading patch for " + patch[0])
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
form_fields = [("filename", patch[0])]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", patch[1])]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
print "Uploading patch for " + patch[0]
response_body = rpc_server.Send(url, body, content_type=ctype)
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
rv.append([lines[1], patch[0]])
return rv
| Python |
#!/usr/bin/env python
import os
from distutils.core import setup
import py2exe
# py2exe build configuration: packages the CLI and GUI font-catalog
# generators into Windows console executables.
setup(
    console=['font_list_generator.py', 'font_list_generator_gui.py'],
    #classifiers=?? #a list of classifiers list of strings (4)
    #download_url='http://',
    #platforms=[], #multiplatform - don't limit it
    author='Shula Amokshim',
    author_email='shula.amokshim@gmx.com',
    description='generate font catalogs',
    license='GPL v3',
    long_description='generate font catalogs',
    maintainer='Shula Amokshim',
    maintainer_email='shula.amokshim@gmx.com',
    name='Font-Sampler',
    url='http://code.google.com/p/font-sampler',
    version='1.0',
    # BUG FIX: distutils expects `requires` to be a *list* of requirement
    # strings; a bare string is treated as a sequence of one-character
    # "requirements".
    requires=['wxPython'],
    data_files=[
        ('bitmaps', []),
        ('config', []),
    ]
)
| Python |
# -*- coding: utf-8 -*-
# Mapping of CSS/X11 color names to (R, G, B) tuples, used to resolve the
# --fore / --background command-line color names for PIL drawing.
pil_colors = {
    'indigo': (75, 0, 130),
    'gold': (255, 215, 0),
    'firebrick': (178, 34, 34),
    'indianred': (205, 92, 92),
    'yellow': (255, 255, 0),
    'darkolivegreen': (85, 107, 47),
    'darkseagreen': (143, 188, 143),
    'mediumvioletred': (199, 21, 133),
    'mediumorchid': (186, 85, 211),
    'chartreuse': (127, 255, 0),
    'mediumslateblue': (123, 104, 238),
    'black': (0, 0, 0),
    'springgreen': (0, 255, 127),
    'crimson': (220, 20, 60),
    'lightsalmon': (255, 160, 122),
    'brown': (165, 42, 42),
    'turquoise': (64, 224, 208),
    'olivedrab': (107, 142, 35),
    'cyan': (0, 255, 255),
    'silver': (192, 192, 192),
    'skyblue': (135, 206, 235),
    'gray': (128, 128, 128),
    'darkturquoise': (0, 206, 209),
    'goldenrod': (218, 165, 32),
    'darkgreen': (0, 100, 0),
    'darkviolet': (148, 0, 211),
    'darkgray': (169, 169, 169),
    'lightpink': (255, 182, 193),
    'teal': (0, 128, 128),
    'darkmagenta': (139, 0, 139),
    'lightgoldenrodyellow': (250, 250, 210),
    'lavender': (230, 230, 250),
    'yellowgreen': (154, 205, 50),
    'thistle': (216, 191, 216),
    'violet': (238, 130, 238),
    'navy': (0, 0, 128),
    'orchid': (218, 112, 214),
    'blue': (0, 0, 255),
    'ghostwhite': (248, 248, 255),
    'honeydew': (240, 255, 240),
    'cornflowerblue': (100, 149, 237),
    'darkblue': (0, 0, 139),
    'darkkhaki': (189, 183, 107),
    'mediumpurple': (147, 112, 219),
    'cornsilk': (255, 248, 220),
    'red': (255, 0, 0),
    'bisque': (255, 228, 196),
    'slategray': (112, 128, 144),
    'darkcyan': (0, 139, 139),
    'khaki': (240, 230, 140),
    'wheat': (245, 222, 179),
    'deepskyblue': (0, 191, 255),
    'darkred': (139, 0, 0),
    'steelblue': (70, 130, 180),
    'aliceblue': (240, 248, 255),
    'gainsboro': (220, 220, 220),
    'mediumturquoise': (72, 209, 204),
    'floralwhite': (255, 250, 240),
    'coral': (255, 127, 80),
    'purple': (128, 0, 128),
    'lightgrey': (211, 211, 211),
    'lightcyan': (224, 255, 255),
    'darksalmon': (233, 150, 122),
    'beige': (245, 245, 220),
    'azure': (240, 255, 255),
    'lightsteelblue': (176, 196, 222),
    'oldlace': (253, 245, 230),
    'greenyellow': (173, 255, 47),
    'royalblue': (65, 105, 225),
    'lightseagreen': (32, 178, 170),
    'mistyrose': (255, 228, 225),
    'sienna': (160, 82, 45),
    'lightcoral': (240, 128, 128),
    'orangered': (255, 69, 0),
    'navajowhite': (255, 222, 173),
    'lime': (0, 255, 0),
    'palegreen': (152, 251, 152),
    'burlywood': (222, 184, 135),
    'seashell': (255, 245, 238),
    'mediumspringgreen': (0, 250, 154),
    'fuchsia': (255, 0, 255),
    'papayawhip': (255, 239, 213),
    'blanchedalmond': (255, 235, 205),
    'peru': (205, 133, 63),
    'aquamarine': (127, 255, 212),
    'white': (255, 255, 255),
    'darkslategray': (47, 79, 79),
    'ivory': (255, 255, 240),
    'dodgerblue': (30, 144, 255),
    'lemonchiffon': (255, 250, 205),
    'chocolate': (210, 105, 30),
    'orange': (255, 165, 0),
    'forestgreen': (34, 139, 34),
    'slateblue': (106, 90, 205),
    'olive': (128, 128, 0),
    'mintcream': (245, 255, 250),
    'antiquewhite': (250, 235, 215),
    'darkorange': (255, 140, 0),
    'cadetblue': (95, 158, 160),
    'moccasin': (255, 228, 181),
    'limegreen': (50, 205, 50),
    'saddlebrown': (139, 69, 19),
    'darkslateblue': (72, 61, 139),
    'lightskyblue': (135, 206, 250),
    'deeppink': (255, 20, 147),
    'plum': (221, 160, 221),
    'aqua': (0, 255, 255),
    'darkgoldenrod': (184, 134, 11),
    'maroon': (128, 0, 0),
    'sandybrown': (244, 164, 96),
    'magenta': (255, 0, 255),
    'tan': (210, 180, 140),
    'rosybrown': (188, 143, 143),
    'pink': (255, 192, 203),
    'lightblue': (173, 216, 230),
    'palevioletred': (219, 112, 147),
    'mediumseagreen': (60, 179, 113),
    'dimgray': (105, 105, 105),
    'powderblue': (176, 224, 230),
    'seagreen': (46, 139, 87),
    'snow': (255, 250, 250),
    'mediumblue': (0, 0, 205),
    'midnightblue': (25, 25, 112),
    'paleturquoise': (175, 238, 238),
    'palegoldenrod': (238, 232, 170),
    'whitesmoke': (245, 245, 245),
    'darkorchid': (153, 50, 204),
    'salmon': (250, 128, 114),
    'lightslategray': (119, 136, 153),
    'lawngreen': (124, 252, 0),
    'lightgreen': (144, 238, 144),
    'tomato': (255, 99, 71),
    'hotpink': (255, 105, 180),
    'lightyellow': (255, 255, 224),
    'lavenderblush': (255, 240, 245),
    'linen': (250, 240, 230),
    'mediumaquamarine': (102, 205, 170),
    'green': (0, 128, 0),
    'blueviolet': (138, 43, 226),
    'peachpuff': (255, 218, 185),
}
import ImageDraw, Image, ImageFont
import os,sys
from optparse import OptionParser #not needed if used as a module
def simple_bidi_reverse(s):
    """Reverse each run of Hebrew characters in s, leaving other text as-is.

    PIL draws text strictly left-to-right, so RTL (Hebrew) words come out
    mirrored; reversing each Hebrew run in advance makes them render
    readably.  Non-Hebrew characters keep their positions and order.

    BUG FIX: the original began with an unconditional `return s`, making the
    whole implementation dead code; the dead body also dropped the character
    that terminated a Hebrew run and never flushed a run that reached the
    end of the string.
    """
    s = unicode(s)
    news = ''
    hebrew = u'אבגדהוזחטיכךלמםנןסעפףצץקרשתֱֲֳִֵֶַָֹּׁׂ'
    hebword = ''  # current run of consecutive Hebrew characters
    for c in s:
        if c in hebrew:
            hebword += c  # extend the current Hebrew run
            continue
        if hebword:
            # Run ended: emit it reversed before the terminating character.
            news += hebword[::-1]
            hebword = ''
        news += c
    if hebword:  # flush a run that reaches the end of the string
        news += hebword[::-1]
    return news
def parse_options():
option_parser = OptionParser()
option_parser.add_option('-f', '--fore', action='store', dest='fore', default='black', help="fore color [default: black]", type='string')
option_parser.add_option('-b', '--background', action='store', dest='background', default='white', help="bg color [default: white]", type='string')
option_parser.add_option('-s', '--split-height', action='store', dest='height', default=0, help="split new image every n pixels. default=0 (no split, single image)", type='int')
option_parser.add_option('-t', '--text-sample', action='store', dest='sample', default='', help="sample text", type='string')
option_parser.add_option('-p', '--font-dir', action='store', dest='fontdir', default=r'c:\windows\fonts', help=r"font directory (default: c:\windows\fonts)", type='string')
option_parser.add_option('-z', '--font-size', action='store', dest='size', default=22, help=r"font size (pixels)", type='int')
option_parser.add_option('-c', '--color-list', action='store_false', dest='colorlist', default=False, help=r"print a list of all colors")
option_parser.add_option('-o', '--output-dir', action='store', dest='outputdir', default='.', help=r"output directory")
(options, args) = option_parser.parse_args(sys.argv[1:])
options.help = option_parser.format_help()
temp = [unicode(opt)+': '+unicode(options.__dict__[opt]) for opt in options.__dict__ if opt!='help']
print 'command line options:', temp
return options
def color_to_hex(c):
    """Return the 6-digit lowercase hex string 'rrggbb' for an RGB triple.

    BUG FIX: the original used hex(channel)[2:], which drops leading zeros
    (e.g. (0, 15, 255) produced '0fff' instead of '000fff'); %02x zero-pads
    each channel to exactly two digits.
    """
    return '%02x%02x%02x' % (c[0], c[1], c[2])
def generate_font_image(options):
    """Render one sample line per TrueType font found in options.fontdir,
    saving the result as one or more PNG images in options.outputdir.

    Returns the number of fonts processed (also the loop counter on the
    early exits, where it may be unset -- see notes below).
    """
    # Resolve/validate the output directory, creating it if needed.
    if os.path.isfile(options.outputdir):
        print 'fatal error - outputdir exists, and is a file. writing output to "."'
        options.outputdir = os.path.abspath('.')
    elif not os.path.isdir(options.outputdir):
        os.makedirs( options.outputdir )
    fontfiles = [options.fontdir + os.sep + f for f in os.listdir(options.fontdir) if f[-3:].lower()=='ttf']
    if not fontfiles:
        print 'no font files found'
        sys.exit(1)
    text = options.sample
    #print type(text)
    #print len(text)
    #print text
    if not text:
        # Default sample: Hebrew letters, digits and punctuation.
        text = 'אבגד זהוחף קרשצת 123 ?$%^!'
    if type(text) is unicode:
        #text = text.decode('utf-8')
        pass
    width = 950
    img_counter = 0
    fontsize = options.size
    ystep = int(fontsize*1.5)  # vertical advance per sample line
    i=0
    img = False  # False means "start a fresh image on the next iteration"
    tot = len(fontfiles)
    # height==0 means no splitting: size one image to hold every font.
    height = options.height or len(fontfiles) * ystep + 20
    if type(options.fore) is str:
        # NOTE(review): unlike the else-branch, this path never sets a
        # forecolorname; it is only referenced in commented-out code below,
        # but worth confirming before reviving that code.
        forecolor = pil_colors[options.fore]
    else:
        try:
            forecolor = options.fore
            if len(forecolor)==3:
                print forecolor
            forecolor = forecolor[:3]
            forecolorname = color_to_hex(forecolor)
        except:
            print 'fatal error - fore color not in right format'
            return
    if type(options.background) is str:
        backcolor = pil_colors[options.background]
        backcolorname = options.background
    else:
        try:
            backcolor = options.background
            if len(backcolor)==3:
                backcolor = backcolor[:3]
            backcolorname = color_to_hex(backcolor)
        except:
            print 'fatal error - color not in right format'
            return
    for fontfile in fontfiles:
        if not img:
            # Begin a new page.
            img = Image.new('RGB',(width, height), backcolor )
            draw = ImageDraw.Draw(img)
            draw.fontmode = 'I' #"1", "P", "I", "F"
            y = 6
        last_printed = False
        i+=1
        font = ImageFont.truetype(fontfile, fontsize)
        # Prefix each sample with the file name and the font's family name.
        wtf = '%s %s ' % (os.path.basename(fontfile) , font.getname()[0])
        if type(wtf) is unicode:
            pass #wtf = wtf.encode('utf-8')
        text2 = wtf + text
        # Progress indicator (percentage, overwritten in place via \r).
        print '%% %0.2d \r' % (100.0* i / tot),
        draw = ImageDraw.Draw(img)
        try:
            draw.text((10, y), text2, font=font, fill=forecolor )
            y += ystep
        except:
            # Some fonts cannot render the sample text; skip them.
            print 'error',y,fontfile
        # for testing: if y> 500: break
        if y >= height:
            # Page full: save it and start a new one next iteration.
            #fname = 'fonts.%s-on-%s.%04d.png' % (forecolorname, backcolorname, img_counter)
            fname = 'generated_fonts_%04d.png' % (img_counter)
            fname = os.path.join( options.outputdir, fname )
            print 'saving ' + fname
            img.save(fname)
            #init new image
            img_counter += 1
            img = False
            last_printed=True
    if not last_printed:
        # Save the final, partially-filled page.
        fname = 'generated_fonts_%04d.png' % (img_counter)
        fname = os.path.join( options.outputdir, fname )
        print 'saving ' + fname
        img.save(fname)
    return i
if __name__ == "__main__":
options = parse_options()
if options.colorlist:
for c in pil_colors:
print c[1]
sys.exit(0)
if options.fore not in pil_colors:
options.fore = 'black'
if options.background not in pil_colors:
options.background = 'white'
generate_font_image(options)
| Python |
#Boa:Frame:Frame1
import wx
import sys
import os
import wx.lib.filebrowsebutton
import AboutDialog
import font_list_generator
class Options:
    # Empty attribute bag: the GUI fills in the same fields that the CLI's
    # option parser would produce (fore, background, height, fontdir, size,
    # sample, outputdir) before handing it to the generator.
    pass
# ------------------------ private library ---------------
#from library import *
def directory_chooser_dialog(title, default):
    """Show a directory picker; return the chosen path, or `default` on cancel."""
    dialog = wx.DirDialog(None, title, default)
    try:
        if dialog.ShowModal() == wx.ID_OK:
            return dialog.GetPath()
        return default
    finally:
        # BUG FIX: wx dialogs must be destroyed explicitly; the original
        # leaked a native window handle on every invocation.
        dialog.Destroy()
def msgbox(msg, title=''):
    """Show a modal message box; an optional title sets the window caption."""
    # BUG FIX: `title` was accepted but ignored.  Pass it through when given;
    # keep wx's default caption otherwise (preserving prior behavior for
    # callers that omit it).
    if title:
        wx.MessageBox(msg, title)
    else:
        wx.MessageBox(msg)
def get_samples_dir():
    """Default output location: a 'Font-Samples' folder under My Documents."""
    documents = get_mydocuments_dir()
    return os.path.join(documents, 'Font-Samples')
def get_mydocuments_dir():
    """Best-effort path of the user's documents/desktop folder per platform."""
    if sys.platform in ['darwin', 'mac']:
        from Carbon import Folder, Folders
        folderref = Folder.FSFindFolder(Folders.kUserDomain,
            Folders.kDocumentsFolderType, False)
        mydocs = folderref.as_pathname()
    elif 'win' in sys.platform:
        # NOTE: 'win' is also a substring of 'darwin', but the darwin branch
        # above runs first, so this only matches real Windows platforms.
        # Resolve the "My Documents" shell folder via its well-known GUID.
        from win32com.shell import shell
        df = shell.SHGetDesktopFolder()
        pidl = df.ParseDisplayName(0, None, "::{450d8fba-ad25-11d0-98a8-0800361b1103}")[1]
        mydocs = shell.SHGetPathFromIDList(pidl)
    elif 'linux' in sys.platform:
        # BUG FIX: '~' is not expanded by filesystem APIs; expand it here so
        # callers get a usable absolute path instead of a literal '~/Desktop'.
        mydocs = os.path.expanduser('~/Desktop')
    else:
        mydocs = os.path.abspath('.')  # current dir as a last resort
    return mydocs
def get_fonts_dir():
if sys.platform in ['darwin', 'mac']:
return ''
elif 'win' in sys.platform:
return os.environ['windir'] + '\\' + 'Fonts'
elif 'linux' in sys.platform:
return '/usr/share/fonts/truetype'
else:
return os.path.abspath('.') #cur dir
# ------------------------ private library ---------------
def create(parent):
    # Boa-style factory used by the framework to instantiate the main frame.
    return Frame1(parent)
# Boa-generated blocks of unique wx widget/menu-item IDs, unpacked from
# wx.NewId() calls; the names are referenced by the _init_* methods below.
[wxID_FRAME1, wxID_FRAME1BACK_COLOR_PICKER, wxID_FRAME1BTNDIRPICKER,
 wxID_FRAME1BTNEXIT, wxID_FRAME1BTNOUTPUTDIR, wxID_FRAME1BUTTON1,
 wxID_FRAME1FONT_SIZE, wxID_FRAME1FORE_COLOR_PICKER, wxID_FRAME1PANEL1,
 wxID_FRAME1SPLIT_HEIGHT, wxID_FRAME1STATICTEXT1, wxID_FRAME1STATICTEXT2,
 wxID_FRAME1STATICTEXT3, wxID_FRAME1STATICTEXT4, wxID_FRAME1STATICTEXT5,
 wxID_FRAME1STATICTEXT6, wxID_FRAME1STATICTEXT7, wxID_FRAME1STATUSBAR1,
 wxID_FRAME1TXTDIRFONTDIR, wxID_FRAME1TXTOUTPUTDIR, wxID_FRAME1TXTSAMPLE,
] = [wx.NewId() for _init_ctrls in range(21)]

[wxID_FRAME1MENU1ITEMS0, wxID_FRAME1MENU1ITEMS1, wxID_FRAME1MENU1ITEMS2,
 wxID_FRAME1MENU1ITEMS4, wxID_FRAME1MENU1ITEMS5, wxID_FRAME1MENU1ITEMS6,
 wxID_FRAME1MENU1ITEMS7, wxID_FRAME1MENU1ITEMS8,
] = [wx.NewId() for _init_coll_menu1_Items in range(8)]

[wxID_FRAME1EDITMENUSITEMS0, wxID_FRAME1EDITMENUSITEMS1,
 wxID_FRAME1EDITMENUSITEMS2, wxID_FRAME1EDITMENUSITEMS4,
 wxID_FRAME1EDITMENUSITEMS5,
] = [wx.NewId() for _init_coll_editmenus_Items in range(5)]

[wxID_FRAME1FILEMENUSITEMS_EXIT, wxID_FRAME1FILEMENUSITEM_OPEN,
 wxID_FRAME1FILEMENUSITEM_SAVE, wxID_FRAME1FILEMENUSITEM_SAVE_AS,
] = [wx.NewId() for _init_coll_filemenus_Items in range(4)]

[wxID_FRAME1HELPMENUSITEM_ABOUT, wxID_FRAME1HELPMENUSITEM_HELP_CONTENTS,
] = [wx.NewId() for _init_coll_helpmenus_Items in range(2)]

[wxID_FRAME1EDITMENUS_WTFITEMS1, wxID_FRAME1EDITMENUS_WTFITEMS2,
 wxID_FRAME1EDITMENUS_WTFITEMS4, wxID_FRAME1EDITMENUS_WTFITEMS5,
 wxID_FRAME1EDITMENUS_WTFITEM_DO_THIS,
] = [wx.NewId() for _init_coll_editmenus_wtf_Items in range(5)]
class Frame1(wx.Frame):
def _init_coll_menuBar1_Menus(self, parent):
# generated method, don't edit
parent.Append(menu=self.filemenus, title=u'&File')
parent.Append(menu=self.editmenus_wtf, title=u'&Edit')
parent.Append(menu=self.helpmenus, title=u'&Help')
def _init_coll_filemenus_Items(self, parent):
# generated method, don't edit
parent.Append(help='', id=wxID_FRAME1FILEMENUSITEM_OPEN,
kind=wx.ITEM_NORMAL, text=u'&Open')
parent.Append(help='', id=wxID_FRAME1FILEMENUSITEM_SAVE,
kind=wx.ITEM_NORMAL, text=u'&Save')
parent.Append(help='', id=wxID_FRAME1FILEMENUSITEM_SAVE_AS,
kind=wx.ITEM_NORMAL, text=u'Save &As...')
parent.AppendSeparator()
parent.Append(help=u'', id=wxID_FRAME1FILEMENUSITEMS_EXIT,
kind=wx.ITEM_NORMAL, text=u'E&xit')
self.Bind(wx.EVT_MENU, self.OnFilemenusItems_exitMenu,
id=wxID_FRAME1FILEMENUSITEMS_EXIT)
def _init_coll_helpmenus_Items(self, parent):
# generated method, don't edit
parent.Append(help=u'', id=wxID_FRAME1HELPMENUSITEM_HELP_CONTENTS,
kind=wx.ITEM_NORMAL, text=u'&Help contents')
parent.AppendSeparator()
parent.Append(help=u'', id=wxID_FRAME1HELPMENUSITEM_ABOUT,
kind=wx.ITEM_NORMAL, text=u'&About')
self.Bind(wx.EVT_MENU, self.OnHelpmenusItem_aboutMenu,
id=wxID_FRAME1HELPMENUSITEM_ABOUT)
def _init_coll_editmenus_wtf_Items(self, parent):
# generated method, don't edit
parent.Append(help='', id=wxID_FRAME1EDITMENUS_WTFITEM_DO_THIS,
kind=wx.ITEM_NORMAL, text=u'Do this')
parent.Append(help='', id=wxID_FRAME1EDITMENUS_WTFITEMS1,
kind=wx.ITEM_NORMAL, text=u'Do That...')
parent.Append(help='', id=wxID_FRAME1EDITMENUS_WTFITEMS2,
kind=wx.ITEM_NORMAL, text=u'Etc.')
parent.AppendSeparator()
parent.Append(help='', id=wxID_FRAME1EDITMENUS_WTFITEMS4,
kind=wx.ITEM_NORMAL, text='Items4')
parent.Append(help='', id=wxID_FRAME1EDITMENUS_WTFITEMS5,
kind=wx.ITEM_NORMAL, text='Items5')
self.Bind(wx.EVT_MENU, self.OnEditmenus_wtfItem_do_thisMenu,
id=wxID_FRAME1EDITMENUS_WTFITEM_DO_THIS)
def _init_coll_statusBar1_Fields(self, parent):
# generated method, don't edit
parent.SetFieldsCount(3)
parent.SetStatusText(number=0, text=u'Ready')
parent.SetStatusText(number=1, text=u'Font count')
parent.SetStatusText(number=2, text=u'\xa9 Shula')
parent.SetStatusWidths([-1, -2, -1])
def _init_utils(self):
# generated method, don't edit
self.menuBar1 = wx.MenuBar()
self.editmenus_wtf = wx.Menu(title=u'')
self.filemenus = wx.Menu(title=u'&File')
self.helpmenus = wx.Menu(title=u'&Help')
self._init_coll_menuBar1_Menus(self.menuBar1)
self._init_coll_editmenus_wtf_Items(self.editmenus_wtf)
self._init_coll_filemenus_Items(self.filemenus)
self._init_coll_helpmenus_Items(self.helpmenus)
def _init_ctrls(self, prnt):
# generated method, don't edit
wx.Frame.__init__(self, id=wxID_FRAME1, name='', parent=prnt,
pos=wx.Point(446, 273), size=wx.Size(333, 413),
style=wx.DEFAULT_FRAME_STYLE, title=u'Generate Font Samples')
self._init_utils()
self.SetClientSize(wx.Size(325, 386))
self.SetStatusBarPane(1)
self.SetMenuBar(self.menuBar1)
self.statusBar1 = wx.StatusBar(id=wxID_FRAME1STATUSBAR1,
name='statusBar1', parent=self, style=0)
self.statusBar1.SetStatusText(u'zorba')
self._init_coll_statusBar1_Fields(self.statusBar1)
self.SetStatusBar(self.statusBar1)
self.panel1 = wx.Panel(id=wxID_FRAME1PANEL1, name='panel1', parent=self,
pos=wx.Point(0, 0), size=wx.Size(325, 347),
style=wx.TAB_TRAVERSAL)
self.txtDirFontdir = wx.TextCtrl(id=wxID_FRAME1TXTDIRFONTDIR,
name=u'txtDirFontdir', parent=self.panel1, pos=wx.Point(96, 16),
size=wx.Size(184, 21), style=0, value=u'C:\\WINDOWS\\Fonts')
self.btnDirPicker = wx.Button(id=wxID_FRAME1BTNDIRPICKER, label=u'...',
name=u'btnDirPicker', parent=self.panel1, pos=wx.Point(280, 16),
size=wx.Size(27, 23), style=0)
self.btnDirPicker.Bind(wx.EVT_BUTTON, self.OnBtnDirPickerButton,
id=wxID_FRAME1BTNDIRPICKER)
self.back_color_picker = wx.ColourPickerCtrl(col=wx.Colour(253, 255,
185), id=wxID_FRAME1BACK_COLOR_PICKER, name=u'back_color_picker',
parent=self.panel1, pos=wx.Point(96, 56), size=wx.Size(72, 20),
style=wx.CLRP_DEFAULT_STYLE)
self.back_color_picker.SetToolTipString(u'zzzz in Pixels)')
self.fore_color_picker = wx.ColourPickerCtrl(col=wx.Colour(0, 0, 64),
id=wxID_FRAME1FORE_COLOR_PICKER, name=u'fore_color_picker',
parent=self.panel1, pos=wx.Point(96, 88), size=wx.Size(64, 24),
style=wx.CLRP_DEFAULT_STYLE)
self.split_height = wx.TextCtrl(id=wxID_FRAME1SPLIT_HEIGHT,
name=u'split_height', parent=self.panel1, pos=wx.Point(96, 128),
size=wx.Size(40, 21), style=0, value=u'600')
self.font_size = wx.TextCtrl(id=wxID_FRAME1FONT_SIZE, name=u'font_size',
parent=self.panel1, pos=wx.Point(96, 160), size=wx.Size(40, 21),
style=0, value=u'22')
self.txtSample = wx.TextCtrl(id=wxID_FRAME1TXTSAMPLE, name=u'txtSample',
parent=self.panel1, pos=wx.Point(96, 192), size=wx.Size(216, 21),
style=0,
value=u'\u05d0\u05b8\u05dc\u05b6\u05e9\u05d9\u05d5\u05e3 \u05e4\u05bc\u05b5\u05e8\u05b4\u05e7 1234 %^&*$')
self.button1 = wx.Button(id=wx.ID_OK, label=u'&Generate Samples',
name='button1', parent=self.panel1, pos=wx.Point(24, 280),
size=wx.Size(168, 56), style=0)
self.button1.SetToolTipString(u'button1')
self.button1.SetHelpText(u'')
self.button1.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD, False,
u'Arial'))
self.button1.Bind(wx.EVT_BUTTON, self.OnButton1Button, id=wx.ID_OK)
self.staticText2 = wx.StaticText(id=wxID_FRAME1STATICTEXT2,
label=u'Font (*.ttf) containing folder', name='staticText2',
parent=self.panel1, pos=wx.Point(8, 16), size=wx.Size(88, 32),
style=0)
self.staticText2.SetToolTipString(u'Height of each result image (in Pixels)')
self.staticText1 = wx.StaticText(id=wxID_FRAME1STATICTEXT1,
label=u'Text Color', name='staticText1', parent=self.panel1,
pos=wx.Point(32, 88), size=wx.Size(50, 13), style=0)
self.staticText3 = wx.StaticText(id=wxID_FRAME1STATICTEXT3,
label=u'Background Color', name='staticText3', parent=self.panel1,
pos=wx.Point(8, 56), size=wx.Size(84, 13), style=0)
self.staticText4 = wx.StaticText(id=wxID_FRAME1STATICTEXT4,
label=u'Split to (pixels)', name='staticText4',
parent=self.panel1, pos=wx.Point(16, 128), size=wx.Size(71, 13),
style=0)
self.staticText4.SetToolTipString(u'Height of each result image (in Pixels)')
self.staticText5 = wx.StaticText(id=wxID_FRAME1STATICTEXT5,
label=u'Font Size (pixels)', name='staticText5',
parent=self.panel1, pos=wx.Point(8, 160), size=wx.Size(82, 13),
style=0)
self.staticText5.SetToolTipString(u'Height of each result image (in Pixels)')
self.staticText6 = wx.StaticText(id=wxID_FRAME1STATICTEXT6,
label=u'Sample text', name='staticText6', parent=self.panel1,
pos=wx.Point(32, 192), size=wx.Size(57, 13), style=0)
self.btnExit = wx.Button(id=wx.ID_CANCEL, label=u'&Close',
name=u'btnExit', parent=self.panel1, pos=wx.Point(224, 280),
size=wx.Size(75, 56), style=0)
self.btnExit.Bind(wx.EVT_BUTTON, self.OnBtnExitButton, id=wx.ID_CANCEL)
self.btnOutputDir = wx.Button(id=wxID_FRAME1BTNOUTPUTDIR, label=u'...',
name=u'btnOutputDir', parent=self.panel1, pos=wx.Point(280, 224),
size=wx.Size(27, 23), style=0)
self.btnOutputDir.Bind(wx.EVT_BUTTON, self.OnBtnDirPickerButton,
id=wxID_FRAME1BTNOUTPUTDIR)
self.txtOutputDir = wx.TextCtrl(id=wxID_FRAME1TXTOUTPUTDIR,
name=u'txtOutputDir', parent=self.panel1, pos=wx.Point(96, 224),
size=wx.Size(176, 21), style=0, value=u'')
self.staticText7 = wx.StaticText(id=wxID_FRAME1STATICTEXT7,
label=u'Create samples in', name='staticText7',
parent=self.panel1, pos=wx.Point(8, 224), size=wx.Size(88, 16),
style=0)
def __init__(self, parent):
self._init_ctrls(parent)
self.txtOutputDir.Value = get_samples_dir()
self.txtDirFontdir.Value = get_fonts_dir()
    def OnMenu1Items6Menu(self, event):
        """Unimplemented menu handler; let the event propagate."""
        event.Skip()

    def OnFilemenusItems_exitMenu(self, event):
        """File -> Exit: close the main frame."""
        self.Close()

    def OnHelpmenusItem_aboutMenu(self, event):
        """Help -> About: show the About dialog modally."""
        #msgbox('Welcome to Boa Constructor 0.6.x\nThis template was created by Shula')
        dlg = AboutDialog.Dialog1(self)
        try:
            dlg.ShowModal()
        finally:
            # Always destroy the dialog, even if ShowModal raises.
            dlg.Destroy()

    def OnEditmenus_wtfItem_do_thisMenu(self, event):
        """Debug/test handler: prompt for a directory and echo the choice."""
        d = directory_chooser_dialog('ahalan', 'd:\\shahar')
        wx.MessageBox(d)
        msgbox('done')
    def OnBtnDirPickerButton(self, event):
        """Let the user pick the folder that holds the *.ttf font files."""
        self.txtDirFontdir.Value = directory_chooser_dialog('folder with *.ttf files', self.txtDirFontdir.Value)

    def OnButton1Button(self, event):
        """Collect the UI settings into an Options object and generate the
        font catalog images, reporting progress in the status bar."""
        print self.fore_color_picker.Colour
        options = Options()
        options.fore = self.fore_color_picker.Colour
        options.background = self.back_color_picker.Colour
        # Numeric fields arrive as strings from the text controls.
        options.height = int(self.split_height.Value)
        options.fontdir = self.txtDirFontdir.Value
        options.size = int(self.font_size.Value)
        options.sample = self.txtSample.Value
        options.outputdir = self.txtOutputDir.Value
        font_count = font_list_generator.generate_font_image(options)
        self.set_status(0, 'done')
        self.set_status(1, '%d fonts listed' % font_count)
        print 'done.'
    def OnBtnExitButton(self, event):
        """Close button: dismiss the frame."""
        self.Close()

    def set_status(self, field_num, text):
        """Write *text* into status-bar field number *field_num*."""
        self.statusBar1.SetStatusText(text, field_num)
if __name__ == '__main__':
    # Standalone entry point: create the main frame and run the wx event loop.
    app = wx.PySimpleApp()
    frame = create(None)
    frame.Show()
    app.MainLoop()
| Python |
#Boa:Dialog:Dialog1
import wx
def create(parent):
    """Factory used by the main frame to open this About dialog."""
    return Dialog1(parent)

# Boa-generated wx control ids for the dialog and its children.
[wxID_DIALOG1, wxID_DIALOG1BTNCLOSE, wxID_DIALOG1STATICTEXT1,
 wxID_DIALOG1STATICTEXT2,
] = [wx.NewId() for _init_ctrls in range(4)]
class Dialog1(wx.Dialog):
    """About dialog; closes on the Close button, Enter, Esc or Space."""

    def _init_ctrls(self, prnt):
        # generated method, don't edit
        wx.Dialog.__init__(self, id=wxID_DIALOG1, name='', parent=prnt,
            pos=wx.Point(299, 363), size=wx.Size(414, 190),
            style=wx.DEFAULT_DIALOG_STYLE, title='Dialog1')
        self.SetClientSize(wx.Size(406, 163))
        self.Bind(wx.EVT_KEY_UP, self.OnDialog1KeyUp)
        self.staticText1 = wx.StaticText(id=wxID_DIALOG1STATICTEXT1,
            label=u'About Me', name='staticText1', parent=self,
            pos=wx.Point(80, 16), size=wx.Size(112, 24), style=0)
        self.staticText1.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.NORMAL,
            False, u'Comic Sans MS'))
        self.btnClose = wx.Button(id=wx.ID_CANCEL, label=u'&Close',
            name=u'btnClose', parent=self, pos=wx.Point(288, 120),
            size=wx.Size(75, 23), style=0)
        self.btnClose.Bind(wx.EVT_BUTTON, self.OnButton1Button,
            id=wxID_DIALOG1BTNCLOSE)
        self.staticText2 = wx.StaticText(id=wxID_DIALOG1STATICTEXT2,
            label=u" Don Juan did not discuss the mastery of awareness with me until months later. We were at that time in the house where the nagual's party lived.",
            name='staticText2', parent=self, pos=wx.Point(16, 48),
            size=wx.Size(376, 56), style=0)

    def __init__(self, parent):
        """Create the dialog and its controls."""
        self._init_ctrls(parent)

    def OnDialog1KeyUp(self, event):
        """Close the dialog when Enter, Esc or Spacebar is released."""
        char = event.GetRawKeyCode()
        if char in [13, 27, 32]: # enter, esc, spacebar
            self.OnButton1Button(event)

    def OnButton1Button(self, event):
        """Dismiss the dialog."""
        self.Close()
| Python |
#!/usr/bin/env python
import os
from distutils.core import setup
import py2exe
# py2exe build configuration: packages the CLI and GUI entry points.
setup(
    console=['font_list_generator.py', 'font_list_generator_gui.py'],
    #classifiers=?? #a list of classifiers list of strings (4)
    #download_url='http://',
    #platforms=[], #multiplatform - don't limit it
    author='Shula Amokshim',
    author_email='shula.amokshim@gmx.com',
    description='generate font catalogs',
    license='GPL v3',
    long_description='generate font catalogs',
    maintainer='Shula Amokshim',
    maintainer_email='shula.amokshim@gmx.com',
    name='Font-Sampler',
    url='http://code.google.com/p/font-sampler',
    version='1.0',
    # Bug fix: distutils expects ``requires`` to be a *list* of requirement
    # strings; a bare string is iterated character by character.
    requires=['wxPython'],
    data_files=[
        ('bitmaps', []),
        ('config', []),
    ]
)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao (askxuefeng@gmail.com)'
'''
Runtime functions.
'''
import datetime
UTC_NAME = '(UTC) Coordinated Universal Time'
class UserTimeZone(datetime.tzinfo):
    '''
    User time zone that used to display datetime.

    The base offset is built from hour and minute components; ``dst`` is a
    fixed hour offset added on top of the base offset.

    NOTE(review): for zones behind UTC with a minute component (e.g.
    UTC-04:30) callers must pass a negative minute offset too, because
    timedelta(hours=-4, minutes=30) equals -03:30 -- confirm against callers.
    '''
    def __init__(self, name, utc_hour_offset, utc_min_offset, dst):
        self._name = name
        self._utcoffset = datetime.timedelta(hours=utc_hour_offset, minutes=utc_min_offset)
        self._dst = datetime.timedelta(hours=dst)
    def utcoffset(self, dt):
        # Total offset from UTC includes the daylight-saving component.
        return self._utcoffset + self.dst(dt)
    def dst(self, dt):
        return self._dst
    def tzname(self, dt):
        # Bug fix: tzinfo.tzname is called as tzname(dt); the original
        # signature omitted ``dt``, so datetime.tzname() / strftime('%Z')
        # raised TypeError.
        return self._name
# UTC time zone instance:
# shared singleton used to tag naive datetimes as UTC before conversion.
_UTC_TZ = UserTimeZone(UTC_NAME, 0, 0, 0)
def get_timezone_list():
    '''
    Return timezone list that contains tuples (utc_hour_offset, utc_minute_offset, dst_hour, timezone name).

    NOTE(review): zones behind UTC with a minute component (Caracas,
    Newfoundland) list a *positive* minute offset; consumers must combine
    the signs appropriately -- confirm.
    '''
    return [
        (-12, 0, 0, '(UTC-12:00) International Date Line West',),
        (-11, 0, 0, '(UTC-11:00) Coordinated Universal Time-11',),
        (-11, 0, 0, '(UTC-11:00) Samoa',),
        (-10, 0, 0, '(UTC-10:00) Hawaii',),
        (-9, 0, 0, '(UTC-09:00) Alaska',),
        (-8, 0, 0, '(UTC-08:00) Baja California',),
        (-8, 0, 0, '(UTC-08:00) Pacific Time (US & Canada)',),
        (-7, 0, 0, '(UTC-07:00) Arizona',),
        (-7, 0, 0, '(UTC-07:00) Chihuahua, La Paz, Mazatlan',),
        # Bug fix: Mountain Time is UTC-07:00 (as its own label says),
        # but the hour offset was -6.
        (-7, 0, 0, '(UTC-07:00) Mountain Time (US & Canada)',),
        (-6, 0, 0, '(UTC-06:00) Central America',),
        (-6, 0, 0, '(UTC-06:00) Central Time (US & Canada)',),
        (-6, 0, 0, '(UTC-06:00) Guadalajara, Mexico City, Monterrey',),
        (-6, 0, 0, '(UTC-06:00) Saskatchewan',),
        (-5, 0, 0, '(UTC-05:00) Bogota, Lima, Quito',),
        (-5, 0, 0, '(UTC-05:00) Eastern Time (US & Canada)',),
        (-5, 0, 0, '(UTC-05:00) Indiana (East)',),
        (-4, 30, 0, '(UTC-04:30) Caracas',),
        (-4, 0, 0, '(UTC-04:00) Asuncion',),
        (-4, 0, 0, '(UTC-04:00) Atlantic Time (Canada)',),
        (-4, 0, 0, '(UTC-04:00) Cuiaba',),
        (-4, 0, 0, '(UTC-04:00) Georgetown, La Paz, Manaus, San Juan',),
        (-4, 0, 0, '(UTC-04:00) Santiago',),
        (-3, 30, 0, '(UTC-03:30) Newfoundland',),
        (-3, 0, 0, '(UTC-03:00) Brasilia',),
        (-3, 0, 0, '(UTC-03:00) Buenos Aires',),
        (-3, 0, 0, '(UTC-03:00) Cayenne, Fortaleza',),
        (-3, 0, 0, '(UTC-03:00) Greenland',),
        (-3, 0, 0, '(UTC-03:00) Montevideo',),
        (-2, 0, 0, '(UTC-02:00) Coordinated Universal Time-02',),
        (-2, 0, 0, '(UTC-02:00) Mid-Atlantic',),
        (-1, 0, 0, '(UTC-01:00) Azores',),
        (-1, 0, 0, '(UTC-01:00) Cape Verde Is.',),
        (0, 0, 0, '(UTC) Casablanca',),
        (0, 0, 0, UTC_NAME,),
        (0, 0, 0, '(UTC) Dublin, Edinburgh, Lisbon, London',),
        (0, 0, 0, '(UTC) Monrovia, Reykjavik',),
        (1, 0, 0, '(UTC+01:00) Amsterdam, Berlin, Bern, Rome, Stockholm, Vienna',),
        (1, 0, 0, '(UTC+01:00) Belgrade, Bratislava, Budapest, Ljubljana, Prague',),
        (1, 0, 0, '(UTC+01:00) Brussels, Copenhagen, Madrid, Paris'),
        (1, 0, 0, '(UTC+01:00) Sarajevo, Skopje, Warsaw, Zagreb',),
        (1, 0, 0, '(UTC+01:00) West Central Africa',),
        (1, 0, 0, '(UTC+01:00) Windhoek',),
        (2, 0, 0, '(UTC+02:00) Amman',),
        (2, 0, 0, '(UTC+02:00) Athens, Bucharest, Istanbul',),
        (2, 0, 0, '(UTC+02:00) Beirut',),
        (2, 0, 0, '(UTC+02:00) Cairo',),
        (2, 0, 0, '(UTC+02:00) Damascus',),
        (2, 0, 0, '(UTC+02:00) Harare, Pretoria',),
        (2, 0, 0, '(UTC+02:00) Helsinki, Kyiv, Riga, Sofia, Tallinn, Vilnius',),
        (2, 0, 0, '(UTC+02:00) Jerusalem',),
        (2, 0, 0, '(UTC+02:00) Minsk',),
        (3, 0, 0, '(UTC+03:00) Baghdad',),
        (3, 0, 0, '(UTC+03:00) Kuwait, Riyadh',),
        (3, 0, 0, '(UTC+03:00) Moscow, St. Petersburg, Volgograd',),
        (3, 0, 0, '(UTC+03:00) Nairobi',),
        (3, 30, 0, '(UTC+03:30) Tehran',),
        (4, 0, 0, '(UTC+04:00) Abu Dhabi, Muscat',),
        (4, 0, 0, '(UTC+04:00) Baku',),
        (4, 0, 0, '(UTC+04:00) Port Louis',),
        (4, 0, 0, '(UTC+04:00) Tbilisi',),
        (4, 0, 0, '(UTC+04:00) Yerevan',),
        (4, 30, 0, '(UTC+04:30) Kabul',),
        (5, 0, 0, '(UTC+05:00) Ekaterinburg',),
        (5, 0, 0, '(UTC+05:00) Islamabad, Karachi',),
        (5, 0, 0, '(UTC+05:00) Tashkent',),
        (5, 30, 0, '(UTC+05:30) Chennai, Kolkata, Mumbai, New Delhi',),
        (5, 30, 0, '(UTC+05:30) Sri Jayawardenepura',),
        (5, 45, 0, '(UTC+05:45) Kathmandu',),
        (6, 0, 0, '(UTC+06:00) Astana',),
        (6, 0, 0, '(UTC+06:00) Dhaka',),
        (6, 0, 0, '(UTC+06:00) Novosibirsk',),
        (6, 30, 0, '(UTC+06:30) Yangon (Rangoon)',),
        (7, 0, 0, '(UTC+07:00) Bangkok, Hanoi, Jakarta',),
        (7, 0, 0, '(UTC+07:00) Krasnoyarsk',),
        (8, 0, 0, '(UTC+08:00) Beijing, Chongqing, Hong Kong, Urumqi',),
        (8, 0, 0, '(UTC+08:00) Irkutsk',),
        (8, 0, 0, '(UTC+08:00) Kuala Lumpur, Singapore',),
        (8, 0, 0, '(UTC+08:00) Perth',),
        (8, 0, 0, '(UTC+08:00) Taipei',),
        (8, 0, 0, '(UTC+08:00) Ulaanbaatar',),
        (9, 0, 0, '(UTC+09:00) Osaka, Sapporo, Tokyo',),
        (9, 0, 0, '(UTC+09:00) Seoul',),
        (9, 0, 0, '(UTC+09:00) Yakutsk',),
        (9, 30, 0, '(UTC+09:30) Adelaide',),
        (9, 30, 0, '(UTC+09:30) Darwin',),
        (10, 0, 0, '(UTC+10:00) Brisbane',),
        (10, 0, 0, '(UTC+10:00) Canberra, Melbourne, Sydney',),
        (10, 0, 0, '(UTC+10:00) Guam, Port Moresby',),
        (10, 0, 0, '(UTC+10:00) Hobart',),
        (10, 0, 0, '(UTC+10:00) Vladivostok',),
        (11, 0, 0, '(UTC+11:00) Magadan, Solomon Is., New Caledonia',),
        (12, 0, 0, '(UTC+12:00) Auckland, Wellington',),
        (12, 0, 0, '(UTC+12:00) Coordinated Universal Time+12',),
        (12, 0, 0, '(UTC+12:00) Fiji'),
        (13, 0, 0, '(UTC+13:00) Nuku\'alofa'),
    ]
def convert_datetime(naive_datetime, tzinfo):
    '''
    Interpret *naive_datetime* as UTC and convert it to *tzinfo*.
    '''
    as_utc = naive_datetime.replace(tzinfo=_UTC_TZ)
    return as_utc.astimezone(tzinfo)
def format_datetime(naive_dt, tzinfo, format):
    '''
    Format datetime.

    Interprets *naive_dt* as UTC, converts it to *tzinfo*, then renders it
    with the strftime *format*.
    '''
    localized = naive_dt.replace(tzinfo=_UTC_TZ).astimezone(tzinfo)
    return localized.strftime(format)
def format_date(naive_dt, tzinfo, format):
    '''
    Format date.

    Same conversion as format_datetime; only the strftime pattern differs
    by convention at the call site.
    '''
    localized = naive_dt.replace(tzinfo=_UTC_TZ).astimezone(tzinfo)
    return localized.strftime(format)
def format_time(naive_dt, tzinfo, format):
    '''
    Format time.

    Same conversion as format_datetime; only the strftime pattern differs
    by convention at the call site.
    '''
    localized = naive_dt.replace(tzinfo=_UTC_TZ).astimezone(tzinfo)
    return localized.strftime(format)
def get_runtime_utils(tzinfo, date_format=None, time_format=None):
    '''
    Build the runtime helper callables bound to a user's time zone.

    Returns a dict with 'format_datetime', 'format_date' and 'format_time'
    callables, each taking a naive UTC datetime.
    '''
    if date_format is None:
        date_format = '%Y-%m-%d'
    if time_format is None:
        time_format = '%H:%M:%S'
    combined_format = '%s %s' % (date_format, time_format)
    return {
        'format_datetime': lambda dt: format_datetime(dt, tzinfo, combined_format),
        'format_date': lambda dt: format_date(dt, tzinfo, date_format),
        'format_time': lambda dt: format_time(dt, tzinfo, time_format),
    }
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Video app that display any public video from YouTube or other sites.
'''
__author__ = 'Michael Liao (askxuefeng@gmail.com)'
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import base64
class BasicAuth(object):
    """Sets the Authorization header as defined in RFC1945"""

    def __init__(self, user_id, password):
        # Pre-compute the base64 credential token once; strip() removes the
        # trailing newline that encodestring appends.
        credentials = '%s:%s' % (user_id, password)
        self.basic_cookie = base64.encodestring(credentials).strip()

    def modify_request(self, http_request):
        http_request.headers['Authorization'] = 'Basic %s' % self.basic_cookie

    # v1-style alias kept for backwards compatibility.
    ModifyRequest = modify_request
class NoAuth(object):
    """Auth module that leaves the request untouched (anonymous access)."""

    def modify_request(self, http_request):
        # Anonymous requests carry no Authorization header.
        pass
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import atom.http_interface
import atom.url
class Error(Exception):
    """Base exception for this mock HTTP module."""
    pass

class NoRecordingFound(Error):
    """Raised when replaying and no stored response matches the request."""
    pass
class MockRequest(object):
    """Holds parameters of an HTTP request for matching against future requests.
    """

    def __init__(self, operation, url, data=None, headers=None):
        self.operation = operation
        self.data = data
        self.headers = headers
        # Parse plain-string URLs into atom.url objects so that stored and
        # incoming requests compare consistently during replay.
        if isinstance(url, (str, unicode)):
            url = atom.url.parse_url(url)
        self.url = url
class MockResponse(atom.http_interface.HttpResponse):
    """Simulates an httplib.HTTPResponse object."""

    def __init__(self, body=None, status=None, reason=None, headers=None):
        # Accept either a raw string body or a file-like object with read().
        if body and hasattr(body, 'read'):
            self.body = body.read()
        else:
            self.body = body
        # Status may arrive as a str or int; normalize to int (or None).
        if status is None:
            self.status = None
        else:
            self.status = int(status)
        self.reason = reason
        self._headers = headers or {}

    def read(self):
        return self.body
class MockHttpClient(atom.http_interface.GenericHttpClient):
    """HttpClient that replays canned responses and can record real ones."""

    def __init__(self, headers=None, recordings=None, real_client=None):
        """An HttpClient which responds to request with stored data.

        The request-response pairs are stored as tuples in a member list named
        recordings.

        The MockHttpClient can be switched from replay mode to record mode by
        setting the real_client member to an instance of an HttpClient which will
        make real HTTP requests and store the server's response in list of
        recordings.

        Args:
          headers: dict containing HTTP headers which should be included in all
              HTTP requests.
          recordings: The initial recordings to be used for responses. This list
              contains tuples in the form: (MockRequest, MockResponse)
          real_client: An HttpClient which will make a real HTTP request. The
              response will be converted into a MockResponse and stored in
              recordings.
        """
        self.recordings = recordings or []
        self.real_client = real_client
        self.headers = headers or {}

    def add_response(self, response, operation, url, data=None, headers=None):
        """Adds a request-response pair to the recordings list.

        After the recording is added, future matching requests will receive the
        response.

        Args:
          response: MockResponse
          operation: str
          url: str
          data: str, Currently the data is ignored when looking for matching
              requests.
          headers: dict of strings: Currently the headers are ignored when
              looking for matching requests.
        """
        request = MockRequest(operation, url, data=data, headers=headers)
        self.recordings.append((request, response))

    def request(self, operation, url, data=None, headers=None):
        """Returns a matching MockResponse from the recordings.

        If the real_client is set, the request will be passed along and the
        server's response will be added to the recordings and also returned.

        If there is no match, a NoRecordingFound error will be raised.
        """
        if self.real_client is None:
            if isinstance(url, (str, unicode)):
                url = atom.url.parse_url(url)
            for recording in self.recordings:
                # Matching considers only the operation and URL; data and
                # headers are intentionally ignored (see add_response).
                if recording[0].operation == operation and recording[0].url == url:
                    return recording[1]
            # Bug fix: corrected typo 'recodings' -> 'recordings' in the
            # user-facing error message.
            raise NoRecordingFound('No recordings found for %s %s' % (
                operation, url))
        else:
            # There is a real HTTP client, so make the request, and record the
            # response.
            response = self.real_client.request(operation, url, data=data,
                                                headers=headers)
            # TODO: copy the headers
            stored_response = MockResponse(body=response, status=response.status,
                                           reason=response.reason)
            self.add_response(stored_response, operation, url, data=data,
                              headers=headers)
            return stored_response
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
ATOM_TEMPLATE = '{http://www.w3.org/2005/Atom}%s'
APP_TEMPLATE_V1 = '{http://purl.org/atom/app#}%s'
APP_TEMPLATE_V2 = '{http://www.w3.org/2007/app}%s'
# Simple text-only child elements used inside atom:author / atom:contributor.
class Name(atom.core.XmlElement):
    """The atom:name element."""
    _qname = ATOM_TEMPLATE % 'name'

class Email(atom.core.XmlElement):
    """The atom:email element."""
    _qname = ATOM_TEMPLATE % 'email'

class Uri(atom.core.XmlElement):
    """The atom:uri element."""
    _qname = ATOM_TEMPLATE % 'uri'
class Person(atom.core.XmlElement):
    """A foundation class which atom:author and atom:contributor extend.

    A person contains information like name, email address, and web page URI for
    an author or contributor to an Atom feed.
    """
    # Single-occurrence child elements (see XmlElement parsing rules).
    name = Name
    email = Email
    uri = Uri

class Author(Person):
    """The atom:author element.

    An author is a required element in Feed unless each Entry contains an Author.
    """
    _qname = ATOM_TEMPLATE % 'author'

class Contributor(Person):
    """The atom:contributor element."""
    _qname = ATOM_TEMPLATE % 'contributor'
class Link(atom.core.XmlElement):
    """The atom:link element."""
    _qname = ATOM_TEMPLATE % 'link'
    # String values here declare XML *attributes* of the element.
    href = 'href'
    rel = 'rel'
    type = 'type'
    hreflang = 'hreflang'
    title = 'title'
    length = 'length'

class Generator(atom.core.XmlElement):
    """The atom:generator element."""
    _qname = ATOM_TEMPLATE % 'generator'
    uri = 'uri'
    version = 'version'
class Text(atom.core.XmlElement):
    """A foundation class from which atom:title, summary, etc. extend.

    This class should never be instantiated.
    """
    # The 'type' XML attribute (e.g. text/html/xhtml).
    type = 'type'

class Title(Text):
    """The atom:title element."""
    _qname = ATOM_TEMPLATE % 'title'

class Subtitle(Text):
    """The atom:subtitle element."""
    _qname = ATOM_TEMPLATE % 'subtitle'

class Rights(Text):
    """The atom:rights element."""
    _qname = ATOM_TEMPLATE % 'rights'

class Summary(Text):
    """The atom:summary element."""
    _qname = ATOM_TEMPLATE % 'summary'

class Content(Text):
    """The atom:content element."""
    _qname = ATOM_TEMPLATE % 'content'
    src = 'src'
class Category(atom.core.XmlElement):
    """The atom:category element."""
    _qname = ATOM_TEMPLATE % 'category'
    term = 'term'
    scheme = 'scheme'
    label = 'label'

class Id(atom.core.XmlElement):
    """The atom:id element."""
    _qname = ATOM_TEMPLATE % 'id'

class Icon(atom.core.XmlElement):
    """The atom:icon element."""
    _qname = ATOM_TEMPLATE % 'icon'

class Logo(atom.core.XmlElement):
    """The atom:logo element."""
    _qname = ATOM_TEMPLATE % 'logo'
class Draft(atom.core.XmlElement):
    """The app:draft element which indicates if this entry should be public."""
    # Tuple of qnames: index selects the namespace per protocol version
    # (v1 uses the purl.org app namespace, v2 the w3.org one).
    _qname = (APP_TEMPLATE_V1 % 'draft', APP_TEMPLATE_V2 % 'draft')

class Control(atom.core.XmlElement):
    """The app:control element indicating restrictions on publication.

    The APP control element may contain a draft element indicating whether or
    not this entry should be publicly available.
    """
    _qname = (APP_TEMPLATE_V1 % 'control', APP_TEMPLATE_V2 % 'control')
    draft = Draft

class Date(atom.core.XmlElement):
    """A parent class for atom:updated, published, etc."""

class Updated(Date):
    """The atom:updated element."""
    _qname = ATOM_TEMPLATE % 'updated'

class Published(Date):
    """The atom:published element."""
    _qname = ATOM_TEMPLATE % 'published'
class LinkFinder(object):
    """An "interface" providing methods to find link elements

    Entry elements often contain multiple links which differ in the rel
    attribute or content type. Often, developers are interested in a specific
    type of link so this class provides methods to find specific classes of
    links.

    This class is used as a mixin in Atom entries and feeds.
    """

    def get_link(self, rel):
        """Returns a link object which has the desired rel value.

        If you are interested in the URL instead of the link object,
        consider using find_url instead.
        """
        for candidate in self.link:
            # A link only counts if the rel matches and href is non-empty.
            if candidate.rel == rel and candidate.href:
                return candidate
        return None

    GetLink = get_link

    def find_url(self, rel):
        """Returns the URL in a link with the desired rel value."""
        found = self.get_link(rel)
        if found is None:
            return None
        return found.href

    FindUrl = find_url

    def find_self_link(self):
        """Find the first link with rel set to 'self'

        Returns:
          A str containing the link's href or None if none of the links had rel
          equal to 'self'
        """
        return self.find_url('self')

    FindSelfLink = find_self_link

    def get_self_link(self):
        return self.get_link('self')

    GetSelfLink = get_self_link

    def find_edit_link(self):
        return self.find_url('edit')

    FindEditLink = find_edit_link

    def get_edit_link(self):
        return self.get_link('edit')

    GetEditLink = get_edit_link

    def find_edit_media_link(self):
        # The Picasa API used 'media-edit' instead of the standard
        # 'edit-media', so try both rels in order.
        for rel in ('edit-media', 'media-edit'):
            found = self.find_url(rel)
            if found is not None:
                return found
        return None

    FindEditMediaLink = find_edit_media_link

    def get_edit_media_link(self):
        for rel in ('edit-media', 'media-edit'):
            found = self.get_link(rel)
            if found is not None:
                return found
        return None

    GetEditMediaLink = get_edit_media_link

    def find_next_link(self):
        return self.find_url('next')

    FindNextLink = find_next_link

    def get_next_link(self):
        return self.get_link('next')

    GetNextLink = get_next_link

    def find_license_link(self):
        return self.find_url('license')

    FindLicenseLink = find_license_link

    def get_license_link(self):
        return self.get_link('license')

    GetLicenseLink = get_license_link

    def find_alternate_link(self):
        return self.find_url('alternate')

    FindAlternateLink = find_alternate_link

    def get_alternate_link(self):
        return self.get_link('alternate')

    GetAlternateLink = get_alternate_link
class FeedEntryParent(atom.core.XmlElement, LinkFinder):
    """A super class for atom:feed and entry, contains shared attributes"""
    author = [Author]
    category = [Category]
    contributor = [Contributor]
    id = Id
    link = [Link]
    rights = Rights
    title = Title
    updated = Updated

    def __init__(self, atom_id=None, text=None, *args, **kwargs):
        """Create the feed/entry.

        Args:
          atom_id: value for the atom:id member; takes precedence over an
              'id' keyword argument.
          text: the element's text content.
        """
        atom.core.XmlElement.__init__(self, text=text, *args, **kwargs)
        # Bug fix: assign the id *after* the base __init__.  XmlElement's
        # constructor resets every XML member (including ``id``) to its
        # kwarg/default value, so assigning before the call silently
        # discarded atom_id.
        if atom_id is not None:
            self.id = atom_id
class Source(FeedEntryParent):
    """The atom:source element."""
    _qname = ATOM_TEMPLATE % 'source'
    # Single-occurrence child elements in addition to those shared with
    # FeedEntryParent.
    generator = Generator
    icon = Icon
    logo = Logo
    subtitle = Subtitle

class Entry(FeedEntryParent):
    """The atom:entry element."""
    _qname = ATOM_TEMPLATE % 'entry'
    content = Content
    published = Published
    source = Source
    summary = Summary
    control = Control

class Feed(Source):
    """The atom:feed element which contains entries."""
    _qname = ATOM_TEMPLATE % 'feed'
    # Repeating child element: each entry is appended to this list.
    entry = [Entry]
class ExtensionElement(atom.core.XmlElement):
    """Provided for backwards compatibility to the v1 atom.ExtensionElement."""

    def __init__(self, tag=None, namespace=None, attributes=None,
                 children=None, text=None, *args, **kwargs):
        # NOTE(review): deliberately does not call XmlElement.__init__ --
        # it assigns _qname and the member containers directly.  *args and
        # **kwargs are accepted for v1 signature compatibility but ignored;
        # confirm before changing.
        if namespace:
            self._qname = '{%s}%s' % (namespace, tag)
        else:
            self._qname = tag
        self.children = children or []
        self.attributes = attributes or {}
        self.text = text

    # v1-style alias for the protected conversion helper.
    _BecomeChildElement = atom.core.XmlElement._become_child
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import inspect
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
STRING_ENCODING = 'utf-8'
class XmlElement(object):
    """Represents an element node in an XML document.

    The text member is a UTF-8 encoded str or unicode.
    """
    # Qualified tag name ('{namespace}tag'); may be a tuple of per-version
    # qnames (see e.g. the app:draft element in atom.data).
    _qname = None
    # Child elements / attributes which matched none of the parsing rules.
    _other_elements = None
    _other_attributes = None
    # The rule set contains mappings for XML qnames to child members and the
    # appropriate member classes.
    _rule_set = None
    # Per-class cache of (member_name, member_type) pairs; see
    # _list_xml_members.
    _members = None
    text = None
def __init__(self, text=None, *args, **kwargs):
if ('_members' not in self.__class__.__dict__
or self.__class__._members is None):
self.__class__._members = tuple(self.__class__._list_xml_members())
for member_name, member_type in self.__class__._members:
if member_name in kwargs:
setattr(self, member_name, kwargs[member_name])
else:
if isinstance(member_type, list):
setattr(self, member_name, [])
else:
setattr(self, member_name, None)
self._other_elements = []
self._other_attributes = {}
if text is not None:
self.text = text
    def _list_xml_members(cls):
        """Generator listing all members which are XML elements or attributes.

        The following members would be considered XML members:
        foo = 'abc' - indicates an XML attribute with the qname abc
        foo = SomeElement - indicates an XML child element
        foo = [AnElement] - indicates a repeating XML child element, each instance
            will be stored in a list in this member
        foo = ('att1', '{http://example.com/namespace}att2') - indicates an XML
            attribute which has different parsing rules in different versions of
            the protocol. Version 1 of the XML parsing rules will look for an
            attribute with the qname 'att1' but verion 2 of the parsing rules will
            look for a namespaced attribute with the local name of 'att2' and an
            XML namespace of 'http://example.com/namespace'.
        """
        # NOTE(review): despite the docstring, this returns a list, not a
        # generator.
        members = []
        for pair in inspect.getmembers(cls):
            # Skip private members and the special 'text' member.
            if not pair[0].startswith('_') and pair[0] != 'text':
                member_type = pair[1]
                if (isinstance(member_type, tuple) or isinstance(member_type, list)
                        or isinstance(member_type, (str, unicode))
                        or (inspect.isclass(member_type)
                            and issubclass(member_type, XmlElement))):
                    members.append(pair)
        return members

    _list_xml_members = classmethod(_list_xml_members)
    def _get_rules(cls, version):
        """Initializes the _rule_set for the class which is used when parsing XML.

        This method is used internally for parsing and generating XML for an
        XmlElement. It is not recommended that you call this method directly.

        Returns:
          A tuple containing the XML parsing rules for the appropriate version.

          The tuple looks like:
          (qname, {sub_element_qname: (member_name, member_class, repeating), ..},
           {attribute_qname: member_name})

          To give a couple of concrete example, the atom.data.Control _get_rules
          with version of 2 will return:
          ('{http://www.w3.org/2007/app}control',
           {'{http://www.w3.org/2007/app}draft': ('draft',
                                                  <class 'atom.data.Draft'>,
                                                  False)},
           {})
          Calling _get_rules with version 1 on gdata.data.FeedLink will produce:
          ('{http://schemas.google.com/g/2005}feedLink',
           {'{http://www.w3.org/2005/Atom}feed': ('feed',
                                                  <class 'gdata.data.GDFeed'>,
                                                  False)},
           {'href': 'href', 'readOnly': 'read_only', 'countHint': 'count_hint',
            'rel': 'rel'})
        """
        # Initialize the _rule_set to make sure there is a slot available to store
        # the parsing rules for this version of the XML schema.
        # Look for rule set in the class __dict__ proxy so that only the
        # _rule_set for this class will be found. By using the dict proxy
        # we avoid finding rule_sets defined in superclasses.
        # The four lines below provide support for any number of versions, but it
        # runs a bit slower then hard coding slots for two versions, so I'm using
        # the below two lines.
        #if '_rule_set' not in cls.__dict__ or cls._rule_set is None:
        #  cls._rule_set = []
        #while len(cls.__dict__['_rule_set']) < version:
        #  cls._rule_set.append(None)
        # If there is no rule set cache in the class, provide slots for two XML
        # versions. If and when there is a version 3, this list will need to be
        # expanded.
        if '_rule_set' not in cls.__dict__ or cls._rule_set is None:
            cls._rule_set = [None, None]
        # If a version higher than 2 is requested, fall back to version 2 because
        # 2 is currently the highest supported version.
        if version > 2:
            return cls._get_rules(2)
        # Check the dict proxy for the rule set to avoid finding any rule sets
        # which belong to the superclass. We only want rule sets for this class.
        if cls._rule_set[version-1] is None:
            # The rule set for each version consists of the qname for this element
            # ('{namespace}tag'), a dictionary (elements) for looking up the
            # corresponding class member when given a child element's qname, and a
            # dictionary (attributes) for looking up the corresponding class member
            # when given an XML attribute's qname.
            elements = {}
            attributes = {}
            if ('_members' not in cls.__dict__ or cls._members is None):
                cls._members = tuple(cls._list_xml_members())
            for member_name, target in cls._members:
                if isinstance(target, list):
                    # This member points to a repeating element.
                    elements[_get_qname(target[0], version)] = (member_name, target[0],
                                                                True)
                elif isinstance(target, tuple):
                    # This member points to a versioned XML attribute.
                    if version <= len(target):
                        attributes[target[version-1]] = member_name
                    else:
                        # Fall back to the last known qname for newer versions.
                        attributes[target[-1]] = member_name
                elif isinstance(target, (str, unicode)):
                    # This member points to an XML attribute.
                    attributes[target] = member_name
                elif issubclass(target, XmlElement):
                    # This member points to a single occurance element.
                    elements[_get_qname(target, version)] = (member_name, target, False)
            version_rules = (_get_qname(cls, version), elements, attributes)
            cls._rule_set[version-1] = version_rules
            return version_rules
        else:
            return cls._rule_set[version-1]

    _get_rules = classmethod(_get_rules)
    def get_elements(self, tag=None, namespace=None, version=1):
        """Find all sub elements which match the tag and namespace.

        To find all elements in this object, call get_elements with the tag and
        namespace both set to None (the default). This method searches through
        the object's members and the elements stored in _other_elements which
        did not match any of the XML parsing rules for this class.

        Args:
          tag: str
          namespace: str
          version: int Specifies the version of the XML rules to be used when
              searching for matching elements.

        Returns:
          A list of the matching XmlElements.
        """
        matches = []
        ignored1, elements, ignored2 = self.__class__._get_rules(version)
        if elements:
            for qname, element_def in elements.iteritems():
                member = getattr(self, element_def[0])
                if member:
                    if _qname_matches(tag, namespace, qname):
                        if element_def[2]:
                            # If this is a repeating element, copy all instances into the
                            # result list.
                            matches.extend(member)
                        else:
                            matches.append(member)
        for element in self._other_elements:
            if _qname_matches(tag, namespace, element._qname):
                matches.append(element)
        return matches

    GetElements = get_elements
    # FindExtensions and FindChildren are provided for backwards compatibility
    # to the atom.AtomBase class.
    # However, FindExtensions may return more results than the v1 atom.AtomBase
    # method does, because get_elements searches both the expected children
    # and the unexpected "other elements". The old AtomBase.FindExtensions
    # method searched only "other elements" AKA extension_elements.
    FindExtensions = get_elements
    FindChildren = get_elements
    def get_attributes(self, tag=None, namespace=None, version=1):
        """Find all attributes which match the tag and namespace.

        To find all attributes in this object, call get_attributes with the tag
        and namespace both set to None (the default). This method searches
        through the object's members and the attributes stored in
        _other_attributes which did not fit any of the XML parsing rules for this
        class.

        Args:
          tag: str
          namespace: str
          version: int Specifies the version of the XML rules to be used when
              searching for matching attributes.

        Returns:
          A list of XmlAttribute objects for the matching attributes.
        """
        matches = []
        ignored1, ignored2, attributes = self.__class__._get_rules(version)
        if attributes:
            for qname, attribute_def in attributes.iteritems():
                if isinstance(attribute_def, (list, tuple)):
                    attribute_def = attribute_def[0]
                member = getattr(self, attribute_def)
                # TODO: ensure this hasn't broken existing behavior.
                #member = getattr(self, attribute_def[0])
                if member:
                    if _qname_matches(tag, namespace, qname):
                        matches.append(XmlAttribute(qname, member))
        for qname, value in self._other_attributes.iteritems():
            if _qname_matches(tag, namespace, qname):
                matches.append(XmlAttribute(qname, value))
        return matches

    GetAttributes = get_attributes
  def _harvest_tree(self, tree, version=1):
    """Populates object members from the data in the tree Element.

    Child elements which match a parsing rule are converted to member
    objects; everything else is collected into _other_elements and
    _other_attributes so no XML data is lost.
    """
    qname, elements, attributes = self.__class__._get_rules(version)
    for element in tree:
      if elements and element.tag in elements:
        definition = elements[element.tag]
        # If this is a repeating element, make sure the member is set to a
        # list.
        if definition[2]:
          if getattr(self, definition[0]) is None:
            setattr(self, definition[0], [])
          getattr(self, definition[0]).append(_xml_element_from_tree(element,
              definition[1], version))
        else:
          setattr(self, definition[0], _xml_element_from_tree(element,
              definition[1], version))
      else:
        # Unexpected child: preserve it as a generic XmlElement.
        self._other_elements.append(_xml_element_from_tree(element, XmlElement,
            version))
    for attrib, value in tree.attrib.iteritems():
      if attributes and attrib in attributes:
        setattr(self, attributes[attrib], value)
      else:
        # Unexpected attribute: preserve its raw value.
        self._other_attributes[attrib] = value
    if tree.text:
      self.text = tree.text
def _to_tree(self, version=1, encoding=None):
new_tree = ElementTree.Element(_get_qname(self, version))
self._attach_members(new_tree, version, encoding)
return new_tree
  def _attach_members(self, tree, version=1, encoding=None):
    """Convert members to XML elements/attributes and add them to the tree.

    Args:
      tree: An ElementTree.Element which will be modified. The members of
          this object will be added as child elements or attributes
          according to the rules described in _expected_elements and
          _expected_attributes. The elements and attributes stored in
          other_attributes and other_elements are also added as children
          of this tree.
      version: int Ignored in this method but used by VersionedElement.
      encoding: str (optional)
    """
    qname, elements, attributes = self.__class__._get_rules(version)
    encoding = encoding or STRING_ENCODING
    # Add the expected elements and attributes to the tree.
    if elements:
      for tag, element_def in elements.iteritems():
        member = getattr(self, element_def[0])
        # If this is a repeating element and there are members in the list.
        if member and element_def[2]:
          for instance in member:
            instance._become_child(tree, version)
        elif member:
          member._become_child(tree, version)
    if attributes:
      for attribute_tag, member_name in attributes.iteritems():
        value = getattr(self, member_name)
        if value:
          tree.attrib[attribute_tag] = value
    # Add the unexpected (other) elements and attributes to the tree.
    for element in self._other_elements:
      element._become_child(tree, version)
    for key, value in self._other_attributes.iteritems():
      # I'm not sure if unicode can be used in the attribute name, so for now
      # we assume the encoding is correct for the attribute name.
      if not isinstance(value, unicode):
        value = value.decode(encoding)
      tree.attrib[key] = value
    if self.text:
      if isinstance(self.text, unicode):
        tree.text = self.text
      else:
        tree.text = self.text.decode(encoding)
  def to_string(self, version=1, encoding=None):
    """Converts this object to XML.

    Args:
      version: int (optional) XML rules version used for serialization.
      encoding: str (optional) Character encoding for non-unicode text.
    """
    return ElementTree.tostring(self._to_tree(version, encoding))
  ToString = to_string
  def __str__(self):
    # str() of an XmlElement is its XML serialization.
    return self.to_string()
  def _become_child(self, tree, version=1):
    """Adds a child element to tree with the XML data in self."""
    # The child is created empty and appended first; its tag is then set
    # from this object's qualified name before members are attached.
    new_child = ElementTree.Element('')
    tree.append(new_child)
    new_child.tag = _get_qname(self, version)
    self._attach_members(new_child, version)
  def __get_extension_elements(self):
    return self._other_elements
  def __set_extension_elements(self, elements):
    self._other_elements = elements
  # Maps the v1 name extension_elements onto the _other_elements storage.
  extension_elements = property(__get_extension_elements,
      __set_extension_elements,
      """Provides backwards compatibility for v1 atom.AtomBase classes.""")
  def __get_extension_attributes(self):
    return self._other_attributes
  def __set_extension_attributes(self, attributes):
    self._other_attributes = attributes
  # Maps the v1 name extension_attributes onto the _other_attributes storage.
  extension_attributes = property(__get_extension_attributes,
      __set_extension_attributes,
      """Provides backwards compatibility for v1 atom.AtomBase classes.""")
def _get_tag(self, version=1):
qname = _get_qname(self, version)
return qname[qname.find('}')+1:]
def _get_namespace(self, version=1):
qname = _get_qname(self, version)
if qname.startswith('{'):
return qname[1:qname.find('}')]
else:
return None
def _set_tag(self, tag):
if isinstance(self._qname, tuple):
self._qname = self._qname.copy()
if self._qname[0].startswith('{'):
self._qname[0] = '{%s}%s' % (self._get_namespace(1), tag)
else:
self._qname[0] = tag
else:
if self._qname.startswith('{'):
self._qname = '{%s}%s' % (self._get_namespace(), tag)
else:
self._qname = tag
def _set_namespace(self, namespace):
if isinstance(self._qname, tuple):
self._qname = self._qname.copy()
if namespace:
self._qname[0] = '{%s}%s' % (namespace, self._get_tag(1))
else:
self._qname[0] = self._get_tag(1)
else:
if namespace:
self._qname = '{%s}%s' % (namespace, self._get_tag(1))
else:
self._qname = self._get_tag(1)
tag = property(_get_tag, _set_tag,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
namespace = property(_get_namespace, _set_namespace,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
# Provided for backwards compatibility to atom.ExtensionElement
children = extension_elements
attributes = extension_attributes
def _get_qname(element, version):
if isinstance(element._qname, tuple):
if version <= len(element._qname):
return element._qname[version-1]
else:
return element._qname[-1]
else:
return element._qname
def _qname_matches(tag, namespace, qname):
"""Logic determines if a QName matches the desired local tag and namespace.
This is used in XmlElement.get_elements and XmlElement.get_attributes to
find matches in the element's members (among all expected-and-unexpected
elements-and-attributes).
Args:
expected_tag: string
expected_namespace: string
qname: string in the form '{xml_namespace}localtag' or 'tag' if there is
no namespace.
Returns:
boolean True if the member's tag and namespace fit the expected tag and
namespace.
"""
# If there is no expected namespace or tag, then everything will match.
if qname is None:
member_tag = None
member_namespace = None
else:
if qname.startswith('{'):
member_namespace = qname[1:qname.index('}')]
member_tag = qname[qname.index('}') + 1:]
else:
member_namespace = None
member_tag = qname
return ((tag is None and namespace is None)
# If there is a tag, but no namespace, see if the local tag matches.
or (namespace is None and member_tag == tag)
# There was no tag, but there was a namespace so see if the namespaces
# match.
or (tag is None and member_namespace == namespace)
# There was no tag, and the desired elements have no namespace, so check
# to see that the member's namespace is None.
or (tag is None and namespace == ''
and member_namespace is None)
# The tag and the namespace both match.
or (tag == member_tag
and namespace == member_namespace)
# The tag matches, and the expected namespace is the empty namespace,
# check to make sure the member's namespace is None.
or (tag == member_tag and namespace == ''
and member_namespace is None))
def parse(xml_string, target_class=None, version=1, encoding=None):
  """Parses the XML string according to the rules for the target_class.

  Args:
    xml_string: str or unicode
    target_class: XmlElement or a subclass. If None is specified, the
        XmlElement class is used.
    version: int (optional) The version of the schema which should be used when
        converting the XML into an object. The default is 1.
    encoding: str (optional) The character encoding of the bytes in the
        xml_string. Default is 'UTF-8'.

  Returns:
    An instance of target_class populated from the XML, or None when the
    root tag does not match the target_class's qualified name.
  """
  if target_class is None:
    target_class = XmlElement
  # unicode input must be encoded to bytes before ElementTree parses it.
  if isinstance(xml_string, unicode):
    if encoding is None:
      xml_string = xml_string.encode(STRING_ENCODING)
    else:
      xml_string = xml_string.encode(encoding)
  tree = ElementTree.fromstring(xml_string)
  return _xml_element_from_tree(tree, target_class, version)
# Aliases retained for v1 API backwards compatibility.
Parse = parse
xml_element_from_string = parse
XmlElementFromString = xml_element_from_string
def _xml_element_from_tree(tree, target_class, version=1):
  """Builds a target_class instance from an ElementTree Element.

  If target_class declares no _qname, the tree's tag is adopted as the
  instance's qname. Otherwise the tag must equal the class's qualified name
  for the given version; None is returned when it does not match.
  """
  if target_class._qname is None:
    instance = target_class()
    instance._qname = tree.tag
    instance._harvest_tree(tree, version)
    return instance
  # TODO handle the namespace-only case
  # Namespace only will be used with Google Spreadsheets rows and
  # Google Base item attributes.
  elif tree.tag == _get_qname(target_class, version):
    instance = target_class()
    instance._harvest_tree(tree, version)
    return instance
  return None
class XmlAttribute(object):
  """Represents a single XML attribute as a qualified-name/value pair."""
  def __init__(self, qname, value):
    # qname may be a plain tag or a '{namespace}tag' string.
    self._qname = qname
    self.value = value
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AtomPubClient provides CRUD ops. in line with the Atom Publishing Protocol.
"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import atom.http_core
class Error(Exception):
  """Base of all exceptions raised by this module."""
  pass
class MissingHost(Error):
  """Raised when a request's URI contains no host to connect to."""
  pass
class AtomPubClient(object):
  """Performs CRUD HTTP requests against an Atom Publishing Protocol server."""
  # Default host used when a request URI does not specify one.
  host = None
  # Default credential object applied when no per-request token is given.
  auth_token = None
  ssl = False  # Whether to force all requests over https
  def __init__(self, http_client=None, host=None,
               auth_token=None, source=None, **kwargs):
    """Creates a new AtomPubClient instance.

    Args:
      source: The name of your application.
      http_client: An object capable of performing HTTP requests through a
                   request method. This object is used to perform the request
                   when the AtomPubClient's request method is called. Used to
                   allow HTTP requests to be directed to a mock server, or use
                   an alternate library instead of the default of httplib to
                   make HTTP requests.
      host: str The default host name to use if a host is not specified in the
            requested URI.
      auth_token: An object which sets the HTTP Authorization header when its
                  modify_request method is called.
    """
    self.http_client = http_client or atom.http_core.ProxiedHttpClient()
    if host is not None:
      self.host = host
    if auth_token is not None:
      self.auth_token = auth_token
    self.source = source
  def request(self, method=None, uri=None, auth_token=None,
              http_request=None, **kwargs):
    """Performs an HTTP request to the server indicated.

    Uses the http_client instance to make the request.

    Args:
      method: The HTTP method as a string, usually one of 'GET', 'POST',
              'PUT', or 'DELETE'
      uri: The URI desired as a string or atom.http_core.Uri.
      http_request: An atom.http_core.HttpRequest to start from (optional).
      auth_token: An authorization token object whose modify_request method
                  sets the HTTP Authorization header.

    Returns:
      The results of calling self.http_client.request. With the default
      http_client, this is an HTTP response object.
    """
    # Modify the request based on the AtomPubClient settings and parameters
    # passed in to the request.
    http_request = self.modify_request(http_request)
    if isinstance(uri, (str, unicode)):
      uri = atom.http_core.Uri.parse_uri(uri)
    if uri is not None:
      uri.modify_request(http_request)
    if isinstance(method, (str, unicode)):
      http_request.method = method
    # Any unrecognized arguments are assumed to be capable of modifying the
    # HTTP request.
    for name, value in kwargs.iteritems():
      if value is not None:
        value.modify_request(http_request)
    # Default to an http request if the protocol scheme is not set.
    if http_request.uri.scheme is None:
      http_request.uri.scheme = 'http'
    # Override scheme. Force requests over https.
    if self.ssl:
      http_request.uri.scheme = 'https'
    if http_request.uri.path is None:
      http_request.uri.path = '/'
    # Add the Authorization header at the very end. The Authorization header
    # value may need to be calculated using information in the request.
    if auth_token:
      auth_token.modify_request(http_request)
    elif self.auth_token:
      self.auth_token.modify_request(http_request)
    # Check to make sure there is a host in the http_request.
    if http_request.uri.host is None:
      raise MissingHost('No host provided in request %s %s' % (
          http_request.method, str(http_request.uri)))
    # Perform the fully specified request using the http_client instance.
    # Sends the request to the server and returns the server's response.
    return self.http_client.request(http_request)
  Request = request
  def get(self, uri=None, auth_token=None, http_request=None, **kwargs):
    """Performs a request using the GET method, returns an HTTP response."""
    return self.request(method='GET', uri=uri, auth_token=auth_token,
                        http_request=http_request, **kwargs)
  Get = get
  def post(self, uri=None, data=None, auth_token=None, http_request=None,
           **kwargs):
    """Sends data using the POST method, returns an HTTP response."""
    return self.request(method='POST', uri=uri, auth_token=auth_token,
                        http_request=http_request, data=data, **kwargs)
  Post = post
  def put(self, uri=None, data=None, auth_token=None, http_request=None,
          **kwargs):
    """Sends data using the PUT method, returns an HTTP response."""
    return self.request(method='PUT', uri=uri, auth_token=auth_token,
                        http_request=http_request, data=data, **kwargs)
  Put = put
  def delete(self, uri=None, auth_token=None, http_request=None, **kwargs):
    """Performs a request using the DELETE method, returns an HTTP response."""
    return self.request(method='DELETE', uri=uri, auth_token=auth_token,
                        http_request=http_request, **kwargs)
  Delete = delete
  def modify_request(self, http_request):
    """Changes the HTTP request before sending it to the server.

    Sets the User-Agent HTTP header and fills in the HTTP host portion
    of the URL if one was not included in the request (for this it uses
    the self.host member if one is set). This method is called in
    self.request.

    Args:
      http_request: An atom.http_core.HttpRequest() (optional) If one is
                    not provided, a new HttpRequest is instantiated.

    Returns:
      An atom.http_core.HttpRequest() with the User-Agent header set and
      if this client has a value in its host member, the host in the request
      URL is set.
    """
    if http_request is None:
      http_request = atom.http_core.HttpRequest()
    if self.host is not None and http_request.uri.host is None:
      http_request.uri.host = self.host
    # Set the user agent header for logging purposes.
    if self.source:
      http_request.headers['User-Agent'] = '%s gdata-py/2.0.9' % self.source
    else:
      http_request.headers['User-Agent'] = 'gdata-py/2.0.9'
    return http_request
  ModifyRequest = modify_request
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HttpClients in this module use httplib to make HTTP requests.
This module make HTTP requests based on httplib, but there are environments
in which an httplib based approach will not work (if running in Google App
Engine for example). In those cases, higher level classes (like AtomService
and GDataService) can swap out the HttpClient to transparently use a
different mechanism for making HTTP requests.
HttpClient: Contains a request method which performs an HTTP call to the
server.
ProxiedHttpClient: Contains a request method which connects to a proxy using
settings stored in operating system environment variables then
performs an HTTP call to the endpoint server.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import types
import os
import httplib
import atom.url
import atom.http_interface
import socket
import base64
import atom.http_core
# Probe for the standalone ssl module (Python 2.6+). When it is missing,
# ProxiedHttpClient falls back to the older socket.ssl wrapping path.
ssl_imported = False
ssl = None
try:
  import ssl
  ssl_imported = True
except ImportError:
  pass
class ProxyError(atom.http_interface.Error):
  """Raised when the proxy server rejects the CONNECT handshake."""
  pass
class TestConfigurationError(Exception):
  """Raised when test settings required by this module are misconfigured."""
  pass
# Content-Type sent when a request has a body but no explicit type header.
DEFAULT_CONTENT_TYPE = 'application/atom+xml'
class HttpClient(atom.http_interface.GenericHttpClient):
  """Performs HTTP requests using the httplib standard library module."""
  # Added to allow old v1 HttpClient objects to use the new
  # http_code.HttpClient. Used in unit tests to inject a mock client.
  v2_http_client = None
  def __init__(self, headers=None):
    # When debug is True, debuglevel is set on connections so httplib
    # echoes the wire traffic.
    self.debug = False
    self.headers = headers or {}
  def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and
    DELETE.

    Usage example, perform an HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP operation to be performed. This is usually one
          of 'GET', 'POST', 'PUT', or 'DELETE'
      data: filestream, list of parts, or other object which can be converted
          to a string. Should be set to None when performing a GET or DELETE.
          If data is a file-like object which can be read, this method will
          read a chunk of 100K bytes at a time and send them.
          If the data is a list of parts to be sent, each part will be
          evaluated and sent.
      url: The full URL to which the request should be sent. Can be a string
          or atom.url.Url.
      headers: dict of strings. HTTP headers which should be sent
          in the request.

    Returns:
      The httplib.HTTPResponse returned by the server.
    """
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)
    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      if isinstance(data, types.StringTypes):
        all_headers['Content-Length'] = str(len(data))
      else:
        raise atom.http_interface.ContentLengthRequired('Unable to calculate '
            'the length of the data parameter. Specify a value for '
            'Content-Length')
    # Set the content type to the default value if none was set.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = DEFAULT_CONTENT_TYPE
    # If a v2 client was injected (e.g. by tests), delegate to it instead of
    # performing the httplib request below.
    if self.v2_http_client is not None:
      http_request = atom.http_core.HttpRequest(method=operation)
      atom.http_core.Uri.parse_uri(str(url)).modify_request(http_request)
      http_request.headers = all_headers
      if data:
        http_request._body_parts.append(data)
      return self.v2_http_client.request(http_request=http_request)
    if not isinstance(url, atom.url.Url):
      if isinstance(url, types.StringTypes):
        url = atom.url.parse_url(url)
      else:
        raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
            'parameter because it was not a string or atom.url.Url')
    connection = self._prepare_connection(url, all_headers)
    if self.debug:
      connection.debuglevel = 1
    # skip_host=True because the Host header is written explicitly below.
    connection.putrequest(operation, self._get_access_url(url),
        skip_host=True)
    if url.port is not None:
      connection.putheader('Host', '%s:%s' % (url.host, url.port))
    else:
      connection.putheader('Host', url.host)
    # Overcome a bug in Python 2.4 and 2.5
    # httplib.HTTPConnection.putrequest adding
    # HTTP request header 'Host: www.google.com:443' instead of
    # 'Host: www.google.com', and thus resulting the error message
    # 'Token invalid - AuthSub token has wrong scope' in the HTTP response.
    if (url.protocol == 'https' and int(url.port or 443) == 443 and
        hasattr(connection, '_buffer') and
        isinstance(connection._buffer, list)):
      header_line = 'Host: %s:443' % url.host
      replacement_header_line = 'Host: %s' % url.host
      try:
        connection._buffer[connection._buffer.index(header_line)] = (
            replacement_header_line)
      except ValueError:  # header_line missing from connection._buffer
        pass
    # Send the HTTP headers.
    for header_name in all_headers:
      connection.putheader(header_name, all_headers[header_name])
    connection.endheaders()
    # If there is data, send it in the request.
    if data:
      if isinstance(data, list):
        for data_part in data:
          _send_data_part(data_part, connection)
      else:
        _send_data_part(data, connection)
    # Return the HTTP Response from the server.
    return connection.getresponse()
  def _prepare_connection(self, url, headers):
    # Builds an (unproxied) HTTP or HTTPS connection for the given URL.
    if not isinstance(url, atom.url.Url):
      if isinstance(url, types.StringTypes):
        url = atom.url.parse_url(url)
      else:
        raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
            'parameter because it was not a string or atom.url.Url')
    if url.protocol == 'https':
      if not url.port:
        return httplib.HTTPSConnection(url.host)
      return httplib.HTTPSConnection(url.host, int(url.port))
    else:
      if not url.port:
        return httplib.HTTPConnection(url.host)
      return httplib.HTTPConnection(url.host, int(url.port))
  def _get_access_url(self, url):
    # The request line uses the full URL string.
    return url.to_string()
class ProxiedHttpClient(HttpClient):
  """Performs an HTTP request through a proxy.

  The proxy settings are obtained from environment variables. The URL of the
  proxy server is assumed to be stored in the environment variables
  'https_proxy' and 'http_proxy' respectively. If the proxy server requires
  a Basic Auth authorization header, the username and password are expected to
  be in the 'proxy-username' or 'proxy_username' variable and the
  'proxy-password' or 'proxy_password' variable.

  After connecting to the proxy server, the request is completed as in
  HttpClient.request.
  """
  def _prepare_connection(self, url, headers):
    # Returns a connection routed through the proxy named in the
    # environment, falling back to a direct connection when unset.
    proxy_auth = _get_proxy_auth()
    if url.protocol == 'https':
      # destination is https
      proxy = os.environ.get('https_proxy')
      if proxy:
        # Set any proxy auth headers
        if proxy_auth:
          proxy_auth = 'Proxy-authorization: %s' % proxy_auth
        # Construct the proxy connect command.
        port = url.port
        if not port:
          port = '443'
        proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (url.host, port)
        # Set the user agent to send to the proxy
        if headers and 'User-Agent' in headers:
          user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent'])
        else:
          user_agent = ''
        proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent)
        # Find the proxy host and port.
        proxy_url = atom.url.parse_url(proxy)
        if not proxy_url.port:
          proxy_url.port = '80'
        # Connect to the proxy server, very simple recv and error checking
        p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        p_sock.connect((proxy_url.host, int(proxy_url.port)))
        p_sock.sendall(proxy_pieces)
        response = ''
        # Wait for the full response.
        while response.find("\r\n\r\n") == -1:
          response += p_sock.recv(8192)
        p_status = response.split()[1]
        if p_status != str(200):
          raise ProxyError('Error status=%s' % str(p_status))
        # Trivial setup for ssl socket.
        sslobj = None
        if ssl_imported:
          sslobj = ssl.wrap_socket(p_sock, None, None)
        else:
          sock_ssl = socket.ssl(p_sock, None, None)
          sslobj = httplib.FakeSocket(p_sock, sock_ssl)
        # Initalize httplib and replace with the proxy socket.
        connection = httplib.HTTPConnection(proxy_url.host)
        connection.sock = sslobj
        return connection
      else:
        # The request was HTTPS, but there was no https_proxy set.
        return HttpClient._prepare_connection(self, url, headers)
    else:
      proxy = os.environ.get('http_proxy')
      if proxy:
        # Find the proxy host and port.
        proxy_url = atom.url.parse_url(proxy)
        if not proxy_url.port:
          proxy_url.port = '80'
        if proxy_auth:
          headers['Proxy-Authorization'] = proxy_auth.strip()
        return httplib.HTTPConnection(proxy_url.host, int(proxy_url.port))
      else:
        # The request was HTTP, but there was no http_proxy set.
        return HttpClient._prepare_connection(self, url, headers)
  def _get_access_url(self, url):
    # When proxied, the request line must contain the absolute URL.
    return url.to_string()
def _get_proxy_auth():
  """Builds a proxy Basic auth string from environment variables.

  Reads 'proxy-username'/'proxy_username' and 'proxy-password'/
  'proxy_password'. Returns a 'Basic <base64>' string terminated by CRLF
  when a username is set, otherwise the empty string.
  """
  proxy_username = os.environ.get('proxy-username')
  if not proxy_username:
    proxy_username = os.environ.get('proxy_username')
  proxy_password = os.environ.get('proxy-password')
  if not proxy_password:
    proxy_password = os.environ.get('proxy_password')
  if proxy_username:
    user_auth = base64.encodestring('%s:%s' % (proxy_username,
                                               proxy_password))
    return 'Basic %s\r\n' % (user_auth.strip())
  else:
    return ''
def _send_data_part(data, connection):
  """Writes one piece of a request body to the open connection.

  Strings are sent as-is, file-like objects are streamed in 100K chunks,
  and anything else is converted with str() before sending.
  """
  if isinstance(data, types.StringTypes):
    connection.send(data)
    return
  # Check to see if data is a file-like object that has a read method.
  elif hasattr(data, 'read'):
    # Read the file and send it a chunk at a time.
    while 1:
      binarydata = data.read(100000)
      if binarydata == '': break
      connection.send(binarydata)
    return
  else:
    # The data object was not a file.
    # Try to convert to a string and send the data.
    connection.send(str(data))
    return
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import StringIO
import pickle
import os.path
import tempfile
import atom.http_core
class Error(Exception):
  """Base of all exceptions raised by this module."""
  pass
class NoRecordingFound(Error):
  """Raised when no stored recording matches the incoming request."""
  pass
class MockHttpClient(object):
  """Record/replay HTTP client for running tests against cached responses."""
  debug = None
  # When set, requests are sent over the network and the responses recorded.
  real_client = None
  last_request_was_live = False
  # The following members are used to construct the session cache temp file
  # name.
  # These are combined to form the file name
  # /tmp/cache_prefix.cache_case_name.cache_test_name
  cache_name_prefix = 'gdata_live_test'
  cache_case_name = ''
  cache_test_name = ''
  def __init__(self, recordings=None, real_client=None):
    self._recordings = recordings or []
    if real_client is not None:
      self.real_client = real_client
  def add_response(self, http_request, status, reason, headers=None,
                   body=None):
    """Stores a canned (request, response) pair for later replay."""
    response = MockHttpResponse(status, reason, headers, body)
    # TODO Scrub the request and the response.
    self._recordings.append((http_request._copy(), response))
  AddResponse = add_response
  def request(self, http_request):
    """Provide a recorded response, or record a response for replay.

    If the real_client is set, the request will be made using the
    real_client, and the response from the server will be recorded.
    If the real_client is None (the default), this method will examine
    the recordings and find the first which matches.
    """
    request = http_request._copy()
    _scrub_request(request)
    if self.real_client is None:
      self.last_request_was_live = False
      for recording in self._recordings:
        if _match_request(recording[0], request):
          return recording[1]
    else:
      # Pass along the debug settings to the real client.
      self.real_client.debug = self.debug
      # Make an actual request since we can use the real HTTP client.
      self.last_request_was_live = True
      response = self.real_client.request(http_request)
      scrubbed_response = _scrub_response(response)
      self.add_response(request, scrubbed_response.status,
                        scrubbed_response.reason,
                        dict(atom.http_core.get_headers(scrubbed_response)),
                        scrubbed_response.read())
      # Return the recording which we just added.
      return self._recordings[-1][1]
    # NOTE(review): 'recoding' is a typo in this message; preserved here
    # because callers may match on the exact text.
    raise NoRecordingFound('No recoding was found for request: %s %s' % (
        request.method, str(request.uri)))
  Request = request
  def _save_recordings(self, filename):
    # Recordings are pickled into the system temp directory.
    recording_file = open(os.path.join(tempfile.gettempdir(), filename),
                          'wb')
    pickle.dump(self._recordings, recording_file)
    recording_file.close()
  def _load_recordings(self, filename):
    recording_file = open(os.path.join(tempfile.gettempdir(), filename),
                          'rb')
    self._recordings = pickle.load(recording_file)
    recording_file.close()
  def _delete_recordings(self, filename):
    full_path = os.path.join(tempfile.gettempdir(), filename)
    if os.path.exists(full_path):
      os.remove(full_path)
  def _load_or_use_client(self, filename, http_client):
    # Prefer a cached session file; fall back to making live requests.
    if os.path.exists(os.path.join(tempfile.gettempdir(), filename)):
      self._load_recordings(filename)
    else:
      self.real_client = http_client
  def use_cached_session(self, name=None, real_http_client=None):
    """Attempts to load recordings from a previous live request.

    If a temp file with the recordings exists, then it is used to fulfill
    requests. If the file does not exist, then a real client is used to
    actually make the desired HTTP requests. Requests and responses are
    recorded and will be written to the desired temporary cache file when
    close_session is called.

    Args:
      name: str (optional) The file name of session file to be used. The file
          is loaded from the temporary directory of this machine. If no name
          is passed in, a default name will be constructed using the
          cache_name_prefix, cache_case_name, and cache_test_name of this
          object.
      real_http_client: atom.http_core.HttpClient the real client to be used
          if the cached recordings are not found. If the default
          value is used, this will be an
          atom.http_core.HttpClient.
    """
    if real_http_client is None:
      real_http_client = atom.http_core.HttpClient()
    if name is None:
      self._recordings_cache_name = self.get_cache_file_name()
    else:
      self._recordings_cache_name = name
    self._load_or_use_client(self._recordings_cache_name, real_http_client)
  def close_session(self):
    """Saves recordings in the temporary file named in use_cached_session."""
    if self.real_client is not None:
      self._save_recordings(self._recordings_cache_name)
  def delete_session(self, name=None):
    """Removes recordings from a previous live request."""
    if name is None:
      self._delete_recordings(self._recordings_cache_name)
    else:
      self._delete_recordings(name)
  def get_cache_file_name(self):
    # Combines the prefix, case name and test name into the session
    # cache file name.
    return '%s.%s.%s' % (self.cache_name_prefix, self.cache_case_name,
                         self.cache_test_name)
  def _dump(self):
    """Provides debug information in a string."""
    output = 'MockHttpClient\n real_client: %s\n cache file name: %s\n' % (
        self.real_client, self.get_cache_file_name())
    output += ' recordings:\n'
    i = 0
    for recording in self._recordings:
      output += ' recording %i is for: %s %s\n' % (
          i, recording[0].method, str(recording[0].uri))
      i += 1
    return output
def _match_request(http_request, stored_request):
"""Determines whether a request is similar enough to a stored request
to cause the stored response to be returned."""
# Check to see if the host names match.
if (http_request.uri.host is not None
and http_request.uri.host != stored_request.uri.host):
return False
# Check the request path in the URL (/feeds/private/full/x)
elif http_request.uri.path != stored_request.uri.path:
return False
# Check the method used in the request (GET, POST, etc.)
elif http_request.method != stored_request.method:
return False
# If there is a gsession ID in either request, make sure that it is matched
# exactly.
elif ('gsessionid' in http_request.uri.query
or 'gsessionid' in stored_request.uri.query):
if 'gsessionid' not in stored_request.uri.query:
return False
elif 'gsessionid' not in http_request.uri.query:
return False
elif (http_request.uri.query['gsessionid']
!= stored_request.uri.query['gsessionid']):
return False
# Ignores differences in the query params (?start-index=5&max-results=20),
# the body of the request, the port number, HTTP headers, just to name a
# few.
return True
def _scrub_request(http_request):
""" Removes email address and password from a client login request.
Since the mock server saves the request and response in plantext, sensitive
information like the password should be removed before saving the
recordings. At the moment only requests sent to a ClientLogin url are
scrubbed.
"""
if (http_request and http_request.uri and http_request.uri.path and
http_request.uri.path.endswith('ClientLogin')):
# Remove the email and password from a ClientLogin request.
http_request._body_parts = []
http_request.add_form_inputs(
{'form_data': 'client login request has been scrubbed'})
else:
# We can remove the body of the post from the recorded request, since
# the request body is not used when finding a matching recording.
http_request._body_parts = []
return http_request
def _scrub_response(http_response):
  """Hook for removing sensitive data from responses; currently a no-op."""
  return http_response
class EchoHttpClient(object):
  """Sends the request data back in the response.

  Used to check the formatting of the request as it was sent. Always responds
  with a 200 OK, and some information from the HTTP request is returned in
  special Echo-X headers in the response. The following headers are added
  in the response:
  'Echo-Host': The host name and port number to which the HTTP connection is
               made. If no port was passed in, the header will contain
               host:None.
  'Echo-Uri': The path portion of the URL being requested. /example?x=1&y=2
  'Echo-Scheme': The beginning of the URL, usually 'http' or 'https'
  'Echo-Method': The HTTP method being used, 'GET', 'POST', 'PUT', etc.
  """
  def request(self, http_request):
    # Unpacks the request object's fields and delegates to _http_request.
    return self._http_request(http_request.uri, http_request.method,
                              http_request.headers, http_request._body_parts)
  def _http_request(self, uri, method, headers=None, body_parts=None):
    """Builds the echo response from the pieces of the request."""
    body = StringIO.StringIO()
    response = atom.http_core.HttpResponse(status=200, reason='OK', body=body)
    if headers is None:
      response._headers = {}
    else:
      # Copy headers from the request to the response but convert values to
      # strings. Server response headers always come in as strings, so an int
      # should be converted to a corresponding string when echoing.
      for header, value in headers.iteritems():
        response._headers[header] = str(value)
    response._headers['Echo-Host'] = '%s:%s' % (uri.host, str(uri.port))
    response._headers['Echo-Uri'] = uri._get_relative_path()
    response._headers['Echo-Scheme'] = uri.scheme
    response._headers['Echo-Method'] = method
    # String parts are written directly; file-like parts are read in full.
    for part in body_parts:
      if isinstance(part, str):
        body.write(part)
      elif hasattr(part, 'read'):
        body.write(part.read())
    body.seek(0)
    return response
class SettableHttpClient(object):
  """An HTTP client which always replies with a preconfigured response."""

  def __init__(self, status, reason, body, headers):
    """Creates the client and configures its canned response.

    The arguments are the same as those accepted by set_response.
    """
    self.set_response(status, reason, body, headers)
    self.last_request = None

  def set_response(self, status, reason, body, headers):
    """Replaces the canned response returned for every subsequent request.

    Args:
      status: int HTTP status code, e.g. 200, 404.
      reason: str HTTP reason phrase, e.g. OK, NOT FOUND.
      body: str or file-like object (anything with a read method) used as
          the body of the HTTP response.
      headers: dict of strings containing the HTTP headers of the response.
    """
    canned = atom.http_core.HttpResponse(status=status, reason=reason,
                                         body=body)
    canned._headers = headers.copy()
    self.response = canned

  def request(self, http_request):
    # Remember the most recent request so tests can inspect it.
    self.last_request = http_request
    return self.response
class MockHttpResponse(atom.http_core.HttpResponse):

  def __init__(self, status=None, reason=None, headers=None, body=None):
    """Builds a response whose body can be read repeatedly.

    Unlike the base class, the body is stored as a string rather than a
    file-like object so that read() may be called any number of times.
    """
    self._headers = headers or {}
    if status is not None:
      self.status = status
    if reason is not None:
      self.reason = reason
    if body is None:
      return
    # Drain a file-like body into a string so reads can be repeated.
    self._body = body.read() if hasattr(body, 'read') else body

  def read(self):
    return self._body
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MockService provides CRUD ops. for mocking calls to AtomPub services.
MockService: Exposes the publicly used methods of AtomService to provide
a mock interface which can be used in unit tests.
"""
import atom.service
import pickle
__author__ = 'api.jscudder (Jeffrey Scudder)'
# Recordings contains pairings of HTTP MockRequest objects with
# MockHttpResponse objects; HttpRequest scans this list for replays.
recordings = []
# If set, the mock service HttpRequest calls are actually made through this
# handler object and each real response is captured into recordings.
real_request_handler = None
def ConcealValueWithSha(source):
  """Obscures a secret by returning the SHA-1 hex digest of source[:-5].

  NOTE(review): the trailing five characters are dropped before hashing;
  the reason is not recorded here, so preserve this slicing when editing.
  """
  # hashlib replaces the long-deprecated 'sha' module (removed in Python 3);
  # hashlib.sha1 produces the same digests sha.new did.
  import hashlib
  data = source[:-5]
  if not isinstance(data, bytes):
    # Accept text input too; byte strings pass through unchanged.
    data = data.encode('utf-8')
  return hashlib.sha1(data).hexdigest()
def DumpRecordings(conceal_func=ConcealValueWithSha):
  """Pickles the recordings list, concealing request secrets first.

  Args:
    conceal_func: callable applied to sensitive header values in each
        recorded request before pickling; pass a falsy value to skip
        concealment.
  """
  if conceal_func:
    for request, _ in recordings:
      request.ConcealSecrets(conceal_func)
  return pickle.dumps(recordings)
def LoadRecordings(recordings_file_or_string):
  """Replaces the recordings list with pickled data from a string or file."""
  if hasattr(recordings_file_or_string, 'read'):
    atom.mock_service.recordings = pickle.loads(
        recordings_file_or_string.read())
  elif isinstance(recordings_file_or_string, str):
    atom.mock_service.recordings = pickle.loads(recordings_file_or_string)
def HttpRequest(service, operation, data, uri, extra_headers=None,
    url_params=None, escape_params=True, content_type='application/atom+xml'):
  """Simulates an HTTP call, or records a real one when a handler is set.

  When real_request_handler is unset, the recordings list is scanned for a
  previously stored request matching this one and the paired response is
  returned (None when nothing matches). When real_request_handler is set,
  the call is delegated to real_request_handler.HttpRequest, the real
  response is captured as a MockHttpResponse, the (request, response) pair
  is appended to recordings, and the captured response is returned.

  Args:
    service: atom.AtomService object supplying connection details; its
        server (str), additional_headers (dict), port (int), and ssl (bool)
        members are used to construct the HTTP call.
    operation: str The HTTP operation to be performed, usually one of
        'GET', 'POST', 'PUT', or 'DELETE'.
    data: ElementTree, filestream, list of parts, or other object which can
        be converted to a string; the request body. Should be None for GET.
    uri: str The beginning of the URL to which the request should be sent,
        for example '/', '/base/feeds/snippets'.
    extra_headers: dict of strings. HTTP headers sent in addition to those
        stored in service.additional_headers.
    url_params: dict of strings. Key value pairs added to the URL as query
        parameters, e.g. {'foo':'bar', 'test':'param'} becomes
        ?foo=bar&test=param.
    escape_params: bool default True. If true, keys and values in url_params
        are URL escaped (special characters converted to %XX form).
    content_type: str The MIME type for the data being sent. Defaults to
        'application/atom+xml'; only used if data is set.
  """
  full_uri = atom.service.BuildUri(uri, url_params, escape_params)
  (server, port, ssl, uri) = atom.service.ProcessUrl(service, uri)
  current_request = MockRequest(operation, full_uri, host=server, ssl=ssl,
      data=data, extra_headers=extra_headers, url_params=url_params,
      escape_params=escape_params, content_type=content_type)
  if not real_request_handler:
    # Replay mode: return the stored response paired with the first
    # matching recorded request, or None when there is no match.
    for recorded_request, recorded_response in recordings:
      if recorded_request.IsMatch(current_request):
        return recorded_response
    return None
  # Record mode: perform the real request and capture its response.
  live_response = real_request_handler.HttpRequest(service, operation, data,
      uri, extra_headers=extra_headers, url_params=url_params,
      escape_params=escape_params, content_type=content_type)
  # TODO: need to copy the HTTP headers from the real response into the
  # recorded_response.
  recorded_response = MockHttpResponse(body=live_response.read(),
      status=live_response.status, reason=live_response.reason)
  # Map the request to the response produced by the real_request_handler so
  # the exchange can be replayed later.
  recordings.append((current_request, recorded_response))
  return recorded_response
class MockRequest(object):
  """Represents a request made to an AtomPub server.

  These objects are used to determine if a client request matches a recorded
  HTTP request to determine what the mock server's response will be.
  """

  def __init__(self, operation, uri, host=None, ssl=False, port=None,
      data=None, extra_headers=None, url_params=None, escape_params=True,
      content_type='application/atom+xml'):
    """Constructor for a MockRequest

    Args:
      operation: str One of 'GET', 'POST', 'PUT', or 'DELETE'; this is the
          HTTP operation requested on the resource.
      uri: str The URL describing the resource to be modified or feed to be
          retrieved. This should include the protocol (http/https) and the
          host (aka domain). For example, these are some valid full_uris:
          'http://example.com', 'https://www.google.com/accounts/ClientLogin'
      host: str (optional) The server name which will be placed at the
          beginning of the URL if the uri parameter does not begin with
          'http'. Examples include 'example.com', 'www.google.com'.
      ssl: boolean (optional) If true, the request URL will begin with https
          instead of http.
      port: int (optional) Accepted for signature compatibility but not
          currently used when constructing the canonical URL.
      data: ElementTree, filestream, list of parts, or other object which
          can be converted to a string (optional). Should be None when
          performing a GET or PUT. If data is a file-like object which can
          be read, the constructor will read the entire file into memory.
      extra_headers: dict (optional) HTTP headers included in the request.
      url_params: dict (optional) Key value pairs which should be added to
          the URL as URL parameters in the request. For example uri='/',
          url_parameters={'foo':'1','bar':'2'} could become '/?foo=1&bar=2'.
      escape_params: boolean (optional) Perform URL escaping on the keys and
          values specified in url_params. Defaults to True.
      content_type: str (optional) Provides the MIME type of the data being
          sent.
    """
    self.operation = operation
    self.uri = _ConstructFullUrlBase(uri, host=host, ssl=ssl)
    self.data = data
    self.extra_headers = extra_headers
    self.url_params = url_params or {}
    self.escape_params = escape_params
    self.content_type = content_type

  def ConcealSecrets(self, conceal_func):
    """Conceal secret data in this request.

    Applies conceal_func to the Authorization header, if present.
    Fix: previously this raised an AttributeError when extra_headers was
    None (its default); a request without headers is now left untouched.
    """
    if self.extra_headers and 'Authorization' in self.extra_headers:
      self.extra_headers['Authorization'] = conceal_func(
          self.extra_headers['Authorization'])

  def IsMatch(self, other_request):
    """Check to see if the other_request is equivalent to this request.

    Used to determine if a recording matches an incoming request so that a
    recorded response should be sent to the client.

    The matching is not exact, only the operation and URL are examined
    currently.

    Args:
      other_request: MockRequest The request which we want to check this
          (self) MockRequest against to see if they are equivalent.
    """
    # More accurate matching logic will likely be required.
    return (self.operation == other_request.operation and self.uri ==
        other_request.uri)
def _ConstructFullUrlBase(uri, host=None, ssl=False):
"""Puts URL components into the form http(s)://full.host.strinf/uri/path
Used to construct a roughly canonical URL so that URLs which begin with
'http://example.com/' can be compared to a uri of '/' when the host is
set to 'example.com'
If the uri contains 'http://host' already, the host and ssl parameters
are ignored.
Args:
uri: str The path component of the URL, examples include '/'
host: str (optional) The host name which should prepend the URL. Example:
'example.com'
ssl: boolean (optional) If true, the returned URL will begin with https
instead of http.
Returns:
String which has the form http(s)://example.com/uri/string/contents
"""
if uri.startswith('http'):
return uri
if ssl:
return 'https://%s%s' % (host, uri)
else:
return 'http://%s%s' % (host, uri)
class MockHttpResponse(object):
  """Returned from MockService crud methods as the server's response."""

  def __init__(self, body=None, status=None, reason=None, headers=None):
    """Construct a mock HTTPResponse and set members.

    Args:
      body: str (optional) The HTTP body of the server's response.
      status: int (optional) The HTTP status code.
      reason: str (optional) The HTTP reason phrase.
      headers: dict (optional) The HTTP response headers.
    """
    self.status = status
    self.reason = reason
    self.body = body
    self.headers = headers or {}

  def read(self):
    return self.body

  def getheader(self, header_name):
    # Raises KeyError when the header is absent, mirroring dict access.
    return self.headers[header_name]
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides a TokenStore class which is designed to manage
auth tokens required for different services.
Each token is valid for a set of scopes which is the start of a URL. An HTTP
client will use a token store to find a valid Authorization header to send
in requests to the specified URL. If the HTTP client determines that a token
has expired or been revoked, it can remove the token from the store so that
it will not be used in future requests.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import atom.http_interface
import atom.url
SCOPE_ALL = 'http'
class TokenStore(object):
  """Manages Authorization tokens which will be sent in HTTP headers."""

  def __init__(self, scoped_tokens=None):
    self._tokens = scoped_tokens or {}

  def add_token(self, token):
    """Adds a new token to the store (replaces tokens with the same scope).

    Args:
      token: A subclass of http_interface.GenericToken. The token object is
          responsible for adding the Authorization header to the HTTP
          request. The scopes defined in the token are used to determine if
          the token is valid for a requested scope when find_token is
          called.

    Returns:
      True if the token was added, False if the token was not added because
      no scopes were provided.
    """
    scopes = getattr(token, 'scopes', None)
    if not scopes:
      return False
    for scope in scopes:
      self._tokens[str(scope)] = token
    return True

  def find_token(self, url):
    """Selects an Authorization header token which can be used for the URL.

    Args:
      url: str or atom.url.Url or a list containing the same. The URL which
          is going to be requested. All tokens are examined to see if any
          scopes match the beginning of the URL. The first match found is
          returned.

    Returns:
      The token object which should execute the HTTP request. If there was
      no token for the url (the url did not begin with any of the token
      scopes available), then the atom.http_interface.GenericToken is
      returned because the GenericToken calls through to the http client
      without adding an Authorization header.
    """
    if url is None:
      return None
    if isinstance(url, (str, unicode)):
      url = atom.url.parse_url(url)
    if url in self._tokens:
      candidate = self._tokens[url]
      if candidate.valid_for_scope(url):
        return candidate
      # The stored token no longer covers this scope; drop it.
      del self._tokens[url]
    for scope, token in self._tokens.iteritems():
      if token.valid_for_scope(url):
        return token
    return atom.http_interface.GenericToken()

  def remove_token(self, token):
    """Removes the token from the token_store.

    This method is used when a token is determined to be invalid. If the
    token was found by find_token, but resulted in a 401 or 403 error
    stating that the token was invalid, then the token should be removed to
    prevent future use.

    Returns:
      True if a token was found and then removed from the token store.
      False if the token was not in the TokenStore.
    """
    doomed_scopes = [scope for scope, stored in self._tokens.iteritems()
                     if stored == token]
    for scope in doomed_scopes:
      del self._tokens[scope]
    return bool(doomed_scopes)

  def remove_all_tokens(self):
    self._tokens = {}
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import urlparse
import urllib
DEFAULT_PROTOCOL = 'http'
DEFAULT_PORT = 80
def parse_url(url_string):
  """Creates a Url object which corresponds to the URL string.

  Partial URLs are accepted; any component missing from the string is
  simply left unset on the returned Url.
  """
  pieces = urlparse.urlparse(url_string)
  url = Url()
  if pieces[0]:
    url.protocol = pieces[0]
  if pieces[1]:
    # The netloc may carry an explicit port after a colon.
    netloc_parts = pieces[1].split(':')
    if netloc_parts[0]:
      url.host = netloc_parts[0]
    if len(netloc_parts) > 1:
      url.port = netloc_parts[1]
  if pieces[2]:
    url.path = pieces[2]
  if pieces[4]:
    # Unescape each query parameter; a key without '=' maps to None.
    for pair in pieces[4].split('&'):
      pair_parts = pair.split('=')
      if len(pair_parts) > 1:
        url.params[urllib.unquote_plus(pair_parts[0])] = (
            urllib.unquote_plus(pair_parts[1]))
      elif len(pair_parts) == 1:
        url.params[urllib.unquote_plus(pair_parts[0])] = None
  return url
class Url(object):
  """Represents a URL and implements comparison logic.
  URL strings which are not identical can still be equivalent, so this object
  provides a better interface for comparing and manipulating URLs than
  strings. URL parameters are represented as a dictionary of strings, and
  defaults are used for the protocol (http) and port (80) if not provided.
  """

  def __init__(self, protocol=None, host=None, port=None, path=None,
      params=None):
    # Components default to None (or {} for params) so that partially
    # specified URLs, such as those produced by parse_url, can be held.
    self.protocol = protocol
    self.host = host
    self.port = port
    self.path = path
    self.params = params or {}

  def to_string(self):
    """Rebuilds the URL string via urlparse.urlunparse."""
    # Slot meanings for urlunparse: 0=scheme, 1=netloc, 2=path,
    # 3=params, 4=query, 5=fragment; unused slots stay ''.
    url_parts = ['', '', '', '', '', '']
    if self.protocol:
      url_parts[0] = self.protocol
    if self.host:
      if self.port:
        url_parts[1] = ':'.join((self.host, str(self.port)))
      else:
        url_parts[1] = self.host
    if self.path:
      url_parts[2] = self.path
    if self.params:
      url_parts[4] = self.get_param_string()
    return urlparse.urlunparse(url_parts)

  def get_param_string(self):
    """URL-escapes and joins the parameters as 'key1=val1&key2=val2'."""
    param_pairs = []
    for key, value in self.params.iteritems():
      # NOTE(review): str(value) renders a None value as the literal string
      # 'None' — confirm valueless parameters are not expected here.
      param_pairs.append('='.join((urllib.quote_plus(key),
          urllib.quote_plus(str(value)))))
    return '&'.join(param_pairs)

  def get_request_uri(self):
    """Returns the path with the parameters escaped and appended."""
    param_string = self.get_param_string()
    if param_string:
      return '?'.join([self.path, param_string])
    else:
      return self.path

  def __cmp__(self, other):
    # Python 2 three-way comparison; non-Url objects are compared by
    # string form.
    if not isinstance(other, Url):
      return cmp(self.to_string(), str(other))
    difference = 0
    # Compare the protocol, substituting the default ('http') for a missing
    # protocol on either side so equivalent URLs compare equal.
    if self.protocol and other.protocol:
      difference = cmp(self.protocol, other.protocol)
    elif self.protocol and not other.protocol:
      difference = cmp(self.protocol, DEFAULT_PROTOCOL)
    elif not self.protocol and other.protocol:
      difference = cmp(DEFAULT_PROTOCOL, other.protocol)
    if difference != 0:
      return difference
    # Compare the host
    difference = cmp(self.host, other.host)
    if difference != 0:
      return difference
    # Compare the port, substituting the default (80) for a missing port on
    # either side.
    if self.port and other.port:
      difference = cmp(self.port, other.port)
    elif self.port and not other.port:
      difference = cmp(self.port, DEFAULT_PORT)
    elif not self.port and other.port:
      difference = cmp(DEFAULT_PORT, other.port)
    if difference != 0:
      return difference
    # Compare the path
    difference = cmp(self.path, other.path)
    if difference != 0:
      return difference
    # Compare the parameters
    return cmp(self.params, other.params)

  def __str__(self):
    return self.to_string()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# TODO: add proxy handling.
__author__ = 'j.s@google.com (Jeff Scudder)'
import os
import StringIO
import urlparse
import urllib
import httplib
ssl = None
try:
import ssl
except ImportError:
pass
class Error(Exception):
  """Base exception for errors raised by this module."""
  pass


class UnknownSize(Error):
  """Raised when a body part's size is required but cannot be determined."""
  pass


class ProxyError(Error):
  """Presumably reserved for proxy failures; not raised in the visible code."""
  pass


# Boundary marker placed between the parts of a MIME multipart request body.
MIME_BOUNDARY = 'END_OF_PART'
def get_headers(http_response):
  """Retrieves all HTTP headers from an HTTP response from the server.

  Provided for backwards compatibility with Python 2.2 and 2.3, whose
  httplib.HTTPResponse objects lack a getheaders method. When getheaders
  is available it is used directly; otherwise a fixed set of common header
  names is probed one at a time with getheader.
  """
  if hasattr(http_response, 'getheaders'):
    return http_response.getheaders()
  common_header_names = (
      'location', 'content-type', 'content-length', 'age', 'allow',
      'cache-control', 'content-location', 'content-encoding', 'date',
      'etag', 'expires', 'last-modified', 'pragma', 'server',
      'set-cookie', 'transfer-encoding', 'vary', 'via', 'warning',
      'www-authenticate', 'gdata-version')
  found = []
  for name in common_header_names:
    value = http_response.getheader(name, None)
    if value is not None:
      found.append((name, value))
  return found
class HttpRequest(object):
  """Contains all of the parameters for an HTTP 1.1 request.
  The HTTP headers are represented by a dictionary, and it is the
  responsibility of the user to ensure that duplicate field names are combined
  into one header value according to the rules in section 4.2 of RFC 2616.
  """
  # Class-level defaults; instances overwrite these in __init__.
  method = None
  uri = None

  def __init__(self, uri=None, method=None, headers=None):
    """Construct an HTTP request.

    Args:
      uri: The full path or partial path as a Uri object or a string.
      method: The HTTP method for the request, examples include 'GET',
          'POST', etc.
      headers: dict of strings The HTTP headers to include in the request.
    """
    self.headers = headers or {}
    self._body_parts = []
    if method is not None:
      self.method = method
    if isinstance(uri, (str, unicode)):
      uri = Uri.parse_uri(uri)
    self.uri = uri or Uri()

  def add_body_part(self, data, mime_type, size=None):
    """Adds data to the HTTP request body.
    If more than one part is added, this is assumed to be a mime-multipart
    request. This method is designed to create MIME 1.0 requests as specified
    in RFC 1341.

    Args:
      data: str or a file-like object containing a part of the request body.
      mime_type: str The MIME type describing the data
      size: int Required if the data is a file like object. If the data is a
          string, the size is calculated so this parameter is ignored.
    """
    if isinstance(data, str):
      size = len(data)
    if size is None:
      # TODO: support chunked transfer if some of the body is of unknown size.
      raise UnknownSize('Each part of the body must have a known size.')
    if 'Content-Length' in self.headers:
      content_length = int(self.headers['Content-Length'])
    else:
      content_length = 0
    # If this is the first part added to the body, then this is not a multipart
    # request.
    if len(self._body_parts) == 0:
      self.headers['Content-Type'] = mime_type
      content_length = size
      self._body_parts.append(data)
    elif len(self._body_parts) == 1:
      # Adding a second part converts the single-part body into a
      # mime-multipart payload laid out as: [intro, boundary, part1-type,
      # part1, boundary, part2-type, part2, closing-boundary]. The
      # Content-Length header is updated incrementally as pieces are added.
      self._body_parts.insert(0, 'Media multipart posting')
      boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,)
      content_length += len(boundary_string) + size
      self._body_parts.insert(1, boundary_string)
      content_length += len('Media multipart posting')
      # Put the content type of the first part of the body into the multipart
      # payload.
      original_type_string = 'Content-Type: %s\r\n\r\n' % (
          self.headers['Content-Type'],)
      self._body_parts.insert(2, original_type_string)
      content_length += len(original_type_string)
      boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,)
      self._body_parts.append(boundary_string)
      content_length += len(boundary_string)
      # Change the headers to indicate this is now a mime multipart request.
      self.headers['Content-Type'] = 'multipart/related; boundary="%s"' % (
          MIME_BOUNDARY,)
      self.headers['MIME-version'] = '1.0'
      # Include the mime type of this part.
      type_string = 'Content-Type: %s\r\n\r\n' % (mime_type)
      self._body_parts.append(type_string)
      content_length += len(type_string)
      self._body_parts.append(data)
      ending_boundary_string = '\r\n--%s--' % (MIME_BOUNDARY,)
      self._body_parts.append(ending_boundary_string)
      content_length += len(ending_boundary_string)
    else:
      # This is a mime multipart request; splice the new part in just before
      # the closing boundary, hence the inserts at index -1.
      boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,)
      self._body_parts.insert(-1, boundary_string)
      content_length += len(boundary_string) + size
      # Include the mime type of this part.
      type_string = 'Content-Type: %s\r\n\r\n' % (mime_type)
      self._body_parts.insert(-1, type_string)
      content_length += len(type_string)
      self._body_parts.insert(-1, data)
    self.headers['Content-Length'] = str(content_length)

  # I could add an "append_to_body_part" method as well.
  AddBodyPart = add_body_part

  def add_form_inputs(self, form_data,
      mime_type='application/x-www-form-urlencoded'):
    """Form-encodes and adds data to the request body.

    Args:
      form_data: dict or sequence of two member tuples which contains the
          form keys and values.
      mime_type: str The MIME type of the form data being sent. Defaults
          to 'application/x-www-form-urlencoded'.
    """
    body = urllib.urlencode(form_data)
    self.add_body_part(body, mime_type)

  AddFormInputs = add_form_inputs

  def _copy(self):
    """Creates a deep copy of this request."""
    copied_uri = Uri(self.uri.scheme, self.uri.host, self.uri.port,
        self.uri.path, self.uri.query.copy())
    new_request = HttpRequest(uri=copied_uri, method=self.method,
        headers=self.headers.copy())
    # Shallow-copy the parts list itself; file-like parts remain shared.
    new_request._body_parts = self._body_parts[:]
    return new_request

  def _dump(self):
    """Converts to a printable string for debugging purposes.
    In order to preserve the request, it does not read from file-like objects
    in the body.
    """
    output = 'HTTP Request\n method: %s\n url: %s\n headers:\n' % (
        self.method, str(self.uri))
    for header, value in self.headers.iteritems():
      output += '  %s: %s\n' % (header, value)
    output += ' body sections:\n'
    i = 0
    for part in self._body_parts:
      if isinstance(part, (str, unicode)):
        output += '  %s: %s\n' % (i, part)
      else:
        output += '  %s: <file like object>\n' % i
      i += 1
    return output
def _apply_defaults(http_request):
if http_request.uri.scheme is None:
if http_request.uri.port == 443:
http_request.uri.scheme = 'https'
else:
http_request.uri.scheme = 'http'
class Uri(object):
  """A URI as used in HTTP 1.1"""
  # Class-level defaults; instances overwrite these in __init__.
  scheme = None
  host = None
  port = None
  path = None

  def __init__(self, scheme=None, host=None, port=None, path=None, query=None):
    """Constructor for a URI.

    Args:
      scheme: str This is usually 'http' or 'https'.
      host: str The host name or IP address of the desired server.
      port: int The server's port number.
      path: str The path of the resource following the host. This begins with
          a /, example: '/calendar/feeds/default/allcalendars/full'
      query: dict of strings The URL query parameters. The keys and values are
          both escaped so this dict should contain the unescaped values.
          For example {'my key': 'val', 'second': '!!!'} will become
          '?my+key=val&second=%21%21%21' which is appended to the path.
    """
    self.query = query or {}
    if scheme is not None:
      self.scheme = scheme
    if host is not None:
      self.host = host
    if port is not None:
      self.port = port
    if path:
      self.path = path

  def _get_query_string(self):
    # Escapes each key and value and joins them as 'key1=val1&key2=val2'.
    param_pairs = []
    for key, value in self.query.iteritems():
      # NOTE(review): str(value) renders a None value as the literal string
      # 'None' — confirm valueless parameters are not expected here.
      param_pairs.append('='.join((urllib.quote_plus(key),
          urllib.quote_plus(str(value)))))
    return '&'.join(param_pairs)

  def _get_relative_path(self):
    """Returns the path with the query parameters escaped and appended."""
    param_string = self._get_query_string()
    if self.path is None:
      path = '/'
    else:
      path = self.path
    if param_string:
      return '?'.join([path, param_string])
    else:
      return path

  def _to_string(self):
    # Infer the scheme when unset: port 443 implies https, otherwise http.
    if self.scheme is None and self.port == 443:
      scheme = 'https'
    elif self.scheme is None:
      scheme = 'http'
    else:
      scheme = self.scheme
    if self.path is None:
      path = '/'
    else:
      path = self.path
    if self.port is None:
      return '%s://%s%s' % (scheme, self.host, self._get_relative_path())
    else:
      return '%s://%s:%s%s' % (scheme, self.host, str(self.port),
          self._get_relative_path())

  def __str__(self):
    return self._to_string()

  def modify_request(self, http_request=None):
    """Sets HTTP request components based on the URI."""
    if http_request is None:
      http_request = HttpRequest()
    if http_request.uri is None:
      http_request.uri = Uri()
    # Determine the correct scheme.
    if self.scheme:
      http_request.uri.scheme = self.scheme
    if self.port:
      http_request.uri.port = self.port
    if self.host:
      http_request.uri.host = self.host
    # Set the relative uri path
    if self.path:
      http_request.uri.path = self.path
    if self.query:
      http_request.uri.query = self.query.copy()
    return http_request

  ModifyRequest = modify_request

  def parse_uri(uri_string):
    """Creates a Uri object which corresponds to the URI string.
    This method can accept partial URIs, but it will leave missing
    members of the Uri unset.
    """
    parts = urlparse.urlparse(uri_string)
    uri = Uri()
    if parts[0]:
      uri.scheme = parts[0]
    if parts[1]:
      host_parts = parts[1].split(':')
      if host_parts[0]:
        uri.host = host_parts[0]
      if len(host_parts) > 1:
        uri.port = int(host_parts[1])
    if parts[2]:
      uri.path = parts[2]
    if parts[4]:
      param_pairs = parts[4].split('&')
      for pair in param_pairs:
        pair_parts = pair.split('=')
        if len(pair_parts) > 1:
          uri.query[urllib.unquote_plus(pair_parts[0])] = (
              urllib.unquote_plus(pair_parts[1]))
        elif len(pair_parts) == 1:
          uri.query[urllib.unquote_plus(pair_parts[0])] = None
    return uri

  # Pre-decorator idiom: wrap parse_uri as a static method and provide a
  # CamelCase alias for older callers.
  parse_uri = staticmethod(parse_uri)
  ParseUri = parse_uri
# Module-level aliases so callers can use parse_uri without referencing the
# Uri class directly; ParseUri is the legacy CamelCase spelling.
parse_uri = Uri.parse_uri
ParseUri = Uri.parse_uri
class HttpResponse(object):
  """A simple HTTP response: status, reason, dict headers, file-like body."""
  status = None
  reason = None
  _body = None

  def __init__(self, status=None, reason=None, headers=None, body=None):
    self._headers = headers or {}
    if status is not None:
      self.status = status
    if reason is not None:
      self.reason = reason
    if body is None:
      return
    # Wrap plain strings so the body is always read through a file-like
    # interface; objects that already expose read() are kept as-is.
    self._body = body if hasattr(body, 'read') else StringIO.StringIO(body)

  def getheader(self, name, default=None):
    return self._headers.get(name, default)

  def getheaders(self):
    return self._headers

  def read(self, amt=None):
    if self._body is None:
      return None
    return self._body.read(amt) if amt else self._body.read()
def _dump_response(http_response):
  """Converts to a string for printing debug messages.
  Does not read the body since that may consume the content.
  """
  output = 'HttpResponse\n status: %s\n reason: %s\n headers:' % (
      http_response.status, http_response.reason)
  headers = get_headers(http_response)
  # get_headers may yield either a dict or a list of (name, value) pairs.
  if isinstance(headers, dict):
    header_items = headers.iteritems()
  else:
    header_items = headers
  for name, value in header_items:
    output += '  %s: %s\n' % (name, value)
  return output
class HttpClient(object):
  """Performs HTTP requests using httplib."""
  # When set truthy, httplib's connection debug output is enabled.
  debug = None

  def request(self, http_request):
    return self._http_request(http_request.method, http_request.uri,
        http_request.headers, http_request._body_parts)

  Request = request

  def _get_connection(self, uri, headers=None):
    """Opens a socket connection to the server to set up an HTTP request.

    Args:
      uri: The full URL for the request as a Uri object.
      headers: A dict of string pairs containing the HTTP headers for the
          request.
    """
    connection = None
    if uri.scheme == 'https':
      if not uri.port:
        connection = httplib.HTTPSConnection(uri.host)
      else:
        connection = httplib.HTTPSConnection(uri.host, int(uri.port))
    else:
      if not uri.port:
        connection = httplib.HTTPConnection(uri.host)
      else:
        connection = httplib.HTTPConnection(uri.host, int(uri.port))
    return connection

  def _http_request(self, method, uri, headers=None, body_parts=None):
    """Makes an HTTP request using httplib.

    Args:
      method: str example: 'GET', 'POST', 'PUT', 'DELETE', etc.
      uri: str or atom.http_core.Uri
      headers: dict of strings mapping to strings which will be sent as HTTP
          headers in the request.
      body_parts: list of strings, objects with a read method, or objects
          which can be converted to strings using str. Each of these
          will be sent in order as the body of the HTTP request.
    """
    if isinstance(uri, (str, unicode)):
      uri = Uri.parse_uri(uri)
    connection = self._get_connection(uri, headers=headers)
    if self.debug:
      connection.debuglevel = 1
    # Use the absolute URI when the connection host differs from the target
    # host; otherwise send just the relative path.
    if connection.host != uri.host:
      connection.putrequest(method, str(uri))
    else:
      connection.putrequest(method, uri._get_relative_path())
    # Overcome a bug in Python 2.4 and 2.5
    # httplib.HTTPConnection.putrequest adding
    # HTTP request header 'Host: www.google.com:443' instead of
    # 'Host: www.google.com', and thus resulting the error message
    # 'Token invalid - AuthSub token has wrong scope' in the HTTP response.
    if (uri.scheme == 'https' and int(uri.port or 443) == 443 and
        hasattr(connection, '_buffer') and
        isinstance(connection._buffer, list)):
      # Rewrite the buffered Host header in place. This relies on httplib's
      # private _buffer attribute, hence the defensive hasattr/isinstance
      # checks above.
      header_line = 'Host: %s:443' % uri.host
      replacement_header_line = 'Host: %s' % uri.host
      try:
        connection._buffer[connection._buffer.index(header_line)] = (
            replacement_header_line)
      except ValueError:  # header_line missing from connection._buffer
        pass
    # Send the HTTP headers.
    for header_name, value in headers.iteritems():
      connection.putheader(header_name, value)
    connection.endheaders()
    # If there is data, send it in the request.
    if body_parts:
      for part in body_parts:
        _send_data_part(part, connection)
    # Return the HTTP Response from the server.
    return connection.getresponse()
def _send_data_part(data, connection):
    """Writes one request-body part to an open HTTP connection.

    Strings are sent verbatim, file-like objects are streamed in chunks,
    and anything else is converted with str() before sending.
    """
    if isinstance(data, (str, unicode)):
        # I might want to just allow str, not unicode.
        connection.send(data)
        return
    if hasattr(data, 'read'):
        # File-like object: stream it rather than loading it all at once.
        while True:
            chunk = data.read(100000)
            if chunk == '':
                break
            connection.send(chunk)
        return
    # Not a string and not file-like: fall back to its string form.
    connection.send(str(data))
    return
class ProxiedHttpClient(HttpClient):
    """An HttpClient that routes requests through an HTTP(S) proxy.

    The proxy location is read from the https_proxy / http_proxy
    environment variables; optional credentials come from the
    proxy-username / proxy-password (or underscore variants) variables.
    """

    def _get_connection(self, uri, headers=None):
        """Opens a connection, tunnelling through a proxy when configured.

        Args:
            uri: Uri for the request destination.
            headers: dict of HTTP headers. For HTTPS the User-Agent entry (if
                present) is forwarded in the CONNECT handshake; for plain HTTP
                a Proxy-Authorization header may be added to this dict.

        Returns:
            An httplib connection object ready for the request, or None when
            the scheme is neither http nor https.

        Raises:
            ProxyError: if the proxy refuses the HTTPS CONNECT request.
        """
        # Check to see if there are proxy settings required for this request.
        proxy = None
        if uri.scheme == 'https':
            proxy = os.environ.get('https_proxy')
        elif uri.scheme == 'http':
            proxy = os.environ.get('http_proxy')
        if not proxy:
            # No proxy configured: fall back to a direct connection.
            return HttpClient._get_connection(self, uri, headers=headers)
        # Now we have the URL of the appropriate proxy server.
        # Get a username and password for the proxy if required.
        proxy_auth = _get_proxy_auth()
        if uri.scheme == 'https':
            import socket
            if proxy_auth:
                proxy_auth = 'Proxy-authorization: %s' % proxy_auth
            # Construct the proxy connect command.
            port = uri.port
            if not port:
                port = 443
            proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (uri.host, port)
            # Set the user agent to send to the proxy
            user_agent = ''
            if headers and 'User-Agent' in headers:
                user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent'])
            proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent)
            # Find the proxy host and port.
            proxy_uri = Uri.parse_uri(proxy)
            if not proxy_uri.port:
                proxy_uri.port = '80'
            # Connect to the proxy server, very simple recv and error checking
            p_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            p_sock.connect((proxy_uri.host, int(proxy_uri.port)))
            p_sock.sendall(proxy_pieces)
            response = ''
            # Wait for the full response.
            while response.find("\r\n\r\n") == -1:
                response += p_sock.recv(8192)
            p_status = response.split()[1]
            if p_status != str(200):
                raise ProxyError('Error status=%s' % str(p_status))
            # Trivial setup for ssl socket.
            sslobj = None
            if ssl is not None:
                sslobj = ssl.wrap_socket(p_sock, None, None)
            else:
                # Fallback for Pythons without the ssl module (pre-2.6).
                # BUG FIX: the certfile argument was the garbled name
                # 'Nonesock_', which raised NameError whenever this
                # fallback path was taken; it must simply be None.
                sock_ssl = socket.ssl(p_sock, None, None)
                sslobj = httplib.FakeSocket(p_sock, sock_ssl)
            # Initalize httplib and replace with the proxy socket.
            connection = httplib.HTTPConnection(proxy_uri.host)
            connection.sock = sslobj
            return connection
        elif uri.scheme == 'http':
            proxy_uri = Uri.parse_uri(proxy)
            if not proxy_uri.port:
                proxy_uri.port = '80'
            if proxy_auth:
                # Plain HTTP proxying: authenticate via a request header.
                headers['Proxy-Authorization'] = proxy_auth.strip()
            return httplib.HTTPConnection(proxy_uri.host, int(proxy_uri.port))
        return None
def _get_proxy_auth():
import base64
proxy_username = os.environ.get('proxy-username')
if not proxy_username:
proxy_username = os.environ.get('proxy_username')
proxy_password = os.environ.get('proxy-password')
if not proxy_password:
proxy_password = os.environ.get('proxy_password')
if proxy_username:
user_auth = base64.b64encode('%s:%s' % (proxy_username,
proxy_password))
return 'Basic %s\r\n' % (user_auth.strip())
else:
return ''
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes representing Atom elements.
Module objective: provide data classes for Atom constructs. These classes hide
the XML-ness of Atom and provide a set of native Python classes to interact
with.
Conversions to and from XML should only be necessary when the Atom classes
"touch the wire" and are sent over HTTP. For this reason this module
provides methods and functions to convert Atom classes to and from strings.
For more information on the Atom data model, see RFC 4287
(http://www.ietf.org/rfc/rfc4287.txt)
AtomBase: A foundation class on which Atom classes are built. It
handles the parsing of attributes and children which are common to all
Atom classes. By default, the AtomBase class translates all XML child
nodes into ExtensionElements.
ExtensionElement: Atom allows Atom objects to contain XML which is not part
of the Atom specification, these are called extension elements. If a
classes parser encounters an unexpected XML construct, it is translated
into an ExtensionElement instance. ExtensionElement is designed to fully
capture the information in the XML. Child nodes in an XML extension are
turned into ExtensionElements as well.
"""
__author__ = 'api.jscudder (Jeffrey Scudder)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import warnings
# XML namespaces which are often used in Atom entities.
ATOM_NAMESPACE = 'http://www.w3.org/2005/Atom'
# Clark-notation template for fully qualified Atom element names.
ELEMENT_TEMPLATE = '{http://www.w3.org/2005/Atom}%s'
APP_NAMESPACE = 'http://purl.org/atom/app#'
# Clark-notation template for Atom Publishing Protocol element names.
APP_TEMPLATE = '{http://purl.org/atom/app#}%s'
# This encoding is used for converting strings before translating the XML
# into an object.
XML_STRING_ENCODING = 'utf-8'
# The desired string encoding for object members. set or monkey-patch to
# unicode if you want object members to be Python unicode strings, instead of
# encoded strings
MEMBER_STRING_ENCODING = 'utf-8'
#MEMBER_STRING_ENCODING = unicode
# If True, all methods which are exclusive to v1 will raise a
# DeprecationWarning
ENABLE_V1_WARNINGS = False
def v1_deprecated(warning=None):
    """Shows a warning if ENABLE_V1_WARNINGS is True.

    Function decorator used to mark methods used in v1 classes which
    may be removed in future versions of the library.

    Args:
        warning: str (optional) Message passed to warnings.warn when the
            decorated callable is invoked and ENABLE_V1_WARNINGS is True.

    Returns:
        A decorator wrapping a callable with the optional warning.
    """
    warning = warning or ''
    # This closure is what is returned from the deprecated function.
    def mark_deprecated(f):
        # The deprecated_function wraps the actual call to f.
        def optional_warn_function(*args, **kwargs):
            if ENABLE_V1_WARNINGS:
                warnings.warn(warning, DeprecationWarning, stacklevel=2)
            return f(*args, **kwargs)
        # Preserve the original name to avoid masking all decorated functions
        # as 'optional_warn_function'.
        try:
            optional_warn_function.func_name = f.func_name
        except (TypeError, AttributeError):
            # TypeError: Python 2.3 disallows assigning func_name.
            # AttributeError: builtins and callable objects have no
            # func_name to read — previously this case crashed the
            # decoration with an uncaught AttributeError.
            pass
        return optional_warn_function
    return mark_deprecated
def CreateClassFromXMLString(target_class, xml_string, string_encoding=None):
    """Creates an instance of the target class from the string contents.

    Args:
        target_class: class The class which will be instantiated and populated
            with the contents of the XML. This class must have a _tag and a
            _namespace class variable.
        xml_string: str A string which contains valid XML. The root element
            of the XML string should match the tag and namespace of the
            desired class.
        string_encoding: str The character encoding which the xml_string
            should be converted to before it is interpreted and translated
            into objects. The default is None in which case the module-level
            XML_STRING_ENCODING is used.

    Returns:
        An instance of the target class with members assigned according to
        the contents of the XML - or None if the root XML tag and namespace
        did not match those of the target class.
    """
    encoding = string_encoding or XML_STRING_ENCODING
    # Unicode input must be converted to an encoded byte string before the
    # parser sees it.
    if encoding and isinstance(xml_string, unicode):
        xml_string = xml_string.encode(encoding)
    return _CreateClassFromElementTree(
        target_class, ElementTree.fromstring(xml_string))

CreateClassFromXMLString = v1_deprecated(
    'Please use atom.core.parse with atom.data classes instead.')(
    CreateClassFromXMLString)
def _CreateClassFromElementTree(target_class, tree, namespace=None, tag=None):
"""Instantiates the class and populates members according to the tree.
Note: Only use this function with classes that have _namespace and _tag
class members.
Args:
target_class: class The class which will be instantiated and populated
with the contents of the XML.
tree: ElementTree An element tree whose contents will be converted into
members of the new target_class instance.
namespace: str (optional) The namespace which the XML tree's root node must
match. If omitted, the namespace defaults to the _namespace of the
target class.
tag: str (optional) The tag which the XML tree's root node must match. If
omitted, the tag defaults to the _tag class member of the target
class.
Returns:
An instance of the target class - or None if the tag and namespace of
the XML tree's root node did not match the desired namespace and tag.
"""
if namespace is None:
namespace = target_class._namespace
if tag is None:
tag = target_class._tag
if tree.tag == '{%s}%s' % (namespace, tag):
target = target_class()
target._HarvestElementTree(tree)
return target
else:
return None
class ExtensionContainer(object):
    """Holds XML children and attributes not covered by a class's schema.

    Unrecognized child elements are kept as ExtensionElement instances in
    extension_elements, and unrecognized XML attributes are kept as string
    pairs in extension_attributes, so no information from the source XML is
    discarded.
    """

    def __init__(self, extension_elements=None, extension_attributes=None,
        text=None):
        # extension_elements: list of ExtensionElement instances.
        # extension_attributes: dict of XML attribute name/value strings.
        # text: character data for this element.
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
        self.text = text

    __init__ = v1_deprecated(
        'Please use data model classes in atom.data instead.')(
        __init__)

    # Three methods to create an object from an ElementTree
    def _HarvestElementTree(self, tree):
        """Populates this instance from an ElementTree element."""
        # Fill in the instance members from the contents of the XML tree.
        for child in tree:
            self._ConvertElementTreeToMember(child)
        for attribute, value in tree.attrib.iteritems():
            self._ConvertElementAttributeToMember(attribute, value)
        # Encode the text string according to the desired encoding type. (UTF-8)
        if tree.text:
            if MEMBER_STRING_ENCODING is unicode:
                self.text = tree.text
            else:
                self.text = tree.text.encode(MEMBER_STRING_ENCODING)

    def _ConvertElementTreeToMember(self, child_tree, current_class=None):
        """Stores a child element as a generic extension element.

        current_class is unused here; subclasses override this method with
        schema-aware conversion.
        """
        self.extension_elements.append(_ExtensionElementFromElementTree(
            child_tree))

    def _ConvertElementAttributeToMember(self, attribute, value):
        """Stores an XML attribute value in extension_attributes."""
        # Encode the attribute value's string with the desired type Default UTF-8
        if value:
            if MEMBER_STRING_ENCODING is unicode:
                self.extension_attributes[attribute] = value
            else:
                self.extension_attributes[attribute] = value.encode(
                    MEMBER_STRING_ENCODING)

    # One method to create an ElementTree from an object
    def _AddMembersToElementTree(self, tree):
        """Writes the stored extensions, attributes and text onto `tree`."""
        for child in self.extension_elements:
            child._BecomeChildElement(tree)
        for attribute, value in self.extension_attributes.iteritems():
            if value:
                if isinstance(value, unicode) or MEMBER_STRING_ENCODING is unicode:
                    tree.attrib[attribute] = value
                else:
                    # Decode the value from the desired encoding (default UTF-8).
                    tree.attrib[attribute] = value.decode(MEMBER_STRING_ENCODING)
        if self.text:
            if isinstance(self.text, unicode) or MEMBER_STRING_ENCODING is unicode:
                tree.text = self.text
            else:
                tree.text = self.text.decode(MEMBER_STRING_ENCODING)

    def FindExtensions(self, tag=None, namespace=None):
        """Searches extension elements for child nodes with the desired name.

        Returns a list of extension elements within this object whose tag
        and/or namespace match those passed in. To find all extensions in
        a particular namespace, specify the namespace but not the tag name.
        If you specify only the tag, the result list may contain extension
        elements in multiple namespaces.

        Args:
            tag: str (optional) The desired tag
            namespace: str (optional) The desired namespace

        Returns:
            A list of elements whose tag and/or namespace match the
            parameters values
        """
        results = []
        if tag and namespace:
            for element in self.extension_elements:
                if element.tag == tag and element.namespace == namespace:
                    results.append(element)
        elif tag and not namespace:
            for element in self.extension_elements:
                if element.tag == tag:
                    results.append(element)
        elif namespace and not tag:
            for element in self.extension_elements:
                if element.namespace == namespace:
                    results.append(element)
        else:
            # Neither filter given: return every extension element.
            for element in self.extension_elements:
                results.append(element)
        return results
class AtomBase(ExtensionContainer):
    """Foundation class mapping XML to class members for Atom classes.

    Subclasses declare their schema via _children (qualified child tag ->
    (member name, member class or [member class])) and _attributes (XML
    attribute name -> member name). Anything not in the schema falls back
    to the ExtensionContainer machinery.
    """

    _children = {}
    _attributes = {}

    def __init__(self, extension_elements=None, extension_attributes=None,
        text=None):
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
        self.text = text

    __init__ = v1_deprecated(
        'Please use data model classes in atom.data instead.')(
        __init__)

    def _ConvertElementTreeToMember(self, child_tree):
        """Assigns a parsed child element to the matching schema member."""
        # Find the element's tag in this class's list of child members
        if self.__class__._children.has_key(child_tree.tag):
            member_name = self.__class__._children[child_tree.tag][0]
            member_class = self.__class__._children[child_tree.tag][1]
            # If the class member is supposed to contain a list, make sure the
            # matching member is set to a list, then append the new member
            # instance to the list.
            if isinstance(member_class, list):
                if getattr(self, member_name) is None:
                    setattr(self, member_name, [])
                getattr(self, member_name).append(_CreateClassFromElementTree(
                    member_class[0], child_tree))
            else:
                setattr(self, member_name,
                        _CreateClassFromElementTree(member_class, child_tree))
        else:
            # Not part of this class's schema: keep it as an extension.
            ExtensionContainer._ConvertElementTreeToMember(self, child_tree)

    def _ConvertElementAttributeToMember(self, attribute, value):
        """Assigns an XML attribute value to the matching schema member."""
        # Find the attribute in this class's list of attributes.
        if self.__class__._attributes.has_key(attribute):
            # Find the member of this class which corresponds to the XML attribute
            # (lookup in current_class._attributes) and set this member to the
            # desired value (using self.__dict__).
            if value:
                # Encode the string to capture non-ascii characters (default UTF-8)
                if MEMBER_STRING_ENCODING is unicode:
                    setattr(self, self.__class__._attributes[attribute], value)
                else:
                    setattr(self, self.__class__._attributes[attribute],
                            value.encode(MEMBER_STRING_ENCODING))
        else:
            # Unknown attribute: keep it as an extension attribute.
            ExtensionContainer._ConvertElementAttributeToMember(
                self, attribute, value)

    # Three methods to create an ElementTree from an object
    def _AddMembersToElementTree(self, tree):
        """Writes schema members, then extensions, onto `tree`."""
        # Convert the members of this class which are XML child nodes.
        # This uses the class's _children dictionary to find the members which
        # should become XML child nodes.
        member_node_names = [values[0] for tag, values in
                             self.__class__._children.iteritems()]
        for member_name in member_node_names:
            member = getattr(self, member_name)
            if member is None:
                pass
            elif isinstance(member, list):
                for instance in member:
                    instance._BecomeChildElement(tree)
            else:
                member._BecomeChildElement(tree)
        # Convert the members of this class which are XML attributes.
        for xml_attribute, member_name in self.__class__._attributes.iteritems():
            member = getattr(self, member_name)
            if member is not None:
                if isinstance(member, unicode) or MEMBER_STRING_ENCODING is unicode:
                    tree.attrib[xml_attribute] = member
                else:
                    # Stored members are encoded byte strings; decode for
                    # ElementTree (default UTF-8).
                    tree.attrib[xml_attribute] = member.decode(MEMBER_STRING_ENCODING)
        # Lastly, call the ExtensionContainers's _AddMembersToElementTree to
        # convert any extension attributes.
        ExtensionContainer._AddMembersToElementTree(self, tree)

    def _BecomeChildElement(self, tree):
        """Appends this object to `tree` as a new child element.

        Note: Only for use with classes that have a _tag and _namespace class
        member. It is in AtomBase so that it can be inherited but it should
        not be called on instances of AtomBase.
        """
        new_child = ElementTree.Element('')
        tree.append(new_child)
        new_child.tag = '{%s}%s' % (self.__class__._namespace,
            self.__class__._tag)
        self._AddMembersToElementTree(new_child)

    def _ToElementTree(self):
        """Builds a standalone ElementTree element from this object.

        Note, this method is designed to be used only with classes that have
        a _tag and _namespace. It is placed in AtomBase for inheritance but
        should not be called on this class.
        """
        new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace,
            self.__class__._tag))
        self._AddMembersToElementTree(new_tree)
        return new_tree

    def ToString(self, string_encoding='UTF-8'):
        """Converts the Atom object to a string containing XML."""
        return ElementTree.tostring(self._ToElementTree(), encoding=string_encoding)

    def __str__(self):
        return self.ToString()
class Name(AtomBase):
    """Represents the atom:name element."""
    _tag = 'name'
    _namespace = ATOM_NAMESPACE
    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()

    def __init__(self, text=None, extension_elements=None,
        extension_attributes=None):
        """Creates an atom:name element.

        Args:
            text: str The character data for this element.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Additional XML attribute
                name/value string pairs.
        """
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
        self.text = text
def NameFromString(xml_string):
    """Parses a Name instance from an atom:name XML string."""
    return CreateClassFromXMLString(Name, xml_string)
class Email(AtomBase):
    """Represents the atom:email element."""
    _tag = 'email'
    _namespace = ATOM_NAMESPACE
    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()

    def __init__(self, text=None, extension_elements=None,
        extension_attributes=None):
        """Creates an atom:email element.

        Args:
            text: str The character data for this element.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Additional XML attribute
                name/value string pairs.
        """
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
        self.text = text
def EmailFromString(xml_string):
    """Parses an Email instance from an atom:email XML string."""
    return CreateClassFromXMLString(Email, xml_string)
class Uri(AtomBase):
    """Represents the atom:uri element."""
    _tag = 'uri'
    _namespace = ATOM_NAMESPACE
    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()

    def __init__(self, text=None, extension_elements=None,
        extension_attributes=None):
        """Creates an atom:uri element.

        Args:
            text: str The character data for this element.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Additional XML attribute
                name/value string pairs.
        """
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
        self.text = text
def UriFromString(xml_string):
    """Parses a Uri instance from an atom:uri XML string."""
    return CreateClassFromXMLString(Uri, xml_string)
class Person(AtomBase):
    """Base class from which atom:author and atom:contributor extend.

    Carries the name, email address, and web page URI describing a person
    associated with an Atom feed or entry. Not normally instantiated
    directly.
    """
    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()
    _children['{%s}name' % (ATOM_NAMESPACE)] = ('name', Name)
    _children['{%s}email' % (ATOM_NAMESPACE)] = ('email', Email)
    _children['{%s}uri' % (ATOM_NAMESPACE)] = ('uri', Uri)

    def __init__(self, name=None, email=None, uri=None,
        extension_elements=None, extension_attributes=None, text=None):
        """Creates a person construct.

        Args:
            name: Name The person's name.
            email: Email The person's email address.
            uri: Uri The URI of the person's webpage.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Additional XML attribute
                name/value string pairs.
            text: str The character data for this element.
        """
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
        self.text = text
        self.name = name
        self.email = email
        self.uri = uri
class Author(Person):
    """Represents the atom:author element.

    An author is a required element in Feed.
    """
    _tag = 'author'
    _namespace = ATOM_NAMESPACE
    _children = Person._children.copy()
    _attributes = Person._attributes.copy()

    def __init__(self, name=None, email=None, uri=None,
        extension_elements=None, extension_attributes=None, text=None):
        """Creates an atom:author element.

        Args:
            name: Name The author's name.
            email: Email The author's email address.
            uri: Uri The URI of the author's webpage.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Additional XML attribute
                name/value string pairs.
            text: str The character data for this element.
        """
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
        self.text = text
        self.name = name
        self.email = email
        self.uri = uri
def AuthorFromString(xml_string):
    """Parses an Author instance from an atom:author XML string."""
    return CreateClassFromXMLString(Author, xml_string)
class Contributor(Person):
    """Represents the atom:contributor element."""
    _tag = 'contributor'
    _namespace = ATOM_NAMESPACE
    _children = Person._children.copy()
    _attributes = Person._attributes.copy()

    def __init__(self, name=None, email=None, uri=None,
        extension_elements=None, extension_attributes=None, text=None):
        """Creates an atom:contributor element.

        Args:
            name: Name The contributor's name.
            email: Email The contributor's email address.
            uri: Uri The URI of the contributor's webpage.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Additional XML attribute
                name/value string pairs.
            text: str The character data for this element.
        """
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
        self.text = text
        self.name = name
        self.email = email
        self.uri = uri
def ContributorFromString(xml_string):
    """Parses a Contributor instance from an atom:contributor XML string."""
    return CreateClassFromXMLString(Contributor, xml_string)
class Link(AtomBase):
    """Represents the atom:link element."""
    _tag = 'link'
    _namespace = ATOM_NAMESPACE
    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()
    _attributes['rel'] = 'rel'
    _attributes['href'] = 'href'
    _attributes['type'] = 'type'
    _attributes['title'] = 'title'
    _attributes['length'] = 'length'
    _attributes['hreflang'] = 'hreflang'

    def __init__(self, href=None, rel=None, link_type=None, hreflang=None,
        title=None, length=None, text=None, extension_elements=None,
        extension_attributes=None):
        """Creates an atom:link element.

        Args:
            href: str The href attribute (link target) of the link.
            rel: str The link relation.
            link_type: str Stored as the element's 'type' attribute.
            hreflang: str The language for the href.
            title: str Human readable label for the link.
            length: str The length of the href's destination.
            text: str The character data for this element.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Additional XML attribute
                name/value string pairs.
        """
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
        self.text = text
        self.href = href
        self.rel = rel
        self.type = link_type
        self.hreflang = hreflang
        self.title = title
        self.length = length
def LinkFromString(xml_string):
    """Parses a Link instance from an atom:link XML string."""
    return CreateClassFromXMLString(Link, xml_string)
class Generator(AtomBase):
    """Represents the atom:generator element."""
    _tag = 'generator'
    _namespace = ATOM_NAMESPACE
    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()
    _attributes['uri'] = 'uri'
    _attributes['version'] = 'version'

    def __init__(self, uri=None, version=None, text=None,
        extension_elements=None, extension_attributes=None):
        """Creates an atom:generator element.

        Args:
            uri: str The generator's uri attribute.
            version: str The generator's version attribute.
            text: str The character data for this element.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Additional XML attribute
                name/value string pairs.
        """
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
        self.text = text
        self.uri = uri
        self.version = version
def GeneratorFromString(xml_string):
    """Parses a Generator instance from an atom:generator XML string."""
    return CreateClassFromXMLString(Generator, xml_string)
class Text(AtomBase):
    """Base class for Atom text constructs (title, summary, etc.).

    This class should never be instantiated directly.
    """
    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()
    _attributes['type'] = 'type'

    def __init__(self, text_type=None, text=None, extension_elements=None,
        extension_attributes=None):
        """Creates a text construct.

        Args:
            text_type: str Stored as the element's 'type' attribute.
            text: str The character data for this element.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Additional XML attribute
                name/value string pairs.
        """
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
        self.text = text
        self.type = text_type
class Title(Text):
    """Represents the atom:title element."""
    _tag = 'title'
    _namespace = ATOM_NAMESPACE
    _children = Text._children.copy()
    _attributes = Text._attributes.copy()

    def __init__(self, title_type=None, text=None, extension_elements=None,
        extension_attributes=None):
        """Creates an atom:title element.

        Args:
            title_type: str Stored as the element's 'type' attribute.
            text: str The character data for this element.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Additional XML attribute
                name/value string pairs.
        """
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
        self.text = text
        self.type = title_type
def TitleFromString(xml_string):
    """Parses a Title instance from an atom:title XML string."""
    return CreateClassFromXMLString(Title, xml_string)
class Subtitle(Text):
    """Represents the atom:subtitle element."""
    _tag = 'subtitle'
    _namespace = ATOM_NAMESPACE
    _children = Text._children.copy()
    _attributes = Text._attributes.copy()

    def __init__(self, subtitle_type=None, text=None, extension_elements=None,
        extension_attributes=None):
        """Creates an atom:subtitle element.

        Args:
            subtitle_type: str Stored as the element's 'type' attribute.
            text: str The character data for this element.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Additional XML attribute
                name/value string pairs.
        """
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
        self.text = text
        self.type = subtitle_type
def SubtitleFromString(xml_string):
    """Parses a Subtitle instance from an atom:subtitle XML string."""
    return CreateClassFromXMLString(Subtitle, xml_string)
class Rights(Text):
    """Represents the atom:rights element."""
    _tag = 'rights'
    _namespace = ATOM_NAMESPACE
    _children = Text._children.copy()
    _attributes = Text._attributes.copy()

    def __init__(self, rights_type=None, text=None, extension_elements=None,
        extension_attributes=None):
        """Creates an atom:rights element.

        Args:
            rights_type: str Stored as the element's 'type' attribute.
            text: str The character data for this element.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Additional XML attribute
                name/value string pairs.
        """
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
        self.text = text
        self.type = rights_type
def RightsFromString(xml_string):
    """Parses a Rights instance from an atom:rights XML string."""
    return CreateClassFromXMLString(Rights, xml_string)
class Summary(Text):
    """Represents the atom:summary element."""
    _tag = 'summary'
    _namespace = ATOM_NAMESPACE
    _children = Text._children.copy()
    _attributes = Text._attributes.copy()

    def __init__(self, summary_type=None, text=None, extension_elements=None,
        extension_attributes=None):
        """Creates an atom:summary element.

        Args:
            summary_type: str Stored as the element's 'type' attribute.
            text: str The character data for this element.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Additional XML attribute
                name/value string pairs.
        """
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
        self.text = text
        self.type = summary_type
def SummaryFromString(xml_string):
    """Parses a Summary instance from an atom:summary XML string."""
    return CreateClassFromXMLString(Summary, xml_string)
class Content(Text):
    """Represents the atom:content element."""
    _tag = 'content'
    _namespace = ATOM_NAMESPACE
    _children = Text._children.copy()
    _attributes = Text._attributes.copy()
    _attributes['src'] = 'src'

    def __init__(self, content_type=None, src=None, text=None,
        extension_elements=None, extension_attributes=None):
        """Creates an atom:content element.

        Args:
            content_type: str Stored as the element's 'type' attribute.
            src: str The src attribute (out-of-line content location).
            text: str The character data for this element.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Additional XML attribute
                name/value string pairs.
        """
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
        self.text = text
        self.type = content_type
        self.src = src
def ContentFromString(xml_string):
    """Parses a Content instance from an atom:content XML string."""
    return CreateClassFromXMLString(Content, xml_string)
class Category(AtomBase):
    """Represents the atom:category element."""
    _tag = 'category'
    _namespace = ATOM_NAMESPACE
    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()
    _attributes['term'] = 'term'
    _attributes['scheme'] = 'scheme'
    _attributes['label'] = 'label'

    def __init__(self, term=None, scheme=None, label=None, text=None,
        extension_elements=None, extension_attributes=None):
        """Creates an atom:category element.

        Args:
            term: str The category's term attribute.
            scheme: str The category's scheme attribute.
            label: str The category's label attribute.
            text: str The character data for this element.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Additional XML attribute
                name/value string pairs.
        """
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
        self.text = text
        self.term = term
        self.scheme = scheme
        self.label = label
def CategoryFromString(xml_string):
    """Parses a Category instance from an atom:category XML string."""
    return CreateClassFromXMLString(Category, xml_string)
class Id(AtomBase):
    """Represents the atom:id element."""
    _tag = 'id'
    _namespace = ATOM_NAMESPACE
    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()

    def __init__(self, text=None, extension_elements=None,
        extension_attributes=None):
        """Creates an atom:id element.

        Args:
            text: str The character data for this element.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Additional XML attribute
                name/value string pairs.
        """
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
        self.text = text
def IdFromString(xml_string):
    """Parses an Id instance from an atom:id XML string."""
    return CreateClassFromXMLString(Id, xml_string)
class Icon(AtomBase):
    """Represents the atom:icon element."""
    _tag = 'icon'
    _namespace = ATOM_NAMESPACE
    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()

    def __init__(self, text=None, extension_elements=None,
        extension_attributes=None):
        """Creates an atom:icon element.

        Args:
            text: str The character data for this element.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Additional XML attribute
                name/value string pairs.
        """
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
        self.text = text
def IconFromString(xml_string):
    """Parses an Icon instance from an atom:icon XML string."""
    return CreateClassFromXMLString(Icon, xml_string)
class Logo(AtomBase):
    """Represents the atom:logo element."""
    _tag = 'logo'
    _namespace = ATOM_NAMESPACE
    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()

    def __init__(self, text=None, extension_elements=None,
        extension_attributes=None):
        """Creates an atom:logo element.

        Args:
            text: str The character data for this element.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Additional XML attribute
                name/value string pairs.
        """
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
        self.text = text
def LogoFromString(xml_string):
  """Deserializes an atom:logo element from its XML string form."""
  return CreateClassFromXMLString(Logo, xml_string)
class Draft(AtomBase):
  """The app:draft element which indicates if this entry should be public."""

  _tag = 'draft'
  _namespace = APP_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an app:draft element.

    Args:
      text: str The character data in this element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra attribute name/value pairs.
    """
    self.text = text
    if extension_elements:
      self.extension_elements = extension_elements
    else:
      self.extension_elements = []
    if extension_attributes:
      self.extension_attributes = extension_attributes
    else:
      self.extension_attributes = {}
def DraftFromString(xml_string):
  """Deserializes an app:draft element from its XML string form."""
  return CreateClassFromXMLString(Draft, xml_string)
class Control(AtomBase):
  """The app:control element indicating restrictions on publication.

  The APP control element may contain a draft element indicating whether
  or not this entry should be publicly available.
  """

  _tag = 'control'
  _namespace = APP_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _children['{%s}draft' % APP_NAMESPACE] = ('draft', Draft)

  def __init__(self, draft=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an app:control element.

    Args:
      draft: Draft (optional) The app:draft child element.
      text: str (optional) The character data in this element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra attribute name/value pairs.
    """
    self.draft = draft
    self.text = text
    if extension_elements:
      self.extension_elements = extension_elements
    else:
      self.extension_elements = []
    if extension_attributes:
      self.extension_attributes = extension_attributes
    else:
      self.extension_attributes = {}
def ControlFromString(xml_string):
  """Deserializes an app:control element from its XML string form."""
  return CreateClassFromXMLString(Control, xml_string)
class Date(AtomBase):
  """A parent class for atom:updated, published, etc."""

  #TODO Add text to and from time conversion methods to allow users to set
  # the contents of a Date to a python DateTime object.

  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds a date-valued atom element.

    Args:
      text: str The character data in this element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra attribute name/value pairs.
    """
    self.text = text
    if extension_elements:
      self.extension_elements = extension_elements
    else:
      self.extension_elements = []
    if extension_attributes:
      self.extension_attributes = extension_attributes
    else:
      self.extension_attributes = {}
class Updated(Date):
  """The atom:updated element."""

  _tag = 'updated'
  _namespace = ATOM_NAMESPACE
  _children = Date._children.copy()
  _attributes = Date._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an atom:updated element.

    Args:
      text: str The character data in this element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra attribute name/value pairs.
    """
    self.text = text
    if extension_elements:
      self.extension_elements = extension_elements
    else:
      self.extension_elements = []
    if extension_attributes:
      self.extension_attributes = extension_attributes
    else:
      self.extension_attributes = {}
def UpdatedFromString(xml_string):
  """Deserializes an atom:updated element from its XML string form."""
  return CreateClassFromXMLString(Updated, xml_string)
class Published(Date):
  """The atom:published element."""

  _tag = 'published'
  _namespace = ATOM_NAMESPACE
  _children = Date._children.copy()
  _attributes = Date._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an atom:published element.

    Args:
      text: str The character data in this element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra attribute name/value pairs.
    """
    self.text = text
    if extension_elements:
      self.extension_elements = extension_elements
    else:
      self.extension_elements = []
    if extension_attributes:
      self.extension_attributes = extension_attributes
    else:
      self.extension_attributes = {}
def PublishedFromString(xml_string):
  """Deserializes an atom:published element from its XML string form."""
  return CreateClassFromXMLString(Published, xml_string)
class LinkFinder(object):
  """An "interface" providing methods to find link elements.

  Entry elements often contain multiple links which differ in the rel
  attribute or content type. Often, developers are interested in a specific
  type of link so this class provides methods to find specific classes of
  links.

  This class is used as a mixin in Atom entries and feeds. The host class
  must provide a 'link' attribute that is an iterable of objects with a
  'rel' attribute.
  """

  def _FindLinkByRel(self, rel):
    """Returns the first link whose rel attribute equals rel, or None.

    Shared scan used by all of the Get*Link helpers below, which were
    previously six copies of the same loop.
    """
    for a_link in self.link:
      if a_link.rel == rel:
        return a_link
    return None

  def GetSelfLink(self):
    """Find the first link with rel set to 'self'.

    Returns:
      An atom.Link or None if none of the links had rel equal to 'self'.
    """
    return self._FindLinkByRel('self')

  def GetEditLink(self):
    """Returns the first link with rel 'edit', or None."""
    return self._FindLinkByRel('edit')

  def GetEditMediaLink(self):
    """Returns the first link with rel 'edit-media', or None."""
    return self._FindLinkByRel('edit-media')

  def GetNextLink(self):
    """Returns the first link with rel 'next', or None."""
    return self._FindLinkByRel('next')

  def GetLicenseLink(self):
    """Returns the first link with rel 'license', or None."""
    return self._FindLinkByRel('license')

  def GetAlternateLink(self):
    """Returns the first link with rel 'alternate', or None."""
    return self._FindLinkByRel('alternate')
class FeedEntryParent(AtomBase, LinkFinder):
  """A super class for atom:feed and entry, contains shared attributes"""
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  # Register converters for the child elements shared by atom:feed and
  # atom:entry. A list in the tuple (e.g. [Author]) marks a repeated child
  # collected into a list member; a bare class marks a single child.
  _children['{%s}author' % ATOM_NAMESPACE] = ('author', [Author])
  _children['{%s}category' % ATOM_NAMESPACE] = ('category', [Category])
  _children['{%s}contributor' % ATOM_NAMESPACE] = ('contributor', [Contributor])
  _children['{%s}id' % ATOM_NAMESPACE] = ('id', Id)
  _children['{%s}link' % ATOM_NAMESPACE] = ('link', [Link])
  _children['{%s}rights' % ATOM_NAMESPACE] = ('rights', Rights)
  _children['{%s}title' % ATOM_NAMESPACE] = ('title', Title)
  _children['{%s}updated' % ATOM_NAMESPACE] = ('updated', Updated)
  def __init__(self, author=None, category=None, contributor=None,
      atom_id=None, link=None, rights=None, title=None, updated=None,
      text=None, extension_elements=None, extension_attributes=None):
    """Constructor shared by atom:feed and atom:entry.

    Args:
      author: list (optional) A list of Author instances.
      category: list (optional) A list of Category instances.
      contributor: list (optional) A list of Contributor instances.
      atom_id: Id (optional) The element's atom:id; stored as self.id.
      link: list (optional) A list of Link instances.
      rights: Rights (optional) The element's rights element.
      title: Title (optional) The element's title element.
      updated: Updated (optional) The element's updated element.
      text: str (optional) The text data in this element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra attribute name/value pairs.
    """
    self.author = author or []
    self.category = category or []
    self.contributor = contributor or []
    self.id = atom_id
    self.link = link or []
    self.rights = rights
    self.title = title
    self.updated = updated
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
class Source(FeedEntryParent):
  """The atom:source element"""
  _tag = 'source'
  _namespace = ATOM_NAMESPACE
  _children = FeedEntryParent._children.copy()
  _attributes = FeedEntryParent._attributes.copy()
  # Children specific to atom:source, on top of those inherited from
  # FeedEntryParent (author, category, id, link, rights, title, updated).
  _children['{%s}generator' % ATOM_NAMESPACE] = ('generator', Generator)
  _children['{%s}icon' % ATOM_NAMESPACE] = ('icon', Icon)
  _children['{%s}logo' % ATOM_NAMESPACE] = ('logo', Logo)
  _children['{%s}subtitle' % ATOM_NAMESPACE] = ('subtitle', Subtitle)
  def __init__(self, author=None, category=None, contributor=None,
      generator=None, icon=None, atom_id=None, link=None, logo=None,
      rights=None, subtitle=None, title=None, updated=None, text=None,
      extension_elements=None, extension_attributes=None):
    """Constructor for Source

    Args:
      author: list (optional) A list of Author instances which belong to this
          class.
      category: list (optional) A list of Category instances
      contributor: list (optional) A list on Contributor instances
      generator: Generator (optional)
      icon: Icon (optional)
      atom_id: Id (optional) The entry's Id element; stored as self.id.
      link: list (optional) A list of Link instances
      logo: Logo (optional)
      rights: Rights (optional) The entry's Rights element
      subtitle: Subtitle (optional) The entry's subtitle element
      title: Title (optional) the entry's title element
      updated: Updated (optional) the entry's updated element
      text: String (optional) The text contents of the element. This is the
          contents of the Entry's XML text node.
          (Example: <foo>This is the text</foo>)
      extension_elements: list (optional) A list of ExtensionElement instances
          which are children of this element.
      extension_attributes: dict (optional) A dictionary of strings which are
          the values for additional XML attributes of this element.
    """
    self.author = author or []
    self.category = category or []
    self.contributor = contributor or []
    self.generator = generator
    self.icon = icon
    self.id = atom_id
    self.link = link or []
    self.logo = logo
    self.rights = rights
    self.subtitle = subtitle
    self.title = title
    self.updated = updated
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def SourceFromString(xml_string):
  """Deserializes an atom:source element from its XML string form."""
  return CreateClassFromXMLString(Source, xml_string)
class Entry(FeedEntryParent):
  """The atom:entry element"""
  _tag = 'entry'
  _namespace = ATOM_NAMESPACE
  _children = FeedEntryParent._children.copy()
  _attributes = FeedEntryParent._attributes.copy()
  # Children specific to atom:entry, on top of those inherited from
  # FeedEntryParent. The app:control child lets an entry be marked as a
  # draft which should not be publicly viewable.
  _children['{%s}content' % ATOM_NAMESPACE] = ('content', Content)
  _children['{%s}published' % ATOM_NAMESPACE] = ('published', Published)
  _children['{%s}source' % ATOM_NAMESPACE] = ('source', Source)
  _children['{%s}summary' % ATOM_NAMESPACE] = ('summary', Summary)
  _children['{%s}control' % APP_NAMESPACE] = ('control', Control)
  def __init__(self, author=None, category=None, content=None,
      contributor=None, atom_id=None, link=None, published=None, rights=None,
      source=None, summary=None, control=None, title=None, updated=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Constructor for atom:entry

    Args:
      author: list A list of Author instances which belong to this class.
      category: list A list of Category instances
      content: Content The entry's Content
      contributor: list A list on Contributor instances
      atom_id: Id The entry's Id element; stored as self.id.
      link: list A list of Link instances
      published: Published The entry's Published element
      rights: Rights The entry's Rights element
      source: Source the entry's source element
      summary: Summary the entry's summary element
      title: Title the entry's title element
      updated: Updated the entry's updated element
      control: The entry's app:control element which can be used to mark an
          entry as a draft which should not be publicly viewable.
      text: String The text contents of the element. This is the contents
          of the Entry's XML text node. (Example: <foo>This is the text</foo>)
      extension_elements: list A list of ExtensionElement instances which are
          children of this element.
      extension_attributes: dict A dictionary of strings which are the values
          for additional XML attributes of this element.
    """
    self.author = author or []
    self.category = category or []
    self.content = content
    self.contributor = contributor or []
    self.id = atom_id
    self.link = link or []
    self.published = published
    self.rights = rights
    self.source = source
    self.summary = summary
    self.title = title
    self.updated = updated
    self.control = control
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}

  # Rebind __init__ so every construction emits a deprecation warning in
  # favor of the v2 class atom.data.Entry.
  __init__ = v1_deprecated('Please use atom.data.Entry instead.')(__init__)
def EntryFromString(xml_string):
  """Deserializes an atom:entry element from its XML string form."""
  return CreateClassFromXMLString(Entry, xml_string)
class Feed(Source):
  """The atom:feed element"""
  _tag = 'feed'
  _namespace = ATOM_NAMESPACE
  _children = Source._children.copy()
  _attributes = Source._attributes.copy()
  # A feed is a Source plus a repeated list of atom:entry children.
  _children['{%s}entry' % ATOM_NAMESPACE] = ('entry', [Entry])
  def __init__(self, author=None, category=None, contributor=None,
      generator=None, icon=None, atom_id=None, link=None, logo=None,
      rights=None, subtitle=None, title=None, updated=None, entry=None,
      text=None, extension_elements=None, extension_attributes=None):
    """Constructor for Feed

    Args:
      author: list (optional) A list of Author instances which belong to this
          class.
      category: list (optional) A list of Category instances
      contributor: list (optional) A list on Contributor instances
      generator: Generator (optional)
      icon: Icon (optional)
      atom_id: Id (optional) The feed's Id element; stored as self.id.
      link: list (optional) A list of Link instances
      logo: Logo (optional)
      rights: Rights (optional) The feed's Rights element
      subtitle: Subtitle (optional) The feed's subtitle element
      title: Title (optional) the feed's title element
      updated: Updated (optional) the feed's updated element
      entry: list (optional) A list of the Entry instances contained in the
          feed.
      text: String (optional) The text contents of the element. This is the
          contents of the Entry's XML text node.
          (Example: <foo>This is the text</foo>)
      extension_elements: list (optional) A list of ExtensionElement instances
          which are children of this element.
      extension_attributes: dict (optional) A dictionary of strings which are
          the values for additional XML attributes of this element.
    """
    self.author = author or []
    self.category = category or []
    self.contributor = contributor or []
    self.generator = generator
    self.icon = icon
    self.id = atom_id
    self.link = link or []
    self.logo = logo
    self.rights = rights
    self.subtitle = subtitle
    self.title = title
    self.updated = updated
    self.entry = entry or []
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}

  # Rebind __init__ so every construction emits a deprecation warning in
  # favor of the v2 class atom.data.Feed.
  __init__ = v1_deprecated('Please use atom.data.Feed instead.')(__init__)
def FeedFromString(xml_string):
  """Deserializes an atom:feed element from its XML string form."""
  return CreateClassFromXMLString(Feed, xml_string)
class ExtensionElement(object):
  """Represents extra XML elements contained in Atom classes."""

  def __init__(self, tag, namespace=None, attributes=None,
      children=None, text=None):
    """Constructor for ExtensionElement.

    Args:
      namespace: string (optional) The XML namespace for this element.
      tag: string (optional) The tag (without the namespace qualifier) for
          this element. To reconstruct the full qualified name of the
          element, combine this tag with the namespace.
      attributes: dict (optional) The attribute value string pairs for the
          XML attributes of this element.
      children: list (optional) A list of ExtensionElements which represent
          the XML child nodes of this element.
      text: string (optional) The character data for this element.
    """
    self.namespace = namespace
    self.tag = tag
    self.attributes = attributes or {}
    self.children = children or []
    self.text = text

  def ToString(self):
    """Serializes this element and its subtree to an XML string."""
    element_tree = self._TransferToElementTree(ElementTree.Element(''))
    return ElementTree.tostring(element_tree, encoding="UTF-8")

  def _TransferToElementTree(self, element_tree):
    """Copies this element's tag, attributes, children, and text onto the
    given ElementTree element. Returns the element, or None if self has no
    tag."""
    if self.tag is None:
      return None
    if self.namespace is not None:
      # Clark notation: '{namespace}tag'.
      element_tree.tag = '{%s}%s' % (self.namespace, self.tag)
    else:
      element_tree.tag = self.tag
    # Bug fix: use items() instead of iteritems() — behaviorally identical
    # on Python 2 and also works on Python 3, where iteritems was removed.
    for key, value in self.attributes.items():
      element_tree.attrib[key] = value
    for child in self.children:
      child._BecomeChildElement(element_tree)
    element_tree.text = self.text
    return element_tree

  def _BecomeChildElement(self, element_tree):
    """Converts this object into an etree element and adds it as a child node.

    Adds self to the ElementTree. This method is required to avoid verbose
    XML which constantly redefines the namespace.

    Args:
      element_tree: ElementTree._Element The element to which this object's
          XML will be added.
    """
    new_element = ElementTree.Element('')
    element_tree.append(new_element)
    self._TransferToElementTree(new_element)

  def FindChildren(self, tag=None, namespace=None):
    """Searches child nodes for objects with the desired tag/namespace.

    Returns a list of extension elements within this object whose tag
    and/or namespace match those passed in. To find all children in
    a particular namespace, specify the namespace but not the tag name.
    If you specify only the tag, the result list may contain extension
    elements in multiple namespaces.

    Args:
      tag: str (optional) The desired tag
      namespace: str (optional) The desired namespace

    Returns:
      A list of elements whose tag and/or namespace match the parameters
      values
    """
    results = []
    if tag and namespace:
      for element in self.children:
        if element.tag == tag and element.namespace == namespace:
          results.append(element)
    elif tag and not namespace:
      for element in self.children:
        if element.tag == tag:
          results.append(element)
    elif namespace and not tag:
      for element in self.children:
        if element.namespace == namespace:
          results.append(element)
    else:
      # No filter given: return a copy of the child list.
      results = list(self.children)
    return results
def ExtensionElementFromString(xml_string):
  """Parses an XML string and returns the equivalent ExtensionElement tree."""
  parsed_tree = ElementTree.fromstring(xml_string)
  return _ExtensionElementFromElementTree(parsed_tree)
def _ExtensionElementFromElementTree(element_tree):
  """Recursively converts an ElementTree element into an ExtensionElement.

  Args:
    element_tree: An ElementTree element whose tag, attributes, children,
        and text are copied.

  Returns:
    An ExtensionElement mirroring element_tree's subtree.
  """
  element_tag = element_tree.tag
  if '}' in element_tag:
    # Clark notation '{namespace}tag': split into namespace and local tag.
    namespace = element_tag[1:element_tag.index('}')]
    tag = element_tag[element_tag.index('}')+1:]
  else:
    namespace = None
    tag = element_tag
  extension = ExtensionElement(namespace=namespace, tag=tag)
  # Bug fix: use items() instead of iteritems() — behaviorally identical
  # on Python 2 and also works on Python 3, where iteritems was removed.
  for key, value in element_tree.attrib.items():
    extension.attributes[key] = value
  for child in element_tree:
    extension.children.append(_ExtensionElementFromElementTree(child))
  extension.text = element_tree.text
  return extension
def deprecated(warning=None):
  """Decorator to raise warning each time the function is called.

  Args:
    warning: The warning message to be displayed as a string (optional).
  """
  warning = warning or ''
  # This closure is what is returned from the deprecated function.
  def mark_deprecated(f):
    # The deprecated_function wraps the actual call to f.
    def deprecated_function(*args, **kwargs):
      warnings.warn(warning, DeprecationWarning, stacklevel=2)
      return f(*args, **kwargs)
    # Preserve the original name to avoid masking all decorated functions as
    # 'deprecated_function'.
    try:
      deprecated_function.func_name = f.func_name
    except (TypeError, AttributeError):
      # Bug fix: also catch AttributeError. Setting func_name is not allowed
      # in Python 2.3 (TypeError), and the attribute was renamed to __name__
      # in Python 3 (AttributeError) — previously the decorator crashed there.
      pass
    return deprecated_function
  return mark_deprecated
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AtomService provides CRUD ops. in line with the Atom Publishing Protocol.
AtomService: Encapsulates the ability to perform insert, update and delete
operations with the Atom Publishing Protocol on which GData is
based. An instance can perform query, insertion, deletion, and
update.
HttpRequest: Function that performs a GET, POST, PUT, or DELETE HTTP request
to the specified end point. An AtomService object or a subclass can be
used to specify information about the request.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import atom.http_interface
import atom.url
import atom.http
import atom.token_store
import os
import httplib
import urllib
import re
import base64
import socket
import warnings
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
class AtomService(object):
  """Performs Atom Publishing Protocol CRUD operations.

  The AtomService contains methods to perform HTTP CRUD operations.
  """

  # Default values for members.
  port = 80
  ssl = False
  # Set the current_token to force the AtomService to use this token
  # instead of searching for an appropriate token in the token_store.
  current_token = None
  auto_store_tokens = True
  auto_set_current_token = True

  def _get_override_token(self):
    return self.current_token

  def _set_override_token(self, token):
    self.current_token = token

  # Alias for current_token kept for backwards compatibility.
  override_token = property(_get_override_token, _set_override_token)

  #@atom.v1_deprecated('Please use atom.client.AtomPubClient instead.')
  def __init__(self, server=None, additional_headers=None,
      application_name='', http_client=None, token_store=None):
    """Creates a new AtomService client.

    Args:
      server: string (optional) The start of a URL for the server
          to which all operations should be directed. Example:
          'www.google.com'
      additional_headers: dict (optional) Any additional HTTP headers which
          should be included with CRUD operations.
      application_name: str (optional) Name inserted into the User-Agent
          header sent with every request.
      http_client: An object responsible for making HTTP requests using a
          request method. If none is provided, a new instance of
          atom.http.ProxiedHttpClient will be used.
      token_store: Keeps a collection of authorization tokens which can be
          applied to requests for a specific URLs. Critical methods are
          find_token based on a URL (atom.url.Url or a string), add_token,
          and remove_token.
    """
    self.http_client = http_client or atom.http.ProxiedHttpClient()
    self.token_store = token_store or atom.token_store.TokenStore()
    self.server = server
    self.additional_headers = additional_headers or {}
    self.additional_headers['User-Agent'] = atom.http_interface.USER_AGENT % (
        application_name,)
    # If debug is True, the HTTPConnection will display debug information
    self._set_debug(False)

  __init__ = atom.v1_deprecated(
      'Please use atom.client.AtomPubClient instead.')(
      __init__)

  def _get_debug(self):
    return self.http_client.debug

  def _set_debug(self, value):
    self.http_client.debug = value

  debug = property(_get_debug, _set_debug,
      doc='If True, HTTP debug information is printed.')

  def use_basic_auth(self, username, password, scopes=None):
    """Creates and stores a BasicAuthToken for the given credentials.

    Args:
      username: str
      password: str
      scopes: list (optional) URLs (str or atom.url.Url) this token may be
          applied to. Defaults to [atom.token_store.SCOPE_ALL].

    Returns:
      True if the token was set/stored, False if username or password was
      None.
    """
    if username is not None and password is not None:
      if scopes is None:
        scopes = [atom.token_store.SCOPE_ALL]
      base_64_string = base64.encodestring('%s:%s' % (username, password))
      # Bug fix: honor the caller-supplied scopes. Previously the token was
      # always constructed with scopes=[atom.token_store.SCOPE_ALL], so the
      # scopes parameter was silently ignored.
      token = BasicAuthToken('Basic %s' % base_64_string.strip(),
          scopes=scopes)
      if self.auto_set_current_token:
        self.current_token = token
      if self.auto_store_tokens:
        return self.token_store.add_token(token)
      return True
    return False

  def UseBasicAuth(self, username, password, for_proxy=False):
    """Sets an Authentication: Basic HTTP header containing plaintext.

    Deprecated, use use_basic_auth instead.

    The username and password are base64 encoded and added to an HTTP header
    which will be included in each request. Note that your username and
    password are sent in plaintext.

    Args:
      username: str
      password: str
    """
    self.use_basic_auth(username, password)

  #@atom.v1_deprecated('Please use atom.client.AtomPubClient for requests.')
  def request(self, operation, url, data=None, headers=None,
      url_params=None):
    """Performs an HTTP request using a token found for the target URL.

    Args:
      operation: str The HTTP method ('GET', 'POST', 'PUT', or 'DELETE').
      url: str or atom.url.Url The target of the request; relative URLs are
          resolved against self.server.
      data: The request body, if any.
      headers: dict (optional) Extra HTTP headers merged over
          self.additional_headers.
      url_params: dict (optional) Query parameters added to the URL.
    """
    if isinstance(url, (str, unicode)):
      if url.startswith('http:') and self.ssl:
        # Force all requests to be https if self.ssl is True.
        url = atom.url.parse_url('https:' + url[5:])
      elif not url.startswith('http') and self.ssl:
        url = atom.url.parse_url('https://%s%s' % (self.server, url))
      elif not url.startswith('http'):
        url = atom.url.parse_url('http://%s%s' % (self.server, url))
      else:
        url = atom.url.parse_url(url)

    if url_params:
      for name, value in url_params.iteritems():
        url.params[name] = value

    all_headers = self.additional_headers.copy()
    if headers:
      all_headers.update(headers)

    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      content_length = CalculateDataLength(data)
      if content_length:
        all_headers['Content-Length'] = str(content_length)

    # Find an Authorization token for this URL if one is available.
    if self.override_token:
      auth_token = self.override_token
    else:
      auth_token = self.token_store.find_token(url)
    return auth_token.perform_request(self.http_client, operation, url,
        data=data, headers=all_headers)

  request = atom.v1_deprecated(
      'Please use atom.client.AtomPubClient for requests.')(
      request)

  # CRUD operations

  def Get(self, uri, extra_headers=None, url_params=None, escape_params=True):
    """Query the APP server with the given URI

    The uri is the portion of the URI after the server value
    (server example: 'www.google.com').

    Example use:
    To perform a query against Google Base, set the server to
    'base.google.com' and set the uri to '/base/feeds/...', where ... is
    your query. For example, to find snippets for all digital cameras uri
    should be set to: '/base/feeds/snippets?bq=digital+camera'

    Args:
      uri: string The query in the form of a URI. Example:
          '/base/feeds/snippets?bq=digital+camera'.
      extra_headers: dict (optional) Extra HTTP headers to be included
          in the GET request. These headers are in addition to
          those stored in the client's additional_headers property.
          The client automatically sets the Content-Type and
          Authorization headers.
      url_params: dict (optional) Additional URL parameters to be included
          in the query. These are translated into query arguments
          in the form '&dict_key=value&...'.
          Example: {'max-results': '250'} becomes &max-results=250
      escape_params: boolean (optional) If false, the calling code has
          already ensured that the query will form a valid URL (all
          reserved characters have been escaped). If true, this method
          will escape the query and any URL parameters provided.

    Returns:
      httplib.HTTPResponse The server's response to the GET request.
    """
    return self.request('GET', uri, data=None, headers=extra_headers,
        url_params=url_params)

  def Post(self, data, uri, extra_headers=None, url_params=None,
      escape_params=True, content_type='application/atom+xml'):
    """Insert data into an APP server at the given URI.

    Args:
      data: string, ElementTree._Element, or something with a __str__ method
          The XML to be sent to the uri.
      uri: string The location (feed) to which the data should be inserted.
          Example: '/base/feeds/items'.
      extra_headers: dict (optional) HTTP headers which are to be included.
          The client automatically sets the Content-Type,
          Authorization, and Content-Length headers.
      url_params: dict (optional) Additional URL parameters to be included
          in the URI. These are translated into query arguments
          in the form '&dict_key=value&...'.
          Example: {'max-results': '250'} becomes &max-results=250
      escape_params: boolean (optional) If false, the calling code has
          already ensured that the query will form a valid URL (all
          reserved characters have been escaped). If true, this method
          will escape the query and any URL parameters provided.
      content_type: str (optional) Value for the Content-Type header.

    Returns:
      httplib.HTTPResponse Server's response to the POST request.
    """
    if extra_headers is None:
      extra_headers = {}
    if content_type:
      extra_headers['Content-Type'] = content_type
    return self.request('POST', uri, data=data, headers=extra_headers,
        url_params=url_params)

  def Put(self, data, uri, extra_headers=None, url_params=None,
      escape_params=True, content_type='application/atom+xml'):
    """Updates an entry at the given URI.

    Args:
      data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The
          XML containing the updated data.
      uri: string A URI indicating entry to which the update will be applied.
          Example: '/base/feeds/items/ITEM-ID'
      extra_headers: dict (optional) HTTP headers which are to be included.
          The client automatically sets the Content-Type,
          Authorization, and Content-Length headers.
      url_params: dict (optional) Additional URL parameters to be included
          in the URI. These are translated into query arguments
          in the form '&dict_key=value&...'.
          Example: {'max-results': '250'} becomes &max-results=250
      escape_params: boolean (optional) If false, the calling code has
          already ensured that the query will form a valid URL (all
          reserved characters have been escaped). If true, this method
          will escape the query and any URL parameters provided.
      content_type: str (optional) Value for the Content-Type header.

    Returns:
      httplib.HTTPResponse Server's response to the PUT request.
    """
    if extra_headers is None:
      extra_headers = {}
    if content_type:
      extra_headers['Content-Type'] = content_type
    return self.request('PUT', uri, data=data, headers=extra_headers,
        url_params=url_params)

  def Delete(self, uri, extra_headers=None, url_params=None,
      escape_params=True):
    """Deletes the entry at the given URI.

    Args:
      uri: string The URI of the entry to be deleted. Example:
          '/base/feeds/items/ITEM-ID'
      extra_headers: dict (optional) HTTP headers which are to be included.
          The client automatically sets the Content-Type and
          Authorization headers.
      url_params: dict (optional) Additional URL parameters to be included
          in the URI. These are translated into query arguments
          in the form '&dict_key=value&...'.
          Example: {'max-results': '250'} becomes &max-results=250
      escape_params: boolean (optional) If false, the calling code has
          already ensured that the query will form a valid URL (all
          reserved characters have been escaped). If true, this method
          will escape the query and any URL parameters provided.

    Returns:
      httplib.HTTPResponse Server's response to the DELETE request.
    """
    return self.request('DELETE', uri, data=None, headers=extra_headers,
        url_params=url_params)
class BasicAuthToken(atom.http_interface.GenericToken):
  """Adds an HTTP Basic Auth 'Authorization' header to outgoing requests."""
  def __init__(self, auth_header, scopes=None):
    """Creates a token used to add Basic Auth headers to HTTP requests.

    Args:
      auth_header: str The value for the Authorization header.
      scopes: list of str or atom.url.Url specifying the beginnings of URLs
          for which this token can be used. For example, if scopes contains
          'http://example.com/foo', then this token can be used for a request
          to 'http://example.com/foo/bar' but it cannot be used for a request
          to 'http://example.com/baz'
    """
    self.auth_header = auth_header
    self.scopes = scopes or []
  def perform_request(self, http_client, operation, url, data=None,
      headers=None):
    """Sets the Authorization header and delegates to the http_client."""
    if headers is None:
      headers = {'Authorization':self.auth_header}
    else:
      # Overwrites any Authorization value already present in headers.
      headers['Authorization'] = self.auth_header
    return http_client.request(operation, url, data=data, headers=headers)
  def __str__(self):
    # NOTE: the string form is the raw Authorization header value, which
    # contains base64-encoded credentials — avoid logging it.
    return self.auth_header
  def valid_for_scope(self, url):
    """Tells the caller if the token authorizes access to the desired URL.
    """
    if isinstance(url, (str, unicode)):
      url = atom.url.parse_url(url)
    for scope in self.scopes:
      # SCOPE_ALL authorizes every URL.
      if scope == atom.token_store.SCOPE_ALL:
        return True
      if isinstance(scope, (str, unicode)):
        scope = atom.url.parse_url(scope)
      if scope == url:
        return True
      # Check the host and the path, but ignore the port and protocol.
      elif scope.host == url.host and not scope.path:
        # A scope with no path authorizes the entire host.
        return True
      elif scope.host == url.host and scope.path and not url.path:
        # Host matches but the request URL has no path to compare against
        # the scope's path; this scope is inconclusive, try the next one.
        continue
      elif scope.host == url.host and url.path.startswith(scope.path):
        # The scope path is a prefix of the request path.
        return True
    return False
def PrepareConnection(service, full_uri):
  """Opens a connection to the server based on the full URI.

  This method is deprecated, instead use atom.http.HttpClient.request.

  Examines the target URI and the proxy settings, which are set as
  environment variables, to open a connection with the server. This
  connection is used to make an HTTP request.

  Args:
    service: atom.AtomService or a subclass. It must have a server string
        which represents the server host to which the request should be made.
        It may also have a dictionary of additional_headers to send in the
        HTTP request.
    full_uri: str Which is the target relative (lacks protocol and host) or
        absolute URL to be opened. Example:
        'https://www.google.com/accounts/ClientLogin' or
        'base/feeds/snippets' where the server is set to www.google.com.

  Returns:
    A tuple containing the httplib.HTTPConnection and the full_uri for the
    request.

  Raises:
    Exception: If the proxy CONNECT handshake does not return HTTP 200.
  """
  deprecation('calling deprecated function PrepareConnection')
  (server, port, ssl, partial_uri) = ProcessUrl(service, full_uri)
  if ssl:
    # Destination is https.
    proxy = os.environ.get('https_proxy')
    if proxy:
      (p_server, p_port, p_ssl, p_uri) = ProcessUrl(service, proxy, True)
      # Proxy credentials may be supplied with either hyphenated or
      # underscored environment variable names.
      proxy_username = os.environ.get('proxy-username')
      if not proxy_username:
        proxy_username = os.environ.get('proxy_username')
      proxy_password = os.environ.get('proxy-password')
      if not proxy_password:
        proxy_password = os.environ.get('proxy_password')
      if proxy_username:
        user_auth = base64.encodestring('%s:%s' % (proxy_username,
                                                   proxy_password))
        proxy_authorization = ('Proxy-authorization: Basic %s\r\n' % (
            user_auth.strip()))
      else:
        proxy_authorization = ''
      proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (server, port)
      user_agent = 'User-Agent: %s\r\n' % (
          service.additional_headers['User-Agent'])
      proxy_pieces = (proxy_connect + proxy_authorization + user_agent
                      + '\r\n')
      # Now connect: very simple recv and error checking.
      p_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      p_sock.connect((p_server, p_port))
      p_sock.sendall(proxy_pieces)
      response = ''
      # Wait for the full response.
      while response.find("\r\n\r\n") == -1:
        response += p_sock.recv(8192)
      p_status = response.split()[1]
      if p_status != str(200):
        # Bug fix: the original used the long-removed string-exception form
        # ("raise 'Error status=', str(p_status)"), which itself raises a
        # TypeError on Python 2.6+. Raise a real exception instead.
        raise Exception('Error status=%s' % str(p_status))
      # Trivial setup for ssl socket.
      ssl = socket.ssl(p_sock, None, None)
      fake_sock = httplib.FakeSocket(p_sock, ssl)
      # Initialize httplib and replace with the proxy socket.
      connection = httplib.HTTPConnection(server)
      connection.sock = fake_sock
      full_uri = partial_uri
    else:
      connection = httplib.HTTPSConnection(server, port)
      full_uri = partial_uri
  else:
    # Destination is http.
    proxy = os.environ.get('http_proxy')
    if proxy:
      # NOTE(review): the https branch above passes `service` to ProcessUrl
      # while this branch passes `service.server` — confirm ProcessUrl
      # accepts both before unifying.
      (p_server, p_port, p_ssl, p_uri) = ProcessUrl(service.server, proxy,
                                                    True)
      proxy_username = os.environ.get('proxy-username')
      if not proxy_username:
        proxy_username = os.environ.get('proxy_username')
      proxy_password = os.environ.get('proxy-password')
      if not proxy_password:
        proxy_password = os.environ.get('proxy_password')
      if proxy_username:
        UseBasicAuth(service, proxy_username, proxy_password, True)
      connection = httplib.HTTPConnection(p_server, p_port)
      if not full_uri.startswith("http://"):
        if full_uri.startswith("/"):
          full_uri = "http://%s%s" % (service.server, full_uri)
        else:
          full_uri = "http://%s/%s" % (service.server, full_uri)
    else:
      connection = httplib.HTTPConnection(server, port)
      full_uri = partial_uri
  return (connection, full_uri)
def UseBasicAuth(service, username, password, for_proxy=False):
    """Adds a Basic Authorization header built from plaintext credentials.

    Deprecated, use AtomService.use_basic_auth instead.

    The username and password are base64 encoded (NOT encrypted) and stored
    in the service's additional_headers dict so they are included in each
    subsequent request.

    Args:
        service: atom.AtomService or a subclass which has an
            additional_headers dict as a member.
        username: str
        password: str
        for_proxy: bool, if True the 'Proxy-Authorization' header is set
            instead of 'Authorization'. Defaults to False.
    """
    deprecation('calling deprecated function UseBasicAuth')
    credentials = base64.encodestring('%s:%s' % (username, password)).strip()
    header_name = {True: 'Proxy-Authorization',
                   False: 'Authorization'}[bool(for_proxy)]
    service.additional_headers[header_name] = 'Basic %s' % (credentials,)
def ProcessUrl(service, url, for_proxy=False):
    """Processes a passed URL. If the URL does not begin with https?, then
    the default value for server is used.

    This method is deprecated, use atom.url.parse_url instead.

    Returns:
        Tuple of (server, port, ssl, request_uri).
    """
    # NOTE(review): for_proxy is accepted but never read in this body —
    # confirm callers rely only on the signature.
    if not isinstance(url, atom.url.Url):
        url = atom.url.parse_url(url)
    server = url.host
    ssl = False
    port = 80
    if not server:
        # The URL carries no host: fall back to the service's configuration
        # (or treat the service argument itself as the host string).
        if hasattr(service, 'server'):
            server = service.server
        else:
            server = service
        if not url.protocol and hasattr(service, 'ssl'):
            ssl = service.ssl
        if hasattr(service, 'port'):
            port = service.port
    else:
        # A host was present, so the URL's own protocol/port win.
        ssl = (url.protocol == 'https')
        if url.port:
            port = int(url.port)
        elif ssl:
            # Port was left at the default 80 above; switch to https default.
            port = 443
    return (server, port, ssl, url.get_request_uri())
def DictionaryToParamList(url_parameters, escape_params=True):
    """Convert a dictionary of URL arguments into a URL parameter string.

    This function is deprecated, use atom.url.Url instead.

    Args:
        url_parameters: The dictionary of key-value pairs which will be
            converted into URL parameters. For example,
            {'dry-run': 'true', 'foo': 'bar'}
            will become ['dry-run=true', 'foo=bar'].
        escape_params: bool, when True (the default) keys and values are
            escaped with urllib.quote_plus; otherwise they are passed
            through str() unchanged.

    Returns:
        A list which contains a string for each key-value pair. The strings
        are ready to be incorporated into a URL by using
        '&'.join([] + parameter_list).
    """
    # Select the transformation applied to both keys and values.
    transform_op = [str, urllib.quote_plus][bool(escape_params)]
    pairs = (url_parameters or {}).items()
    return ['%s=%s' % (transform_op(param), transform_op(value))
            for param, value in pairs]
def BuildUri(uri, url_params=None, escape_params=True):
    """Converts a uri string and a collection of parameters into a URI.

    This function is deprecated, use atom.url.Url instead.

    Args:
        uri: string The start of the desired URI. This string can already
            contain URL parameters. Examples: '/base/feeds/snippets',
            '/base/feeds/snippets?bq=digital+camera'
        url_params: dict (optional) Additional URL parameters to be included
            in the query. These are translated into query arguments in the
            form '&dict_key=value&...'.
            Example: {'max-results': '250'} becomes &max-results=250
        escape_params: boolean (optional) If false, the calling code has
            already ensured that the query will form a valid URL (all
            reserved characters have been escaped). If true, this method
            will escape the query and any URL parameters provided.

    Returns:
        string The URI consisting of the escaped URL parameters appended to
        the initial uri string.
    """
    parameter_list = DictionaryToParamList(url_params, escape_params)
    if not parameter_list:
        return uri
    # Join with '&' when the uri already has a query string, '?' otherwise.
    separator = ('?', '&')['?' in uri]
    return uri + separator + '&'.join(parameter_list)
def HttpRequest(service, operation, data, uri, extra_headers=None,
    url_params=None, escape_params=True, content_type='application/atom+xml'):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and DELETE.

    This method is deprecated, use atom.http.HttpClient.request instead.

    Usage example, perform an HTTP GET on http://www.google.com/:
        import atom.service
        client = atom.service.AtomService()
        http_response = client.Get('http://www.google.com/')
    or you could set the client.server to 'www.google.com' and use the
    following:
        client.server = 'www.google.com'
        http_response = client.Get('/')

    Args:
        service: atom.AtomService object which contains some of the parameters
            needed to make the request. The following members are used to
            construct the HTTP call: server (str), additional_headers (dict),
            port (int), and ssl (bool).
        operation: str The HTTP operation to be performed. This is usually one
            of 'GET', 'POST', 'PUT', or 'DELETE'
        data: ElementTree, filestream, list of parts, or other object which
            can be converted to a string. Should be set to None when
            performing a GET or PUT. If data is a file-like object which can
            be read, this method will read a chunk of 100K bytes at a time
            and send them. If the data is a list of parts to be sent, each
            part will be evaluated and sent.
        uri: The beginning of the URL to which the request should be sent.
            Examples: '/', '/base/feeds/snippets',
            '/m8/feeds/contacts/default/base'
        extra_headers: dict of strings. HTTP headers which should be sent
            in the request. These headers are in addition to those stored in
            service.additional_headers.
        url_params: dict of strings. Key value pairs to be added to the URL as
            URL parameters. For example {'foo':'bar', 'test':'param'} will
            become ?foo=bar&test=param.
        escape_params: bool default True. If true, the keys and values in
            url_params will be URL escaped when the form is constructed
            (Special characters converted to %XX form.)
        content_type: str The MIME type for the data being sent. Defaults to
            'application/atom+xml', this is only used if data is set.

    Returns:
        The httplib.HTTPResponse obtained from connection.getresponse().
    """
    deprecation('call to deprecated function HttpRequest')
    full_uri = BuildUri(uri, url_params, escape_params)
    (connection, full_uri) = PrepareConnection(service, full_uri)
    if extra_headers is None:
        extra_headers = {}
    # Turn on debug mode if the debug member is set.
    if service.debug:
        connection.debuglevel = 1
    connection.putrequest(operation, full_uri)
    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if (data and not service.additional_headers.has_key('Content-Length') and
        not extra_headers.has_key('Content-Length')):
        content_length = CalculateDataLength(data)
        if content_length:
            extra_headers['Content-Length'] = str(content_length)
    if content_type:
        extra_headers['Content-Type'] = content_type
    # Send the HTTP headers (persistent service headers first, then the
    # per-request extras, which therefore reach the wire last).
    if isinstance(service.additional_headers, dict):
        for header in service.additional_headers:
            connection.putheader(header, service.additional_headers[header])
    if isinstance(extra_headers, dict):
        for header in extra_headers:
            connection.putheader(header, extra_headers[header])
    connection.endheaders()
    # If there is data, send it in the request; lists are sent part by part.
    if data:
        if isinstance(data, list):
            for data_part in data:
                __SendDataPart(data_part, connection)
        else:
            __SendDataPart(data, connection)
    # Return the HTTP Response from the server.
    return connection.getresponse()
def __SendDataPart(data, connection):
    """Sends one part of a request body over an open connection.

    This method is deprecated, use atom.http._send_data_part.

    Args:
        data: str, ElementTree element, file-like object with a read()
            method, or any object convertible with str().
        connection: httplib.HTTPConnection (or compatible) to send on.
    """
    # BUG FIX: the original called the undefined name 'deprecated', which
    # raised NameError on every invocation; the module helper is
    # 'deprecation'.
    deprecation('call to deprecated function __SendDataPart')
    if isinstance(data, str):
        # TODO add handling for unicode.
        connection.send(data)
        return
    elif ElementTree.iselement(data):
        connection.send(ElementTree.tostring(data))
        return
    # Check to see if data is a file-like object that has a read method.
    elif hasattr(data, 'read'):
        # Read the file and send it a chunk at a time.
        while 1:
            binarydata = data.read(100000)
            if binarydata == '':
                break
            connection.send(binarydata)
        return
    else:
        # The data object was not a file.
        # Try to convert to a string and send the data.
        connection.send(str(data))
        return
def CalculateDataLength(data):
    """Attempts to determine the length of the data to send.

    This method will respond with a length only if the data is a string or
    an ElementTree element; lists and file-like objects yield None.

    Args:
        data: object If this is not a string or ElementTree element this
            function will return None.
    """
    if isinstance(data, str):
        return len(data)
    if isinstance(data, list):
        # A multipart list is sent piece by piece; no single length applies.
        return None
    if ElementTree.iselement(data):
        return len(ElementTree.tostring(data))
    if hasattr(data, 'read'):
        # If this is a file-like object, don't try to guess the length.
        return None
    return len(str(data))
def deprecation(message):
    """Emits a DeprecationWarning carrying the given message."""
    # stacklevel=2 attributes the warning to the caller of the deprecated
    # function rather than to this helper itself.
    warnings.warn(message, category=DeprecationWarning, stacklevel=2)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides a common interface for all HTTP requests.
HttpResponse: Represents the server's response to an HTTP request. Provides
an interface identical to httplib.HTTPResponse which is the response
expected from higher level classes which use HttpClient.request.
GenericHttpClient: Provides an interface (superclass) for an object
responsible for making HTTP requests. Subclasses of this object are
used in AtomService and GDataService to make requests to the server. By
changing the http_client member object, the AtomService is able to make
HTTP requests using different logic (for example, when running on
Google App Engine, the http_client makes requests using the App Engine
urlfetch API).
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import StringIO
USER_AGENT = '%s GData-Python/2.0.9'
class Error(Exception):
    """Base class for exceptions raised by this module."""
    pass


class UnparsableUrlObject(Error):
    """Error subclass; the name indicates a URL value that could not be
    parsed. Raise sites live outside this module."""
    pass


class ContentLengthRequired(Error):
    """Error subclass; the name indicates a request whose body length could
    not be determined. Raise sites live outside this module."""
    pass
class HttpResponse(object):
    """Mirrors the interface exposed by httplib.HTTPResponse.

    HttpResponse represents the server's response to an HTTP request from
    the client. The HttpClient.request method returns a
    httplib.HTTPResponse object and this class is designed to provide the
    same read/getheader/status interface for arbitrary bodies.
    """

    def __init__(self, body=None, status=None, reason=None, headers=None):
        """Constructor for an HttpResponse object.

        Args:
            body: A file like object, with a read() method. The body could
                also be a string, and the constructor will wrap it so that
                HttpResponse.read(self) will return the full string.
            status: The HTTP status code as an int. Example: 200, 201, 404.
            reason: The HTTP status message which follows the code. Example:
                OK, Created, Not Found
            headers: A dictionary containing the HTTP headers in the
                server's response. A common header in the response is
                Content-Length.
        """
        self._body = None
        if body:
            if hasattr(body, 'read'):
                # Already file-like: use it directly.
                self._body = body
            else:
                # Wrap a plain string so read() works uniformly.
                self._body = StringIO.StringIO(body)
        if status is None:
            self.status = None
        else:
            self.status = int(status)
        self.reason = reason
        self._headers = headers or {}

    def getheader(self, name, default=None):
        """Returns the named response header, or default when absent."""
        return self._headers.get(name, default)

    def read(self, amt=None):
        """Reads amt bytes from the body, or the whole body when amt is
        falsy."""
        if amt:
            return self._body.read(amt)
        return self._body.read()
class GenericHttpClient(object):
    """Delegating HTTP client that merges in persistent headers."""
    debug = False

    def __init__(self, http_client, headers=None):
        """
        Args:
            http_client: An object which provides a request method to make an
                HTTP request. The request method in GenericHttpClient
                performs a call-through to the contained HTTP client object.
            headers: A dictionary containing HTTP headers which should be
                included in every HTTP request. Common persistent headers
                include 'User-Agent'.
        """
        self.http_client = http_client
        self.headers = headers or {}

    def request(self, operation, url, data=None, headers=None):
        # Per-request headers take precedence over the persistent defaults.
        merged = dict(self.headers)
        merged.update(headers or {})
        return self.http_client.request(operation, url, data=data,
                                        headers=merged)

    def get(self, url, headers=None):
        return self.request('GET', url, headers=headers)

    def post(self, url, data, headers=None):
        return self.request('POST', url, data=data, headers=headers)

    def put(self, url, data, headers=None):
        return self.request('PUT', url, data=data, headers=headers)

    def delete(self, url, headers=None):
        return self.request('DELETE', url, headers=headers)
class GenericToken(object):
    """Represents an Authorization token to be added to HTTP requests.

    Some Authorization headers included calculated fields (digital
    signatures for example) which are based on the parameters of the HTTP
    request. Therefore the token is responsible for signing the request
    and adding the Authorization header.
    """
    def perform_request(self, http_client, operation, url, data=None,
                        headers=None):
        """For the GenericToken, no Authorization token is set.

        Pure pass-through to http_client.request; subclasses are expected
        to add their auth header before delegating.
        """
        return http_client.request(operation, url, data=data, headers=headers)

    def valid_for_scope(self, url):
        """Tells the caller if the token authorizes access to the desired URL.

        Since the generic token doesn't add an auth header, it is not valid
        for any scope.
        """
        return False
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao (askxuefeng@gmail.com)'
'''
Load site info.
'''
from framework import cache
from framework import store
import runtime
SITE_GROUP = '__site__'
DEFAULT_DATE = '%Y-%m-%d'
DEFAULT_TIME = '%H:%M:%S'
def date_format_samples(dt):
    '''
    Return date format samples with list of tuple (format, sample).
    '''
    samples = []
    for fmt in (DEFAULT_DATE, '%y-%m-%d', '%d/%m/%Y', '%d/%m/%y', '%m/%d/%Y',
                '%m/%d/%y', '%b %d, %Y', '%b %d, %y', '%B %d, %Y', '%B %d, %y'):
        samples.append((fmt, dt.strftime(fmt)))
    return samples
def time_format_samples(dt):
    '''
    Return time format samples with list of tuple (format, sample).
    '''
    samples = []
    for fmt in (DEFAULT_TIME, '%H:%M', '%I:%M:%S %p', '%I:%M %p'):
        samples.append((fmt, dt.strftime(fmt)))
    return samples
class Site(object):
    '''
    Site object that has attributes such as 'title', 'subtitle', etc.
    '''
    __slots__ = ('title', 'subtitle', 'date_format', 'time_format', 'tz_name', 'tz_h_offset', 'tz_m_offset', 'tz_dst')

    # Fallback values used for any attribute missing from the keyword args.
    defaults = {
        'title' : 'ExpressMe',
        'subtitle' : 'just another ExpressMe web site',
        'date_format' : DEFAULT_DATE,
        'time_format' : DEFAULT_TIME,
        'tz_name' : runtime.UTC_NAME,
        'tz_h_offset' : 0,
        'tz_m_offset' : 0,
        'tz_dst' : 0,
    }

    def __init__(self, **kw):
        # Every slot gets a value: either the caller's or the default.
        for name in self.__slots__:
            setattr(self, name, kw.get(name, Site.defaults[name]))

    def get_tzinfo(self):
        # Offsets may arrive as strings from the settings store; coerce them.
        return runtime.UserTimeZone(self.tz_name, int(self.tz_h_offset),
                                    int(self.tz_m_offset), int(self.tz_dst))
def get_site_settings(use_cache=True):
    '''
    Get site as a site object which has attribute of 'title', 'subtitle', etc.

    Args:
        use_cache: True if use cache, default to True.
    '''
    if not use_cache:
        return _get_from_store()
    # The cached copy expires after one hour (3600 seconds).
    return cache.get(SITE_GROUP, _get_from_store, 3600)
def set_site_settings(**kw):
    '''
    Set site info.

    Args:
        keyword args support 'title', 'subtitle', etc.
    '''
    # Replace the whole settings group, then invalidate the cache entry.
    store.delete_settings(SITE_GROUP)
    site = Site(**kw)
    for name in site.__slots__:
        value = str(getattr(site, name))
        store.set_setting(name, value, SITE_GROUP)
    cache.delete(SITE_GROUP)
def _get_from_store():
    # Settings keys may be unicode; Site(**kw) needs str keys in Python 2.
    raw = store.get_settings(SITE_GROUP)
    kw = dict([(str(key), raw[key]) for key in raw.keys()])
    return Site(**kw)
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
"""Provides classes and constants for the XML in the Google Spreadsheets API.
Documentation for the raw XML which these classes represent can be found here:
http://code.google.com/apis/spreadsheets/docs/3.0/reference.html#Elements
"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
import gdata.data
GS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s'
GSX_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006/extended'
INSERT_MODE = 'insert'
OVERWRITE_MODE = 'overwrite'
WORKSHEETS_REL = 'http://schemas.google.com/spreadsheets/2006#worksheetsfeed'
class Error(Exception):
    """Base class for errors raised by this module."""
    pass


class FieldMissing(Exception):
    # NOTE(review): unlike HeaderNotSet this derives from Exception rather
    # than the module's Error base — confirm whether that is intentional
    # before changing it (callers may catch Exception only).
    pass


class HeaderNotSet(Error):
    """The desired column header had no value for the row in the list feed."""


class Cell(atom.core.XmlElement):
    """The gs:cell element.

    A cell in the worksheet. The <gs:cell> element can appear only as a child
    of <atom:entry>.
    """
    _qname = GS_TEMPLATE % 'cell'
    # String class attributes name the element's XML attributes — presumably
    # via atom.core.XmlElement's declarative mapping (defined elsewhere).
    col = 'col'
    input_value = 'inputValue'
    numeric_value = 'numericValue'
    row = 'row'
class ColCount(atom.core.XmlElement):
    """The gs:colCount element.

    Indicates the number of columns in the worksheet, including columns that
    contain only empty cells. The <gs:colCount> element can appear as a child
    of <atom:entry> or <atom:feed>
    """
    _qname = GS_TEMPLATE % 'colCount'


class Field(atom.core.XmlElement):
    """The gs:field element.

    A field single cell within a record. Contained in an <atom:entry>.
    """
    _qname = GS_TEMPLATE % 'field'
    # XML attribute names of the gs:field element.
    index = 'index'
    name = 'name'


class Column(Field):
    """The gs:column element (same attributes as Field)."""
    _qname = GS_TEMPLATE % 'column'


class Data(atom.core.XmlElement):
    """The gs:data element.

    A data region of a table. Contained in an <atom:entry> element.
    """
    _qname = GS_TEMPLATE % 'data'
    # A gs:data region holds repeated gs:column children.
    column = [Column]
    insertion_mode = 'insertionMode'
    num_rows = 'numRows'
    start_row = 'startRow'


class Header(atom.core.XmlElement):
    """The gs:header element.

    Indicates which row is the header row. Contained in an <atom:entry>.
    """
    _qname = GS_TEMPLATE % 'header'
    row = 'row'


class RowCount(atom.core.XmlElement):
    """The gs:rowCount element.

    Indicates the number of total rows in the worksheet, including rows that
    contain only empty cells. The <gs:rowCount> element can appear as a
    child of <atom:entry> or <atom:feed>.
    """
    _qname = GS_TEMPLATE % 'rowCount'


class Worksheet(atom.core.XmlElement):
    """The gs:worksheet element.

    The worksheet where the table lives. Contained in an <atom:entry>.
    """
    _qname = GS_TEMPLATE % 'worksheet'
    name = 'name'
class Spreadsheet(gdata.data.GDEntry):
    """An Atom entry which represents a Google Spreadsheet."""

    def find_worksheets_feed(self):
        # Delegates to GDEntry.find_url with the worksheets-feed link rel.
        return self.find_url(WORKSHEETS_REL)

    FindWorksheetsFeed = find_worksheets_feed


class SpreadsheetsFeed(gdata.data.GDFeed):
    """An Atom feed listing a user's Google Spreadsheets."""
    entry = [Spreadsheet]


class WorksheetEntry(gdata.data.GDEntry):
    """An Atom entry representing a single worksheet in a spreadsheet."""
    # Child elements carrying the worksheet's dimensions.
    row_count = RowCount
    col_count = ColCount


class WorksheetsFeed(gdata.data.GDFeed):
    """A feed containing the worksheets in a single spreadsheet."""
    entry = [WorksheetEntry]
class Table(gdata.data.GDEntry):
    """An Atom entry that represents a subsection of a worksheet.

    A table allows you to treat part or all of a worksheet somewhat like a
    table in a database, that is, as a set of structured data items. Tables
    don't exist until you explicitly create them; before you can use a table
    feed, you have to explicitly define where the table data comes from.
    """
    data = Data
    header = Header
    worksheet = Worksheet

    def get_table_id(self):
        # The table id is the last path segment of the entry's Atom id.
        if self.id.text:
            return self.id.text.split('/')[-1]
        return None

    GetTableId = get_table_id


class TablesFeed(gdata.data.GDFeed):
    """An Atom feed containing the tables defined within a worksheet."""
    entry = [Table]
class Record(gdata.data.GDEntry):
    """An Atom entry representing a single record in a table.

    Note that the order of items in each record is the same as the order of
    columns in the table definition, which may not match the order of
    columns in the GUI.
    """
    field = [Field]

    def value_for_index(self, column_index):
        """Returns the text of the field whose index equals column_index.

        Raises:
            FieldMissing: if no field carries the requested index.
        """
        for field in self.field:
            if field.index == column_index:
                return field.text
        raise FieldMissing('There is no field for %s' % column_index)

    ValueForIndex = value_for_index

    def value_for_name(self, name):
        """Returns the text of the field with the given name.

        Raises:
            FieldMissing: if no field carries the requested name.
        """
        for field in self.field:
            if field.name == name:
                return field.text
        raise FieldMissing('There is no field for %s' % name)

    ValueForName = value_for_name

    def get_record_id(self):
        """Returns the last path segment of the entry's Atom id, or None."""
        if self.id.text:
            return self.id.text.split('/')[-1]
        return None

    # CONSISTENCY FIX: provide the CamelCase alias like the sibling methods
    # in this module (GetTableId, ValueForIndex, ValueForName, ...).
    GetRecordId = get_record_id


class RecordsFeed(gdata.data.GDFeed):
    """An Atom feed containing the individual records in a table."""
    entry = [Record]
class ListRow(atom.core.XmlElement):
    """A gsx column value within a row.

    The local tag in the _qname is blank and must be set to the column
    name. For example, when adding to a ListEntry, do:
        col_value = ListRow(text='something')
        col_value._qname = col_value._qname % 'mycolumnname'
    """
    _qname = '{http://schemas.google.com/spreadsheets/2006/extended}%s'


class ListEntry(gdata.data.GDEntry):
    """An Atom entry representing a worksheet row in the list feed.

    The values for a particular column can be get and set using
    x.get_value('columnheader') and x.set_value('columnheader', 'value').
    See also the explanation of column names in the ListsFeed class.
    """

    def get_value(self, column_name):
        """Returns the displayed text for the desired column in this row.

        The formula or input which generated the displayed value is not
        accessible through the list feed, to see the user's input, use the
        cells feed.

        If a column is not present in this spreadsheet, or there is no value
        for a column in this row, this method will return None.
        """
        values = self.get_elements(column_name, GSX_NAMESPACE)
        if len(values) == 0:
            return None
        return values[0].text

    def set_value(self, column_name, value):
        """Changes the value of cell in this row under the desired column name.

        Warning: if the cell contained a formula, it will be wiped out by
        setting the value using the list feed since the list feed only works
        with displayed values.

        No client side checking is performed on the column_name, you need to
        ensure that the column_name is the local tag name in the gsx tag for
        the column. For example, the column_name will not contain special
        characters, spaces, uppercase letters, etc.
        """
        # Try to find the column in this row to change an existing value.
        values = self.get_elements(column_name, GSX_NAMESPACE)
        if len(values) > 0:
            values[0].text = value
        else:
            # There is no value in this row for the desired column, so add a
            # new gsx:column_name element.
            new_value = ListRow(text=value)
            new_value._qname = new_value._qname % (column_name,)
            self._other_elements.append(new_value)


class ListsFeed(gdata.data.GDFeed):
    """An Atom feed in which each entry represents a row in a worksheet.

    The first row in the worksheet is used as the column names for the
    values in each row. If a header cell is empty, then a unique column ID
    is used for the gsx element name.

    Spaces in a column name are removed from the name of the corresponding
    gsx element.

    Caution: The columnNames are case-insensitive. For example, if you see
    a <gsx:e-mail> element in a feed, you can't know whether the column
    heading in the original worksheet was "e-mail" or "E-Mail".

    Note: If two or more columns have the same name, then subsequent columns
    of the same name have _n appended to the columnName. For example, if the
    first column name is "e-mail", followed by columns named "E-Mail" and
    "E-mail", then the columnNames will be gsx:e-mail, gsx:e-mail_2, and
    gsx:e-mail_3 respectively.
    """
    entry = [ListEntry]
class CellEntry(gdata.data.BatchEntry):
    """An Atom entry representing a single cell in a worksheet."""
    cell = Cell


class CellsFeed(gdata.data.BatchFeed):
    """An Atom feed contains one entry per cell in a worksheet.

    The cell feed supports batch operations, you can send multiple cell
    operations in one HTTP request.
    """
    entry = [CellEntry]

    def batch_set_cell(row, col, input):
        # NOTE(review): this no-op stub takes no 'self' and shadows the
        # builtin 'input'; as written it is unusable as an instance method.
        # Confirm the intended signature before relying on it.
        pass
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a client to communicate with the Google Spreadsheets servers.
For documentation on the Spreadsheets API, see:
http://code.google.com/apis/spreadsheets/
"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import gdata.client
import gdata.gauth
import gdata.spreadsheets.data
import atom.data
import atom.http_core
SPREADSHEETS_URL = ('http://spreadsheets.google.com/feeds/spreadsheets'
'/private/full')
WORKSHEETS_URL = ('http://spreadsheets.google.com/feeds/worksheets/'
'%s/private/full')
WORKSHEET_URL = ('http://spreadsheets.google.com/feeds/worksheets/'
'%s/private/full/%s')
TABLES_URL = 'http://spreadsheets.google.com/feeds/%s/tables'
RECORDS_URL = 'http://spreadsheets.google.com/feeds/%s/records/%s'
RECORD_URL = 'http://spreadsheets.google.com/feeds/%s/records/%s/%s'
class SpreadsheetsClient(gdata.client.GDClient):
    """GDClient subclass that talks to the Google Spreadsheets servers."""
    api_version = '3'
    # 'wise' is the service name used to look up the auth scopes below.
    auth_service = 'wise'
    auth_scopes = gdata.gauth.AUTH_SCOPES['wise']
    def get_spreadsheets(self, auth_token=None,
                         desired_class=gdata.spreadsheets.data.SpreadsheetsFeed,
                         **kwargs):
        """Obtains a feed with the spreadsheets belonging to the current user.

        Args:
            auth_token: An object which sets the Authorization HTTP header in
                its modify_request method. Recommended classes include
                gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken
                among others. Represents the current user. Defaults to None
                and if None, this method will look for a value in the
                auth_token member of SpreadsheetsClient.
            desired_class: class descended from atom.core.XmlElement to which
                a successful response should be converted. If there is no
                converter function specified (converter=None) then the
                desired_class will be used in calling the atom.core.parse
                function. If neither the desired_class nor the converter is
                specified, an HTTP response object will be returned. Defaults
                to gdata.spreadsheets.data.SpreadsheetsFeed.
        """
        return self.get_feed(SPREADSHEETS_URL, auth_token=auth_token,
                             desired_class=desired_class, **kwargs)

    GetSpreadsheets = get_spreadsheets
    def get_worksheets(self, spreadsheet_key, auth_token=None,
                       desired_class=gdata.spreadsheets.data.WorksheetsFeed,
                       **kwargs):
        """Finds the worksheets within a given spreadsheet.

        Args:
            spreadsheet_key: str, The unique ID of this containing
                spreadsheet. This can be the ID from the URL or as provided
                in a Spreadsheet entry.
            auth_token: An object which sets the Authorization HTTP header in
                its modify_request method. Recommended classes include
                gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken
                among others. Represents the current user. Defaults to None
                and if None, this method will look for a value in the
                auth_token member of SpreadsheetsClient.
            desired_class: class descended from atom.core.XmlElement to which
                a successful response should be converted. If there is no
                converter function specified (converter=None) then the
                desired_class will be used in calling the atom.core.parse
                function. If neither the desired_class nor the converter is
                specified, an HTTP response object will be returned. Defaults
                to gdata.spreadsheets.data.WorksheetsFeed.
        """
        return self.get_feed(WORKSHEETS_URL % spreadsheet_key,
                             auth_token=auth_token, desired_class=desired_class,
                             **kwargs)

    GetWorksheets = get_worksheets
    def add_worksheet(self, spreadsheet_key, title, rows, cols,
                      auth_token=None, **kwargs):
        """Creates a new worksheet entry in the spreadsheet.

        Args:
            spreadsheet_key: str, The unique ID of this containing
                spreadsheet. This can be the ID from the URL or as provided
                in a Spreadsheet entry.
            title: str, The title to be used in for the worksheet.
            rows: str or int, The number of rows this worksheet should start
                with.
            cols: str or int, The number of columns this worksheet should
                start with.
            auth_token: An object which sets the Authorization HTTP header in
                its modify_request method. Recommended classes include
                gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken
                among others. Represents the current user. Defaults to None
                and if None, this method will look for a value in the
                auth_token member of SpreadsheetsClient.
        """
        # rows/cols are stringified because the XML text content must be str.
        new_worksheet = gdata.spreadsheets.data.WorksheetEntry(
            title=atom.data.Title(text=title),
            row_count=gdata.spreadsheets.data.RowCount(text=str(rows)),
            col_count=gdata.spreadsheets.data.ColCount(text=str(cols)))
        return self.post(new_worksheet, WORKSHEETS_URL % spreadsheet_key,
                         auth_token=auth_token, **kwargs)

    AddWorksheet = add_worksheet
    def get_worksheet(self, spreadsheet_key, worksheet_id,
                      desired_class=gdata.spreadsheets.data.WorksheetEntry,
                      auth_token=None, **kwargs):
        """Retrieves a single worksheet.

        Args:
            spreadsheet_key: str, The unique ID of this containing
                spreadsheet. This can be the ID from the URL or as provided
                in a Spreadsheet entry.
            worksheet_id: str, The unique ID for the worksheet within the
                desired spreadsheet.
            auth_token: An object which sets the Authorization HTTP header in
                its modify_request method. Recommended classes include
                gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken
                among others. Represents the current user. Defaults to None
                and if None, this method will look for a value in the
                auth_token member of SpreadsheetsClient.
            desired_class: class descended from atom.core.XmlElement to which
                a successful response should be converted. If there is no
                converter function specified (converter=None) then the
                desired_class will be used in calling the atom.core.parse
                function. If neither the desired_class nor the converter is
                specified, an HTTP response object will be returned. Defaults
                to gdata.spreadsheets.data.WorksheetEntry.
        """
        return self.get_entry(WORKSHEET_URL % (spreadsheet_key, worksheet_id,),
                              auth_token=auth_token, desired_class=desired_class,
                              **kwargs)

    GetWorksheet = get_worksheet
def add_table(self, spreadsheet_key, title, summary, worksheet_name,
              header_row, num_rows, start_row, insertion_mode,
              column_headers, auth_token=None, **kwargs):
  """Creates a new table within a worksheet.

  Args:
    spreadsheet_key: str, unique ID of the containing spreadsheet.
    title: str, title for the new table.
    summary: str, a description of the table.
    worksheet_name: str, name of the worksheet in which the table lives.
    header_row: int or str, row number containing the column names.
    num_rows: int or str, number of adjacent data rows in the table.
    start_row: int or str, row number at which the data begins.
    insertion_mode: str
    column_headers: dict mapping column letters (A, B, C) to the desired
        column names shown in the worksheet.
    auth_token: object whose modify_request method sets the Authorization
        HTTP header. Defaults to None, in which case the client's own
        auth_token member is consulted.

  Returns:
    The server's response to the POST.
  """
  # Describe where the table's data lives within the worksheet.
  table_data = gdata.spreadsheets.data.Data(
      insertion_mode=insertion_mode, num_rows=str(num_rows),
      start_row=str(start_row))
  for col_index, col_name in column_headers.iteritems():
    table_data.column.append(
        gdata.spreadsheets.data.Column(index=col_index, name=col_name))
  # Assemble the table entry and post it to the spreadsheet's tables feed.
  table_entry = gdata.spreadsheets.data.Table(
      title=atom.data.Title(text=title),
      summary=atom.data.Summary(summary),
      worksheet=gdata.spreadsheets.data.Worksheet(name=worksheet_name),
      header=gdata.spreadsheets.data.Header(row=str(header_row)),
      data=table_data)
  return self.post(table_entry, TABLES_URL % spreadsheet_key,
                   auth_token=auth_token, **kwargs)

AddTable = add_table
def get_tables(self, spreadsheet_key,
               desired_class=gdata.spreadsheets.data.TablesFeed,
               auth_token=None, **kwargs):
  """Retrieves a feed listing the tables in a spreadsheet.

  Args:
    spreadsheet_key: str, unique ID of the containing spreadsheet.
    desired_class: class derived from atom.core.XmlElement into which a
        successful response is converted. Defaults to
        gdata.spreadsheets.data.TablesFeed.
    auth_token: object whose modify_request method sets the Authorization
        HTTP header. Defaults to None, in which case the client's own
        auth_token member is consulted.

  Returns:
    The parsed tables feed.
  """
  uri = TABLES_URL % spreadsheet_key
  return self.get_feed(uri, desired_class=desired_class,
                       auth_token=auth_token, **kwargs)

GetTables = get_tables
def add_record(self, spreadsheet_key, table_id, fields,
               title=None, auth_token=None, **kwargs):
  """Adds a new row (record) to a table.

  Args:
    spreadsheet_key: str, unique ID of the containing spreadsheet.
    table_id: str, ID of the table which receives the new record; can be
        found via gdata.spreadsheets.data.Table.get_table_id.
    fields: dict mapping column names to string values.
    title: str, optional title for the row.
    auth_token: object whose modify_request method sets the Authorization
        HTTP header. Defaults to None, in which case the client's own
        auth_token member is consulted.

  Returns:
    The server's response to the POST.
  """
  record = gdata.spreadsheets.data.Record()
  if title is not None:
    record.title = atom.data.Title(text=title)
  # Copy each column value into a Field element on the record.
  for field_name, field_value in fields.iteritems():
    record.field.append(
        gdata.spreadsheets.data.Field(name=field_name, text=field_value))
  return self.post(record, RECORDS_URL % (spreadsheet_key, table_id),
                   auth_token=auth_token, **kwargs)

AddRecord = add_record
def get_records(self, spreadsheet_key, table_id,
                desired_class=gdata.spreadsheets.data.RecordsFeed,
                auth_token=None, **kwargs):
  """Retrieves the records in a table.

  Args:
    spreadsheet_key: str, unique ID of the containing spreadsheet.
    table_id: str, ID of the table whose records are fetched; can be
        found via gdata.spreadsheets.data.Table.get_table_id.
    desired_class: class derived from atom.core.XmlElement into which a
        successful response is converted. Defaults to
        gdata.spreadsheets.data.RecordsFeed.
    auth_token: object whose modify_request method sets the Authorization
        HTTP header. Defaults to None, in which case the client's own
        auth_token member is consulted.

  Returns:
    The parsed records feed.
  """
  uri = RECORDS_URL % (spreadsheet_key, table_id)
  return self.get_feed(uri, desired_class=desired_class,
                       auth_token=auth_token, **kwargs)

GetRecords = get_records
def get_record(self, spreadsheet_key, table_id, record_id,
               desired_class=gdata.spreadsheets.data.Record,
               auth_token=None, **kwargs):
  """Retrieves a single record from a table.

  Args:
    spreadsheet_key: str, unique ID of the containing spreadsheet.
    table_id: str, ID of the table holding the record; can be found via
        gdata.spreadsheets.data.Table.get_table_id.
    record_id: str, ID of the record to fetch; can be found via
        gdata.spreadsheets.data.Record.get_record_id.
    desired_class: class derived from atom.core.XmlElement into which a
        successful response is converted. Defaults to
        gdata.spreadsheets.data.Record.
    auth_token: object whose modify_request method sets the Authorization
        HTTP header. Defaults to None, in which case the client's own
        auth_token member is consulted.

  Returns:
    The parsed record entry.
  """
  uri = RECORD_URL % (spreadsheet_key, table_id, record_id)
  return self.get_entry(uri, desired_class=desired_class,
                        auth_token=auth_token, **kwargs)

GetRecord = get_record
class SpreadsheetQuery(gdata.client.Query):
  """Adds Spreadsheets-feed query parameters to a request."""

  def __init__(self, title=None, title_exact=None, **kwargs):
    """Stores the title search parameters.

    Args:
      title: str, search terms for the document title. Without
          title_exact this performs a partial (not exact) match.
      title_exact: str, 'true' or 'false'; whether the title query is an
          exact string match. Meaningless without title.
    """
    gdata.client.Query.__init__(self, **kwargs)
    self.title = title
    self.title_exact = title_exact

  def modify_request(self, http_request):
    # Attach the title parameters, then let the base query add its own.
    for param_name, param_value in (('title', self.title),
                                    ('title-exact', self.title_exact)):
      gdata.client._add_query_param(param_name, param_value, http_request)
    gdata.client.Query.modify_request(self, http_request)

  ModifyRequest = modify_request
class WorksheetQuery(SpreadsheetQuery):
  # Worksheet feeds accept the same title/title-exact query parameters as
  # spreadsheet feeds, so this subclass adds no behavior of its own.
  pass
class ListQuery(gdata.client.Query):
  """Adds List-feed specific query parameters to a request."""

  def __init__(self, order_by=None, reverse=None, sq=None, **kwargs):
    """Stores the list-feed ordering and filter parameters.

    Args:
      order_by: str, which column orders the feed's entries. 'position'
          (the default) returns rows in GUI order (row 1, row 2, ...).
          'column:columnName' sorts rows ascending by the values in the
          column whose header-row value is columnName.
      reverse: str, 'true' for a descending sort, 'false' (the default)
          for ascending.
      sq: str, structured query over the worksheet's full text, of the
          form [columnName][binaryOperator][value]. Supported binary
          operators: () for grouping; = or == for equality; <> or != for
          inequality; and or && for boolean and; or or || for boolean or.
    """
    gdata.client.Query.__init__(self, **kwargs)
    self.order_by = order_by
    self.reverse = reverse
    self.sq = sq

  def modify_request(self, http_request):
    # Attach the list-feed parameters, then let the base query add its own.
    for param_name, param_value in (('orderby', self.order_by),
                                    ('reverse', self.reverse),
                                    ('sq', self.sq)):
      gdata.client._add_query_param(param_name, param_value, http_request)
    gdata.client.Query.modify_request(self, http_request)

  ModifyRequest = modify_request
class TableQuery(ListQuery):
  # Table feeds accept the same orderby/reverse/sq query parameters as
  # list feeds, so this subclass adds no behavior of its own.
  pass
class CellQuery(gdata.client.Query):
  """Adds Cells-feed specific query parameters to a request."""

  def __init__(self, min_row=None, max_row=None, min_col=None, max_col=None,
               range=None, return_empty=None, **kwargs):
    """Stores the cell-range restriction parameters.

    Args:
      min_row: str or int, positional number of the minimum row returned.
      max_row: str or int, positional number of the maximum row returned.
      min_col: str or int, positional number of the minimum column returned.
      max_col: str or int, positional number of the maximum column returned.
      range: str, a single cell or a cell range in standard spreadsheet
          notation, with a colon separating start and end. Examples:
          'A1' and 'R1C1' both name cell A1; 'D1:F3' and 'R1C4:R3C6' both
          name the rectangle with corners D1 and F3.
      return_empty: str, 'true' to include empty cells in the feed;
          omitted/'false' by default.
    """
    gdata.client.Query.__init__(self, **kwargs)
    self.min_row = min_row
    self.max_row = max_row
    self.min_col = min_col
    self.max_col = max_col
    self.range = range
    self.return_empty = return_empty

  def modify_request(self, http_request):
    # Attach each cell restriction in the same order the API documents,
    # then let the base query add its own parameters.
    cell_params = (('min-row', self.min_row), ('max-row', self.max_row),
                   ('min-col', self.min_col), ('max-col', self.max_col),
                   ('range', self.range),
                   ('return-empty', self.return_empty))
    for param_name, param_value in cell_params:
      gdata.client._add_query_param(param_name, param_value, http_request)
    gdata.client.Query.modify_request(self, http_request)

  ModifyRequest = modify_request
| Python |
#!/usr/bin/python
#
# Copyright (C) 2007 - 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
import math
import random
import re
import time
import types
import urllib
import atom.http_interface
import atom.token_store
import atom.url
import gdata.oauth as oauth
import gdata.oauth.rsa as oauth_rsa
import gdata.tlslite.utils.keyfactory as keyfactory
import gdata.tlslite.utils.cryptomath as cryptomath
import gdata.gauth
__author__ = 'api.jscudder (Jeff Scudder)'
PROGRAMMATIC_AUTH_LABEL = 'GoogleLogin auth='
AUTHSUB_AUTH_LABEL = 'AuthSub token='
"""This module provides functions and objects used with Google authentication.
Details on Google authorization mechanisms used with the Google Data APIs can
be found here:
http://code.google.com/apis/gdata/auth.html
http://code.google.com/apis/accounts/
The essential functions are the following.
Related to ClientLogin:
generate_client_login_request_body: Constructs the body of an HTTP request to
obtain a ClientLogin token for a specific
service.
extract_client_login_token: Creates a ClientLoginToken with the token from a
success response to a ClientLogin request.
get_captcha_challenge: If the server responded to the ClientLogin request
with a CAPTCHA challenge, this method extracts the
CAPTCHA URL and identifying CAPTCHA token.
Related to AuthSub:
generate_auth_sub_url: Constructs a full URL for a AuthSub request. The
user's browser must be sent to this Google Accounts
URL and redirected back to the app to obtain the
AuthSub token.
extract_auth_sub_token_from_url: Once the user's browser has been
redirected back to the web app, use this
function to create an AuthSubToken with
the correct authorization token and scope.
token_from_http_body: Extracts the AuthSubToken value string from the
server's response to an AuthSub session token upgrade
request.
"""
def generate_client_login_request_body(email, password, service, source,
    account_type='HOSTED_OR_GOOGLE', captcha_token=None,
    captcha_response=None):
  """Builds the body of a ClientLogin authentication request.

  See http://code.google.com/apis/accounts/AuthForInstalledApps.html#Request
  for more details.

  Args:
    email: str
    password: str
    service: str
    source: str
    account_type: str (optional) Default is 'HOSTED_OR_GOOGLE'; other valid
        values are 'GOOGLE' and 'HOSTED'.
    captcha_token: str (optional)
    captcha_response: str (optional)

  Returns:
    The HTTP body to send in a request for a client login token.
  """
  # This function is a thin compatibility wrapper; the real work lives
  # in gdata.gauth.
  return gdata.gauth.generate_client_login_request_body(
      email, password, service, source, account_type, captcha_token,
      captcha_response)

GenerateClientLoginRequestBody = generate_client_login_request_body
def GenerateClientLoginAuthToken(http_body):
  """Returns the token value to use in Authorization headers.

  Reads the token from the server's response to a Client Login request and
  creates the header value to use in requests.

  Args:
    http_body: str The body of the server's HTTP response to a Client Login
        request

  Returns:
    The value half of an Authorization header, or None if the response
    contained no token.
  """
  token = get_client_login_token(http_body)
  if token:
    # Use the module-level label constant instead of duplicating the
    # 'GoogleLogin auth=' literal, matching AuthSubTokenFromHttpBody's
    # use of AUTHSUB_AUTH_LABEL.
    return '%s%s' % (PROGRAMMATIC_AUTH_LABEL, token)
  return None
def get_client_login_token(http_body):
  """Returns the raw token value for a ClientLoginToken.

  Extracts the token from the server's response to a Client Login request
  so it can be used when building request headers.

  Args:
    http_body: str The body of the server's HTTP response to a Client Login
        request

  Returns:
    The token value string for a ClientLoginToken.
  """
  # Delegate to the newer gdata.gauth implementation.
  return gdata.gauth.get_client_login_token_string(http_body)
def extract_client_login_token(http_body, scopes):
  """Parses the server's response and returns a ClientLoginToken.

  Args:
    http_body: str The body of the server's HTTP response to a (successful)
        Client Login request.
    scopes: list of atom.url.Urls or strs. Each entry is a partial URL
        under which the token is valid; e.g. ['http://example.com/foo']
        makes the token valid for http://example.com/foo/bar/baz.

  Returns:
    A ClientLoginToken which is valid for the specified scopes.
  """
  token = ClientLoginToken(scopes=scopes)
  # Pull the raw token string out of the response body and install it.
  token.set_token_string(get_client_login_token(http_body))
  return token
def get_captcha_challenge(http_body,
    captcha_base_url='http://www.google.com/accounts/'):
  """Returns the URL and token for a CAPTCHA challenge issued by the server.

  Args:
    http_body: str The body of the HTTP response from the server which
        contains the CAPTCHA challenge.
    captcha_base_url: str Prefix used to build the full challenge-image
        URL, since the server only supplies the tail. For example the
        server provides 'Captcha?ctoken=Hi...N' and the image URL becomes
        'http://www.google.com/accounts/Captcha?ctoken=Hi...N'.

  Returns:
    A dict describing the CAPTCHA challenge, or None if the response
    contained no challenge. The dict has the form:
        {'token': string identifying the CAPTCHA image,
         'url': string containing the URL of the image}
  """
  # Delegate to the newer gdata.gauth implementation.
  return gdata.gauth.get_captcha_challenge(http_body, captcha_base_url)

GetCaptchaChallenge = get_captcha_challenge
def GenerateOAuthRequestTokenUrl(
    oauth_input_params, scopes,
    request_token_url='https://www.google.com/accounts/OAuthGetRequestToken',
    extra_parameters=None):
  """Generates the URL at which an OAuth request token is requested.

  Args:
    oauth_input_params: OAuthInputParams OAuth input parameters.
    scopes: list of strings The URLs of the services to be accessed.
    request_token_url: string Beginning of the request token URL, normally
        'https://www.google.com/accounts/OAuthGetRequestToken' or
        '/accounts/OAuthGetRequestToken'.
    extra_parameters: dict (optional) Additional key-value parameters to
        include in the URL and signature. The standard OAuth parameters
        are added automatically, but entries given here override the
        defaults (e.g. {'oauth_version': '2.0'}).

  Returns:
    atom.url.Url OAuth request token URL.
  """
  # All requested scopes travel as one space-delimited 'scope' parameter.
  params = {'scope': ' '.join([str(s) for s in scopes])}
  if extra_parameters:
    params.update(extra_parameters)
  oauth_request = oauth.OAuthRequest.from_consumer_and_token(
      oauth_input_params.GetConsumer(), http_url=request_token_url,
      parameters=params)
  # No token exists yet at this stage, so the request is signed with the
  # consumer credentials only.
  oauth_request.sign_request(oauth_input_params.GetSignatureMethod(),
                             oauth_input_params.GetConsumer(), None)
  return atom.url.parse_url(oauth_request.to_url())
def GenerateOAuthAuthorizationUrl(
    request_token,
    authorization_url='https://www.google.com/accounts/OAuthAuthorizeToken',
    callback_url=None, extra_params=None,
    include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope'):
  """Generates the URL at which the user authorizes the request token.

  Args:
    request_token: gdata.auth.OAuthToken OAuth request token.
    authorization_url: string Beginning of the authorization URL, normally
        'https://www.google.com/accounts/OAuthAuthorizeToken' or
        '/accounts/OAuthAuthorizeToken'.
    callback_url: string (optional) URL the user is sent to after logging
        in and granting access.
    extra_params: dict (optional) Additional parameters to be sent.
    include_scopes_in_callback: Boolean (default=False) When True and
        callback_url is given, the token's scope(s) are appended to the
        callback URL under scopes_param_prefix, so the page receiving the
        OAuth token can tell which URLs the token grants access to.
    scopes_param_prefix: string (default='oauth_token_scope') URL parameter
        key under which the scopes are added to the callback URL.

  Returns:
    atom.url.Url OAuth authorization URL.
  """
  scopes = request_token.scopes
  if isinstance(scopes, list):
    scopes = ' '.join(scopes)
  if include_scopes_in_callback and callback_url:
    # Append the scopes to the callback, continuing an existing query
    # string when one is present.
    if callback_url.find('?') > -1:
      separator = '&'
    else:
      separator = '?'
    callback_url = callback_url + separator + urllib.urlencode(
        {scopes_param_prefix: scopes})
  oauth_token = oauth.OAuthToken(request_token.key, request_token.secret)
  oauth_request = oauth.OAuthRequest.from_token_and_callback(
      token=oauth_token, callback=callback_url,
      http_url=authorization_url, parameters=extra_params)
  return atom.url.parse_url(oauth_request.to_url())
def GenerateOAuthAccessTokenUrl(
    authorized_request_token,
    oauth_input_params,
    access_token_url='https://www.google.com/accounts/OAuthGetAccessToken',
    oauth_version='1.0',
    oauth_verifier=None):
  """Generates the URL used to exchange an authorized token for access.

  Args:
    authorized_request_token: gdata.auth.OAuthToken OAuth authorized
        request token.
    oauth_input_params: OAuthInputParams OAuth input parameters.
    access_token_url: string Beginning of the access token URL, normally
        'https://www.google.com/accounts/OAuthGetAccessToken' or
        '/accounts/OAuthGetAccessToken'.
    oauth_version: str (default='1.0') oauth_version parameter.
    oauth_verifier: str (optional) When present, the client is assumed to
        follow OAuth v1.0a and the verifier (as returned by the SP) is
        passed along in the access token step.

  Returns:
    atom.url.Url OAuth access token URL.
  """
  oauth_token = oauth.OAuthToken(authorized_request_token.key,
                                 authorized_request_token.secret)
  params = {'oauth_version': oauth_version}
  if oauth_verifier is not None:
    params['oauth_verifier'] = oauth_verifier
  oauth_request = oauth.OAuthRequest.from_consumer_and_token(
      oauth_input_params.GetConsumer(), token=oauth_token,
      http_url=access_token_url, parameters=params)
  # Unlike the request-token step, this request is signed with the
  # (authorized) token as well as the consumer credentials.
  oauth_request.sign_request(oauth_input_params.GetSignatureMethod(),
                             oauth_input_params.GetConsumer(), oauth_token)
  return atom.url.parse_url(oauth_request.to_url())
def GenerateAuthSubUrl(next, scope, secure=False, session=True,
    request_url='https://www.google.com/accounts/AuthSubRequest',
    domain='default'):
  """Generates a URL at which the user will login and be redirected back.

  Users enter their credentials on a Google login page and a token is sent
  to the URL specified in next. See documentation for AuthSub login at:
  http://code.google.com/apis/accounts/AuthForWebApps.html

  Args:
    request_url: str Beginning of the request URL, normally
        'http://www.google.com/accounts/AuthSubRequest' or
        '/accounts/AuthSubRequest'.
    next: string The URL user will be sent to after logging in.
    scope: string The URL of the service to be accessed.
    secure: boolean (optional) Whether the issued token is a secure token.
    session: boolean (optional) Whether the issued token can be upgraded
        to a session token.
    domain: str (optional) The Google Apps domain for this account; use
        the default value 'default' for regular Google accounts.
  """
  # The AuthSub service accepts numeric flags, so translate the booleans.
  secure = secure and 1 or 0
  session = session and 1 or 0
  request_params = urllib.urlencode({'next': next, 'scope': scope,
                                     'secure': secure, 'session': session,
                                     'hd': domain})
  # Continue an existing query string on request_url when one is present.
  if request_url.find('?') == -1:
    separator = '?'
  else:
    separator = '&'
  return request_url + separator + request_params
def generate_auth_sub_url(next, scopes, secure=False, session=True,
    request_url='https://www.google.com/accounts/AuthSubRequest',
    domain='default', scopes_param_prefix='auth_sub_scopes'):
  """Constructs a URL string for requesting a multiscope AuthSub token.

  When Google Accounts redirects the browser to the 'next' URL, it appends
  the single-use AuthSub token as a 'token' URL parameter — but not the
  scopes that were requested. This function therefore records the scopes
  on the 'next' URL itself (under scopes_param_prefix) so that the page
  handling the redirect can recover both the token value and the scopes.

  Args:
    next: atom.url.Url or string The URL user will be sent to after
        authorizing this web application to access their data.
    scopes: list containing strings, the URLs of the services to be
        accessed.
    secure: boolean (optional) Whether the issued token is a secure token.
    session: boolean (optional) Whether the issued token can be upgraded
        to a session token.
    request_url: atom.url.Url or str Beginning of the request URL,
        normally 'http://www.google.com/accounts/AuthSubRequest' or
        '/accounts/AuthSubRequest'.
    domain: The domain which the account is part of; used for Google Apps
        accounts. The default 'default' means a regular Google Account
        (@gmail.com for example).
    scopes_param_prefix: str (optional) Key of the URL parameter carrying
        the scopes on the 'next' URL; defaults to 'auth_sub_scopes'.

  Returns:
    An atom.url.Url which the user's browser should be directed to in
    order to authorize this application to access their information.
  """
  if isinstance(next, (str, unicode)):
    next = atom.url.parse_url(next)
  scopes_string = ' '.join([str(scope) for scope in scopes])
  # Record the requested scopes on the redirect target.
  next.params[scopes_param_prefix] = scopes_string
  if isinstance(request_url, (str, unicode)):
    request_url = atom.url.parse_url(request_url)
  request_url.params['next'] = str(next)
  request_url.params['scope'] = scopes_string
  # The AuthSub service expects numeric flags rather than booleans.
  request_url.params['session'] = session and 1 or 0
  request_url.params['secure'] = secure and 1 or 0
  request_url.params['hd'] = domain
  return request_url
def AuthSubTokenFromUrl(url):
  """Extracts the AuthSub token from the URL.

  Used after the AuthSub redirect has sent the user to the 'next' page and
  appended the token to the URL. This function returns the value to be used
  in the Authorization header.

  Args:
    url: str The URL of the current page which contains the AuthSub token as
        a URL parameter.

  Returns:
    The Authorization header value, or None if the URL held no token.
  """
  token = TokenFromUrl(url)
  if token:
    # Use the module-level label constant instead of duplicating the
    # 'AuthSub token=' literal, matching AuthSubTokenFromHttpBody.
    return '%s%s' % (AUTHSUB_AUTH_LABEL, token)
  return None
def TokenFromUrl(url):
  """Extracts the raw AuthSub token value from the URL.

  Args:
    url: str The URL, or just the query portion of it (after the ?), of
        the current page which carries the AuthSub token as a URL
        parameter.

  Returns:
    The raw token value string, or None when no 'token' parameter exists.
  """
  # Keep only the query portion when a full URL was given.
  if '?' in url:
    query = url.split('?')[1]
  else:
    query = url
  for param in query.split('&'):
    if param.startswith('token='):
      return param[len('token='):]
  return None
def extract_auth_sub_token_from_url(url,
    scopes_param_prefix='auth_sub_scopes', rsa_key=None):
  """Creates an AuthSubToken with the value and scopes found in the URL.

  After the Google Accounts AuthSub pages redirect the user's browser back
  to the web application (using the 'next' URL from the request), the token
  arrives as a URL parameter named 'token'. If generate_auth_sub_url was
  used to create the request, the token's valid scopes arrive in a URL
  parameter whose name is given by scopes_param_prefix.

  Args:
    url: atom.url.Url or str representing the current URL, carrying the
        token value and valid scopes as URL parameters.
    scopes_param_prefix: str (optional) URL parameter key which maps to
        the list of valid scopes for the token.
    rsa_key: (optional) When provided, a SecureAuthSubToken is built with
        this key instead of a plain AuthSubToken.

  Returns:
    An AuthSubToken (or SecureAuthSubToken) holding the token value from
    the URL and valid for the scopes passed on the URL (no scopes when the
    URL carried none). Returns None when the URL has no 'token' parameter.
  """
  if isinstance(url, (str, unicode)):
    url = atom.url.parse_url(url)
  if 'token' not in url.params:
    return None
  # Recover the scope list recorded by generate_auth_sub_url, if any.
  scopes = []
  if scopes_param_prefix in url.params:
    scopes = url.params[scopes_param_prefix].split(' ')
  if rsa_key:
    token = SecureAuthSubToken(rsa_key, scopes=scopes)
  else:
    token = AuthSubToken(scopes=scopes)
  token.set_token_string(url.params['token'])
  return token
def AuthSubTokenFromHttpBody(http_body):
  """Builds the Authorization header value from an HTTP body string.

  Used to find the new session token after making a request to upgrade a
  single-use AuthSub token.

  Args:
    http_body: str The response from the server which contains the AuthSub
        key, e.g. the server's response to an upgrade-token request.

  Returns:
    The Authorization header value containing the AuthSub token, or None
    when the body held no token.
  """
  token_value = token_from_http_body(http_body)
  if not token_value:
    return None
  return '%s%s' % (AUTHSUB_AUTH_LABEL, token_value)
def token_from_http_body(http_body):
  """Extracts the raw AuthSub token from an HTTP body string.

  Used to find the new session token after making a request to upgrade a
  single-use AuthSub token.

  Args:
    http_body: str The response from the server which contains the AuthSub
        key, e.g. the server's response to an upgrade-token request.

  Returns:
    The raw token value to use in an AuthSubToken object, or None when no
    'Token=' line is present.
  """
  prefix = 'Token='
  for line in http_body.splitlines():
    if line.startswith(prefix):
      # Everything after 'Token=' is the token value.
      return line[len(prefix):]
  return None

TokenFromHttpBody = token_from_http_body
def OAuthTokenFromUrl(url, scopes_param_prefix='oauth_token_scope'):
  """Creates an OAuthToken from the key and scopes (if present) in a URL.

  After the Google Accounts OAuth pages redirect the user's browser back to
  the web application (using the 'callback' URL from the request), the
  token — the request token, now either authorized or not depending on the
  user's choice — arrives as a URL parameter named 'oauth_token'. If
  GenerateOAuthAuthorizationUrl was used with scopes included in the
  callback, the valid scopes arrive in a URL parameter named by
  scopes_param_prefix.

  Args:
    url: atom.url.Url or str representing the current URL, carrying the
        token key and valid scopes as URL parameters.
    scopes_param_prefix: str (optional) URL parameter key which maps to
        the list of valid scopes for the token.

  Returns:
    An OAuthToken holding the key from the URL and valid for the scopes
    passed on the URL (no scopes when the URL carried none). Returns None
    when the URL has no 'oauth_token' parameter.
  """
  if isinstance(url, (str, unicode)):
    url = atom.url.parse_url(url)
  if 'oauth_token' not in url.params:
    return None
  # Recover the scope list recorded on the callback URL, if any.
  scopes = []
  if scopes_param_prefix in url.params:
    scopes = url.params[scopes_param_prefix].split(' ')
  return OAuthToken(key=url.params['oauth_token'], scopes=scopes)
def OAuthTokenFromHttpBody(http_body):
  """Parses an HTTP response body and returns an OAuth token.

  The returned OAuth token has only its key and secret parameters set; it
  knows nothing about scopes or oauth_input_params, which the caller is
  responsible for filling in.

  Returns:
    OAuthToken OAuth token.
  """
  parsed = oauth.OAuthToken.from_string(http_body)
  return OAuthToken(key=parsed.key, secret=parsed.secret)
class OAuthSignatureMethod(object):
  """Holds valid OAuth signature methods.

  RSA_SHA1: Class to build signature according to RSA-SHA1 algorithm.
  HMAC_SHA1: Class to build signature according to HMAC-SHA1 algorithm.
  """

  # The oauth library's HMAC-SHA1 strategy needs no extra key material
  # beyond the consumer secret, so it is reused directly.
  HMAC_SHA1 = oauth.OAuthSignatureMethod_HMAC_SHA1

  class RSA_SHA1(oauth_rsa.OAuthSignatureMethod_RSA_SHA1):
    """Provides implementation for abstract methods to return RSA certs."""

    def __init__(self, private_key, public_cert):
      # NOTE(review): presumably PEM-encoded strings, as consumed by the
      # oauth_rsa base class — confirm against that implementation.
      self.private_key = private_key
      self.public_cert = public_cert

    def _fetch_public_cert(self, unused_oauth_request):
      # The cert is fixed per instance, so the request argument is unused.
      return self.public_cert

    def _fetch_private_cert(self, unused_oauth_request):
      return self.private_key
class OAuthInputParams(object):
  """Stores OAuth input parameters.

  This class is a store for OAuth input parameters viz. consumer key and
  secret, signature method and RSA key.
  """

  def __init__(self, signature_method, consumer_key, consumer_secret=None,
               rsa_key=None, requestor_id=None):
    """Initializes object with parameters required for using OAuth mechanism.

    NOTE: Though consumer_secret and rsa_key are optional, either of the two
    is required depending on the value of the signature_method.

    Args:
      signature_method: class which provides implementation for strategy
          class oauth.oauth.OAuthSignatureMethod. Signature method to be used
          for signing each request. Valid implementations are provided as
          the constants defined by gdata.auth.OAuthSignatureMethod
          (RSA_SHA1 and HMAC_SHA1). Instead of passing in the strategy
          class, you may pass in the string 'RSA_SHA1' or 'HMAC_SHA1'. If
          you plan to use OAuth on App Engine (or another WSGI environment)
          prefer the string form: those environments sometimes have trouble
          pickling an object whose member references a class or function,
          and storing a string avoids that.
      consumer_key: string Domain identifying third_party web application.
      consumer_secret: string (optional) Secret generated during
          registration. Required only for HMAC_SHA1 signature method.
      rsa_key: string (optional) Private key required for RSA_SHA1 signature
          method.
      requestor_id: string (optional) User email address to make requests on
          their behalf. This parameter should only be set when performing
          2 legged OAuth requests.
    """
    self.__signature_strategy = self._CanonicalSignatureStrategy(
        signature_method)
    self.rsa_key = rsa_key
    self._consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
    self.requestor_id = requestor_id

  def _CanonicalSignatureStrategy(self, signature_method):
    """Maps a signature method (class or string) to its canonical string.

    Storing the string name rather than a class reference keeps instances
    picklable in WSGI environments. This helper replaces logic that was
    previously duplicated in __init__ and __set_signature_method.

    Args:
      signature_method: class or str identifying the signing strategy.

    Returns:
      'RSA_SHA1' or 'HMAC_SHA1' for the known strategies; any other value
      is returned unchanged (assumed to be a custom strategy class).
    """
    if (signature_method == OAuthSignatureMethod.RSA_SHA1
        or signature_method == 'RSA_SHA1'):
      return 'RSA_SHA1'
    elif (signature_method == OAuthSignatureMethod.HMAC_SHA1
          or signature_method == 'HMAC_SHA1'):
      return 'HMAC_SHA1'
    else:
      return signature_method

  def __get_signature_method(self):
    # Rebuild the strategy object on demand from the stored string name.
    if self.__signature_strategy == 'RSA_SHA1':
      return OAuthSignatureMethod.RSA_SHA1(self.rsa_key, None)
    elif self.__signature_strategy == 'HMAC_SHA1':
      return OAuthSignatureMethod.HMAC_SHA1()
    else:
      # A custom strategy class was stored; instantiate it.
      return self.__signature_strategy()

  def __set_signature_method(self, signature_method):
    self.__signature_strategy = self._CanonicalSignatureStrategy(
        signature_method)

  _signature_method = property(__get_signature_method, __set_signature_method,
      doc="""Returns object capable of signing the request using RSA or HMAC.

      Replaces the _signature_method member to avoid pickle errors.""")

  def GetSignatureMethod(self):
    """Gets the OAuth signature method.

    Returns:
      object of supertype <oauth.oauth.OAuthSignatureMethod>
    """
    return self._signature_method

  def GetConsumer(self):
    """Gets the OAuth consumer.

    Returns:
      object of type <oauth.oauth.Consumer>
    """
    return self._consumer
class ClientLoginToken(atom.http_interface.GenericToken):
  """Stores the Authorization header in auth_header and adds it to requests.

  The token injects its Authorization header into each HTTP request as the
  request is made. This token class is simple, but some Token classes must
  compute portions of the Authorization header from the request itself,
  which is why tokens make requests via an http_client parameter.

  Args:
    auth_header: str The value for the Authorization header.
    scopes: list of str or atom.url.Url giving the beginnings of URLs for
        which this token may be used. E.g. a scope of
        'http://example.com/foo' covers 'http://example.com/foo/bar' but
        not 'http://example.com/baz'.
  """

  def __init__(self, auth_header=None, scopes=None):
    self.auth_header = auth_header
    self.scopes = scopes or []

  def __str__(self):
    return self.auth_header

  def perform_request(self, http_client, operation, url, data=None,
                      headers=None):
    """Sets the Authorization header and makes the HTTP request."""
    if headers is None:
      headers = {}
    headers['Authorization'] = self.auth_header
    return http_client.request(operation, url, data=data, headers=headers)

  def get_token_string(self):
    """Removes PROGRAMMATIC_AUTH_LABEL to give just the token value."""
    prefix_length = len(PROGRAMMATIC_AUTH_LABEL)
    return self.auth_header[prefix_length:]

  def set_token_string(self, token_string):
    """Stores the token value behind the PROGRAMMATIC_AUTH_LABEL prefix."""
    self.auth_header = '%s%s' % (PROGRAMMATIC_AUTH_LABEL, token_string)

  def valid_for_scope(self, url):
    """Tells the caller if the token authorizes access to the desired URL."""
    if isinstance(url, (str, unicode)):
      url = atom.url.parse_url(url)
    for scope in self.scopes:
      if scope == atom.token_store.SCOPE_ALL:
        return True
      if isinstance(scope, (str, unicode)):
        scope = atom.url.parse_url(scope)
      if scope == url:
        return True
      # Compare host and path only; port and protocol are ignored.
      if scope.host == url.host:
        if not scope.path:
          # A host-wide scope covers every path on that host.
          return True
        if not url.path:
          # The scope has a path but the URL does not: not covered here,
          # though a later scope may still match.
          continue
        if url.path.startswith(scope.path):
          return True
    return False
class AuthSubToken(ClientLoginToken):
  """A ClientLoginToken variant that uses the AuthSub header prefix."""

  def get_token_string(self):
    """Removes AUTHSUB_AUTH_LABEL to give just the token value."""
    prefix_length = len(AUTHSUB_AUTH_LABEL)
    return self.auth_header[prefix_length:]

  def set_token_string(self, token_string):
    """Stores the token value behind the AUTHSUB_AUTH_LABEL prefix."""
    self.auth_header = '%s%s' % (AUTHSUB_AUTH_LABEL, token_string)
class OAuthToken(atom.http_interface.GenericToken):
  """Stores the token key, token secret and scopes for which token is valid.

  This token adds the authorization header to each request made. It
  re-calculates the authorization header for every request since the OAuth
  signature to be added to the authorization header is dependent on the
  request parameters.

  Attributes:
    key: str The value for the OAuth token i.e. token key.
    secret: str The value for the OAuth token secret.
    scopes: list of str or atom.url.Url specifying the beginnings of URLs
        for which this token can be used. For example, if scopes contains
        'http://example.com/foo', then this token can be used for a request
        to 'http://example.com/foo/bar' but it cannot be used for a request
        to 'http://example.com/baz'
    oauth_input_params: OAuthInputParams OAuth input parameters.
  """

  def __init__(self, key=None, secret=None, scopes=None,
               oauth_input_params=None):
    self.key = key
    self.secret = secret
    self.scopes = scopes or []
    self.oauth_input_params = oauth_input_params

  def __str__(self):
    return self.get_token_string()

  def get_token_string(self):
    """Returns the token string.

    Returns:
      A token string of format oauth_token=[0]&oauth_token_secret=[1],
      where [0] and [1] are some strings. If self.secret is absent, it just
      returns oauth_token=[0]. If self.key is absent, it just returns
      oauth_token_secret=[1]. If both are absent, it returns None.
    """
    if self.key and self.secret:
      # urlencode also percent-escapes the key and secret values.
      return urllib.urlencode({'oauth_token': self.key,
                               'oauth_token_secret': self.secret})
    elif self.key:
      return 'oauth_token=%s' % self.key
    elif self.secret:
      return 'oauth_token_secret=%s' % self.secret
    else:
      return None

  def set_token_string(self, token_string):
    """Sets the token key and secret from the token string.

    Args:
      token_string: str Token string of form
          oauth_token=[0]&oauth_token_secret=[1]. If oauth_token is not
          present, self.key keeps its previous value; likewise self.secret
          when oauth_token_secret is not present.
    """
    # keep_blank_values=False means parameters with empty values are
    # dropped, so e.g. 'oauth_token=' leaves self.key untouched.
    token_params = cgi.parse_qs(token_string, keep_blank_values=False)
    if 'oauth_token' in token_params:
      self.key = token_params['oauth_token'][0]
    if 'oauth_token_secret' in token_params:
      self.secret = token_params['oauth_token_secret'][0]

  def GetAuthHeader(self, http_method, http_url, realm=''):
    """Get the authentication header.

    Args:
      http_method: string HTTP method i.e. operation e.g. GET, POST, PUT,
          etc.
      http_url: string or atom.url.Url HTTP URL to which request is made.
      realm: string (default='') realm parameter to be included in the
          authorization header.

    Returns:
      dict Header to be sent with every subsequent request after
      authentication.
    """
    if isinstance(http_url, types.StringTypes):
      http_url = atom.url.parse_url(http_url)
    header = None
    token = None
    # Only build an oauth token object when credentials exist; otherwise
    # the request is signed with the consumer alone (token=None).
    if self.key or self.secret:
      token = oauth.OAuthToken(self.key, self.secret)
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(
        self.oauth_input_params.GetConsumer(), token=token,
        http_url=str(http_url), http_method=http_method,
        parameters=http_url.params)
    oauth_request.sign_request(self.oauth_input_params.GetSignatureMethod(),
                               self.oauth_input_params.GetConsumer(), token)
    header = oauth_request.to_header(realm=realm)
    # A literal '+' in the header would be decoded as a space server-side,
    # so percent-encode it explicitly.
    header['Authorization'] = header['Authorization'].replace('+', '%2B')
    return header

  def perform_request(self, http_client, operation, url, data=None,
                      headers=None):
    """Sets the Authorization header and makes the HTTP request."""
    if not headers:
      headers = {}
    # For 2-legged OAuth, identify the user being acted on behalf of.
    if self.oauth_input_params.requestor_id:
      url.params['xoauth_requestor_id'] = self.oauth_input_params.requestor_id
    headers.update(self.GetAuthHeader(operation, url))
    return http_client.request(operation, url, data=data, headers=headers)

  def valid_for_scope(self, url):
    # Same matching rules as ClientLoginToken.valid_for_scope: exact match,
    # whole-host scope, or scope path as a prefix of the URL path; port and
    # protocol are ignored throughout.
    if isinstance(url, (str, unicode)):
      url = atom.url.parse_url(url)
    for scope in self.scopes:
      if scope == atom.token_store.SCOPE_ALL:
        return True
      if isinstance(scope, (str, unicode)):
        scope = atom.url.parse_url(scope)
      if scope == url:
        return True
      # Check the host and the path, but ignore the port and protocol.
      elif scope.host == url.host and not scope.path:
        return True
      elif scope.host == url.host and scope.path and not url.path:
        continue
      elif scope.host == url.host and url.path.startswith(scope.path):
        return True
    return False
class SecureAuthSubToken(AuthSubToken):
  """Stores the rsa private key, token, and scopes for secure AuthSub.

  This token adds the authorization header to each request made. It
  re-calculates the authorization header for every request since the secure
  AuthSub signature to be added to the authorization header is dependent on
  the request parameters.

  Attributes:
    rsa_key: string The RSA private key in PEM format that the token will
        use to sign requests
    token_string: string (optional) The value for the AuthSub token.
    scopes: list of str or atom.url.Url specifying the beginnings of URLs
        for which this token can be used. For example, if scopes contains
        'http://example.com/foo', then this token can be used for a request
        to 'http://example.com/foo/bar' but it cannot be used for a request
        to 'http://example.com/baz'
  """

  def __init__(self, rsa_key, token_string=None, scopes=None):
    # keyfactory.parsePEMKey returns a key object used later to sign the
    # request data (presumably tlslite's keyfactory — confirm against the
    # imports at the top of this file).
    self.rsa_key = keyfactory.parsePEMKey(rsa_key)
    self.token_string = token_string or ''
    self.scopes = scopes or []

  def __str__(self):
    return self.get_token_string()

  def get_token_string(self):
    return str(self.token_string)

  def set_token_string(self, token_string):
    self.token_string = token_string

  def GetAuthHeader(self, http_method, http_url):
    """Generates the Authorization header.

    The form of the secure AuthSub Authorization header is
    Authorization: AuthSub token="token" sigalg="sigalg" data="data"
    sig="sig" and data represents a string in the form
    data = http_method http_url timestamp nonce

    Args:
      http_method: string HTTP method i.e. operation e.g. GET, POST, PUT,
          etc.
      http_url: string or atom.url.Url HTTP URL to which request is made.

    Returns:
      dict Header to be sent with every subsequent request after
      authentication.
    """
    # Whole-second timestamp plus a random 64-bit nonce; together they make
    # the signed data unique per request.
    timestamp = int(math.floor(time.time()))
    nonce = '%lu' % random.randrange(1, 2**64)
    data = '%s %s %d %s' % (http_method, str(http_url), timestamp, nonce)
    # Sign the data string with the RSA key and base64-encode the signature.
    sig = cryptomath.bytesToBase64(self.rsa_key.hashAndSign(data))
    header = {'Authorization': '%s"%s" data="%s" sig="%s" sigalg="rsa-sha1"' %
              (AUTHSUB_AUTH_LABEL, self.token_string, data, sig)}
    return header

  def perform_request(self, http_client, operation, url, data=None,
                      headers=None):
    """Sets the Authorization header and makes the HTTP request."""
    if not headers:
      headers = {}
    headers.update(self.GetAuthHeader(operation, url))
    return http_client.request(operation, url, data=data, headers=headers)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to Atom objects used with Google Base."""
__author__ = 'api.jscudder (Jeffrey Scudder)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
import gdata
# XML namespaces which are often used in Google Base entities.
GBASE_NAMESPACE = 'http://base.google.com/ns/1.0'
GBASE_TEMPLATE = '{http://base.google.com/ns/1.0}%s'
GMETA_NAMESPACE = 'http://base.google.com/ns-metadata/1.0'
GMETA_TEMPLATE = '{http://base.google.com/ns-metadata/1.0}%s'
class ItemAttributeContainer(object):
  """Provides methods for finding Google Base Item attributes.

  Google Base item attributes are child nodes in the gbase namespace. Google
  Base allows you to define your own item attributes and this class provides
  methods to interact with the custom attributes.

  NOTE(review): this is a mixin — it assumes the concrete subclass defines
  a self.item_attributes list (see GBaseItem.__init__).
  """

  def GetItemAttributes(self, name):
    """Returns a list of all item attributes which have the desired name.

    Args:
      name: str The tag of the desired base attributes. For example, calling
          this method with 'rating' would return a list of ItemAttributes
          represented by a 'g:rating' tag.

    Returns:
      A list of matching ItemAttribute objects.
    """
    result = []
    for attrib in self.item_attributes:
      if attrib.name == name:
        result.append(attrib)
    return result

  def FindItemAttribute(self, name):
    """Get the contents of the first Base item attribute which matches name.

    This method is deprecated, please use GetItemAttributes instead.

    Args:
      name: str The tag of the desired base attribute. For example, calling
          this method with name = 'rating' would search for a tag rating
          in the GBase namespace in the item attributes.

    Returns:
      The text contents of the item attribute, or None if the attribute was
      not found.
    """
    for attrib in self.item_attributes:
      if attrib.name == name:
        return attrib.text
    return None

  def AddItemAttribute(self, name, value, value_type=None, access=None):
    """Adds a new item attribute tag containing the value.

    Creates a new extension element in the GBase namespace to represent a
    Google Base item attribute.

    Args:
      name: str The tag name for the new attribute. This must be a valid xml
          tag name. The tag will be placed in the GBase namespace.
      value: str Contents for the item attribute
      value_type: str (optional) The type of data in the value. Examples:
          text, float
      access: str (optional) Used to hide attributes. The attribute is not
          exposed in the snippets feed if access is set to 'private'.
    """
    new_attribute = ItemAttribute(name, text=value,
        text_type=value_type, access=access)
    self.item_attributes.append(new_attribute)

  def SetItemAttribute(self, name, value):
    """Changes an existing item attribute's value.

    Only the first attribute with a matching name is updated; if no
    attribute matches, this is a no-op (no attribute is created).
    """
    for attrib in self.item_attributes:
      if attrib.name == name:
        attrib.text = value
        return

  def RemoveItemAttribute(self, name):
    """Deletes the first item attribute which matches name.

    Later attributes with the same name are left in place.
    """
    for i in xrange(len(self.item_attributes)):
      if self.item_attributes[i].name == name:
        del self.item_attributes[i]
        return

  # We need to overwrite _ConvertElementTreeToMember to add special logic to
  # convert custom attributes to members.
  def _ConvertElementTreeToMember(self, child_tree):
    # Find the element's tag in this class's list of child members.
    if self.__class__._children.has_key(child_tree.tag):
      member_name = self.__class__._children[child_tree.tag][0]
      member_class = self.__class__._children[child_tree.tag][1]
      # If the class member is supposed to contain a list, make sure the
      # matching member is set to a list, then append the new member
      # instance to the list.
      if isinstance(member_class, list):
        if getattr(self, member_name) is None:
          setattr(self, member_name, [])
        getattr(self, member_name).append(atom._CreateClassFromElementTree(
            member_class[0], child_tree))
      else:
        setattr(self, member_name,
                atom._CreateClassFromElementTree(member_class, child_tree))
    elif child_tree.tag.find('{%s}' % GBASE_NAMESPACE) == 0:
      # If this is in the gbase namespace, make it into an extension element.
      # The attribute name is the tag with the '{namespace}' prefix removed.
      name = child_tree.tag[child_tree.tag.index('}')+1:]
      value = child_tree.text
      if child_tree.attrib.has_key('type'):
        value_type = child_tree.attrib['type']
      else:
        value_type = None
      self.AddItemAttribute(name, value, value_type)
    else:
      # Anything else falls through to the generic extension handling.
      atom.ExtensionContainer._ConvertElementTreeToMember(self, child_tree)

  # We need to overwrite _AddMembersToElementTree to add special logic to
  # convert custom members to XML nodes.
  def _AddMembersToElementTree(self, tree):
    # Convert the members of this class which are XML child nodes.
    # This uses the class's _children dictionary to find the members which
    # should become XML child nodes.
    member_node_names = [values[0] for tag, values in
                         self.__class__._children.iteritems()]
    for member_name in member_node_names:
      member = getattr(self, member_name)
      if member is None:
        pass
      elif isinstance(member, list):
        for instance in member:
          instance._BecomeChildElement(tree)
      else:
        member._BecomeChildElement(tree)
    # Convert the members of this class which are XML attributes.
    for xml_attribute, member_name in self.__class__._attributes.iteritems():
      member = getattr(self, member_name)
      if member is not None:
        tree.attrib[xml_attribute] = member
    # Convert all special custom item attributes to nodes.
    for attribute in self.item_attributes:
      attribute._BecomeChildElement(tree)
    # Lastly, call the ExtensionContainer's _AddMembersToElementTree to
    # convert any extension attributes.
    atom.ExtensionContainer._AddMembersToElementTree(self, tree)
class ItemAttribute(atom.Text):
  """An optional or user defined attribute for a GBase item.

  Google Base allows items to have custom attribute child nodes. These nodes
  have contents and a type attribute which tells Google Base whether the
  contents are text, a float value with units, etc. The Atom text class has
  the same structure, so this class inherits from Text.
  """

  _namespace = GBASE_NAMESPACE
  _children = atom.Text._children.copy()
  _attributes = atom.Text._attributes.copy()
  # Map the 'access' XML attribute onto the .access member.
  _attributes['access'] = 'access'

  def __init__(self, name, text_type=None, access=None, text=None,
               extension_elements=None, extension_attributes=None):
    """Constructor for a GBase item attribute.

    Args:
      name: str The name of the attribute. Examples include
          price, color, make, model, pages, salary, etc.
      text_type: str (optional) The type associated with the text contents
      access: str (optional) If the access attribute is set to 'private',
          the attribute will not be included in the item's description in
          the snippets feed
      text: str (optional) The text data in this element
      extension_elements: list (optional) A list of ExtensionElement
          instances
      extension_attributes: dict (optional) A dictionary of attribute
          value string pairs
    """
    # Unlike fixed-tag elements there is no class-level _tag: the XML tag
    # of an item attribute is its name, chosen per instance.
    self.name = name
    self.type = text_type
    self.access = access
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}

  def _BecomeChildElement(self, tree):
    # Create a placeholder node, attach it to the parent, then set the real
    # tag — the tag is dynamic ('{namespace}name'), so it cannot come from
    # a class-level _tag as in other atom classes.
    new_child = ElementTree.Element('')
    tree.append(new_child)
    new_child.tag = '{%s}%s' % (self.__class__._namespace,
        self.name)
    self._AddMembersToElementTree(new_child)

  def _ToElementTree(self):
    # Build a standalone node whose tag embeds this attribute's name.
    new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace,
        self.name))
    self._AddMembersToElementTree(new_tree)
    return new_tree
def ItemAttributeFromString(xml_string):
  """Deserializes a GBase item attribute from its XML string form."""
  return _ItemAttributeFromElementTree(ElementTree.fromstring(xml_string))
def _ItemAttributeFromElementTree(element_tree):
  """Converts a GBase-namespace ElementTree node into an ItemAttribute.

  Args:
    element_tree: An ElementTree element whose tag is expected to be of the
        form '{gbase-namespace}name'.

  Returns:
    An ItemAttribute whose name is taken from the node's tag, or None when
    the node is not in the GBase namespace or its tag carries no name.
  """
  # Only nodes in the GBase namespace represent item attributes.
  if element_tree.tag.find(GBASE_TEMPLATE % '') != 0:
    return None
  attribute = ItemAttribute('')
  attribute._HarvestElementTree(element_tree)
  # The attribute name is the tag with the '{namespace}' prefix removed.
  attribute.name = element_tree.tag[element_tree.tag.index('}') + 1:]
  # A truthy name is necessarily a non-empty string, so the original
  # double-check (name and name != '') was redundant.
  if attribute.name:
    return attribute
  return None
class Label(atom.AtomBase):
  """The Google Base label element."""
  _tag = 'label'
  _namespace = GBASE_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
               extension_attributes=None):
    """Builds a label with optional text and extension data."""
    self.text = text
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []


def LabelFromString(xml_string):
  """Deserializes a Label from its XML string form."""
  return atom.CreateClassFromXMLString(Label, xml_string)
class Thumbnail(atom.AtomBase):
  """The Google Base thumbnail element."""
  _tag = 'thumbnail'
  _namespace = GMETA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['width'] = 'width'
  _attributes['height'] = 'height'

  def __init__(self, width=None, height=None, text=None,
               extension_elements=None, extension_attributes=None):
    """Builds a thumbnail element with optional size attributes."""
    self.width = width
    self.height = height
    self.text = text
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []


def ThumbnailFromString(xml_string):
  """Deserializes a Thumbnail from its XML string form."""
  return atom.CreateClassFromXMLString(Thumbnail, xml_string)
class ImageLink(atom.Text):
  """The Google Base image_link element."""
  _tag = 'image_link'
  _namespace = GBASE_NAMESPACE
  _children = atom.Text._children.copy()
  _attributes = atom.Text._attributes.copy()
  _children['{%s}thumbnail' % GMETA_NAMESPACE] = ('thumbnail', [Thumbnail])

  def __init__(self, thumbnail=None, text=None, extension_elements=None,
               text_type=None, extension_attributes=None):
    """Builds an image link, optionally holding a list of thumbnails."""
    self.thumbnail = thumbnail or []
    self.type = text_type
    self.text = text
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []


def ImageLinkFromString(xml_string):
  """Deserializes an ImageLink from its XML string form."""
  return atom.CreateClassFromXMLString(ImageLink, xml_string)
class ItemType(atom.Text):
  """The Google Base item_type element."""
  _tag = 'item_type'
  _namespace = GBASE_NAMESPACE
  _children = atom.Text._children.copy()
  _attributes = atom.Text._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
               text_type=None, extension_attributes=None):
    """Builds an item type element with optional text contents."""
    self.type = text_type
    self.text = text
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []


def ItemTypeFromString(xml_string):
  """Deserializes an ItemType from its XML string form."""
  return atom.CreateClassFromXMLString(ItemType, xml_string)
class MetaItemType(ItemType):
  """The item_type element as it appears in the metadata namespace.

  Structurally identical to ItemType but tagged in GMETA_NAMESPACE rather
  than GBASE_NAMESPACE.
  """
  _tag = 'item_type'
  _namespace = GMETA_NAMESPACE
  _children = ItemType._children.copy()
  _attributes = ItemType._attributes.copy()


def MetaItemTypeFromString(xml_string):
  """Deserializes a MetaItemType from its XML string form."""
  return atom.CreateClassFromXMLString(MetaItemType, xml_string)
class Value(atom.AtomBase):
  """Metadata about common values for a given attribute.

  A value is a child of an attribute which comes from the attributes feed.
  The value's text is a commonly used value paired with an attribute name,
  and the value's count tells how often this value appears for the given
  attribute in the search results.
  """
  _tag = 'value'
  _namespace = GMETA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['count'] = 'count'

  def __init__(self, count=None, text=None, extension_elements=None,
               extension_attributes=None):
    """Builds a value metadata element.

    Args:
      count: str (optional) How many times the value in text is given for
          the parent attribute.
      text: str (optional) The value which appears in the search results.
      extension_elements: list (optional) ExtensionElement instances.
      extension_attributes: dict (optional) Attribute-value string pairs.
    """
    self.count = count
    self.text = text
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []


def ValueFromString(xml_string):
  """Deserializes a Value from its XML string form."""
  return atom.CreateClassFromXMLString(Value, xml_string)
class Attribute(atom.Text):
  """Metadata about an attribute from the attributes feed.

  An entry from the attributes feed contains a list of attributes. Each
  attribute describes its type and the count of items which use it.
  """
  _tag = 'attribute'
  _namespace = GMETA_NAMESPACE
  _children = atom.Text._children.copy()
  _attributes = atom.Text._attributes.copy()
  _children['{%s}value' % GMETA_NAMESPACE] = ('value', [Value])
  _attributes['count'] = 'count'
  _attributes['name'] = 'name'

  def __init__(self, name=None, attribute_type=None, count=None, value=None,
               text=None, extension_elements=None, extension_attributes=None):
    """Builds an attribute metadata element.

    Args:
      name: str (optional) The name of the attribute.
      attribute_type: str (optional) The type for the attribute, e.g. text,
          float.
      count: str (optional) How many times this attribute appears in the
          query results.
      value: list (optional) The values which are often used for this
          attribute.
      text: str (optional) The text contents of the XML for this attribute.
      extension_elements: list (optional) ExtensionElement instances.
      extension_attributes: dict (optional) Attribute-value string pairs.
    """
    self.name = name
    self.type = attribute_type
    self.count = count
    self.value = value or []
    self.text = text
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []


def AttributeFromString(xml_string):
  """Deserializes an Attribute from its XML string form."""
  return atom.CreateClassFromXMLString(Attribute, xml_string)
class Attributes(atom.AtomBase):
  """A collection of Google Base metadata attributes."""
  _tag = 'attributes'
  _namespace = GMETA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute])

  def __init__(self, attribute=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Builds a container holding a list of Attribute children."""
    self.attribute = attribute or []
    self.text = text
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
class GBaseItem(ItemAttributeContainer, gdata.BatchEntry):
  """A Google Base flavor of an Atom Entry.

  Google Base items have required attributes, recommended attributes, and
  user defined attributes. The required attributes are stored in this class
  as members; other attributes are stored as extension elements. Use
  AddItemAttribute, SetItemAttribute, FindItemAttribute, and
  RemoveItemAttribute to access the recommended and user defined
  attributes.
  """
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.BatchEntry._children.copy()
  _attributes = gdata.BatchEntry._attributes.copy()
  _children['{%s}label' % GBASE_NAMESPACE] = ('label', [Label])
  _children['{%s}item_type' % GBASE_NAMESPACE] = ('item_type', ItemType)

  def __init__(self, author=None, category=None, content=None,
               contributor=None, atom_id=None, link=None, published=None,
               rights=None, source=None, summary=None, title=None,
               updated=None, control=None, label=None, item_type=None,
               item_attributes=None, batch_operation=None, batch_id=None,
               batch_status=None, text=None, extension_elements=None,
               extension_attributes=None):
    """Builds a Google Base item entry; all arguments are optional."""
    # Standard Atom entry members.
    self.author = author or []
    self.category = category or []
    self.content = content
    self.contributor = contributor or []
    self.id = atom_id
    self.link = link or []
    self.published = published
    self.rights = rights
    self.source = source
    self.summary = summary
    self.title = title
    self.updated = updated
    self.control = control
    # Google Base specific members.
    self.label = label or []
    self.item_type = item_type
    self.item_attributes = item_attributes or []
    # Batch operation members.
    self.batch_operation = batch_operation
    self.batch_id = batch_id
    self.batch_status = batch_status
    # Raw text and open extension storage.
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}


def GBaseItemFromString(xml_string):
  """Deserializes a GBaseItem from its XML string form."""
  return atom.CreateClassFromXMLString(GBaseItem, xml_string)
class GBaseSnippet(GBaseItem):
  """An entry from the snippets feed, structurally identical to GBaseItem."""
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = GBaseItem._children.copy()
  _attributes = GBaseItem._attributes.copy()


def GBaseSnippetFromString(xml_string):
  """Deserializes a GBaseSnippet from its XML string form."""
  return atom.CreateClassFromXMLString(GBaseSnippet, xml_string)
class GBaseAttributeEntry(gdata.GDataEntry):
  """An Atom Entry from the attributes feed."""
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute])

  def __init__(self, author=None, category=None, content=None,
               contributor=None, atom_id=None, link=None, published=None,
               rights=None, source=None, summary=None, title=None,
               updated=None, label=None, attribute=None, control=None,
               text=None, extension_elements=None, extension_attributes=None):
    """Builds an attributes-feed entry; all arguments are optional."""
    # Standard Atom entry members.
    self.author = author or []
    self.category = category or []
    self.content = content
    self.contributor = contributor or []
    self.id = atom_id
    self.link = link or []
    self.published = published
    self.rights = rights
    self.source = source
    self.summary = summary
    self.control = control
    self.title = title
    self.updated = updated
    # Google Base specific members.
    self.label = label or []
    self.attribute = attribute or []
    # Raw text and open extension storage.
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}


def GBaseAttributeEntryFromString(xml_string):
  """Deserializes a GBaseAttributeEntry from its XML string form."""
  return atom.CreateClassFromXMLString(GBaseAttributeEntry, xml_string)
class GBaseItemTypeEntry(gdata.GDataEntry):
  """An Atom entry from the item types feed

  These entries contain a list of attributes which are stored in one
  XML node called attributes. This class simplifies the data structure
  by treating attributes as a list of attribute instances.

  Note that the item_type for an item type entry is in the Google Base meta
  namespace as opposed to item_types encountered in other feeds.
  """
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  # Both the container element (attributes) and the repeated individual
  # attribute elements are mapped, per the class docstring above.
  _children['{%s}attributes' % GMETA_NAMESPACE] = ('attributes', Attributes)
  _children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute])
  _children['{%s}item_type' % GMETA_NAMESPACE] = ('item_type', MetaItemType)
  def __init__(self, author=None, category=None, content=None,
      contributor=None, atom_id=None, link=None, published=None, rights=None,
      source=None, summary=None, title=None, updated=None, label=None,
      item_type=None, control=None, attribute=None, attributes=None,
      text=None, extension_elements=None, extension_attributes=None):
    """Constructs a GBaseItemTypeEntry.

    Arguments mirror the standard gdata.GDataEntry constructor, plus:

    Args:
      item_type: MetaItemType (optional) The entry's gm:item_type element.
      attributes: Attributes (optional) The container of attribute elements.
      attribute: list (optional) Individual Attribute instances.
      label: list (optional) Labels applied to this entry.
    """
    self.author = author or []
    self.category = category or []
    self.content = content
    self.contributor = contributor or []
    self.id = atom_id
    self.link = link or []
    self.published = published
    self.rights = rights
    self.source = source
    self.summary = summary
    self.title = title
    self.updated = updated
    self.control = control
    self.label = label or []
    self.item_type = item_type
    self.attributes = attributes
    self.attribute = attribute or []
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def GBaseItemTypeEntryFromString(xml_string):
  """Parses an XML string into a GBaseItemTypeEntry instance."""
  return atom.CreateClassFromXMLString(GBaseItemTypeEntry, xml_string)
class GBaseItemFeed(gdata.BatchFeed):
  """A feed containing Google Base Items"""
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.BatchFeed._children.copy()
  _attributes = gdata.BatchFeed._attributes.copy()
  # Each atom:entry in the feed is parsed as a GBaseItem.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseItem])
def GBaseItemFeedFromString(xml_string):
  """Parses an XML string into a GBaseItemFeed instance."""
  return atom.CreateClassFromXMLString(GBaseItemFeed, xml_string)
class GBaseSnippetFeed(gdata.GDataFeed):
  """A feed containing Google Base Snippets"""
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  # Each atom:entry in the feed is parsed as a GBaseSnippet.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseSnippet])
def GBaseSnippetFeedFromString(xml_string):
  """Parses an XML string into a GBaseSnippetFeed instance."""
  return atom.CreateClassFromXMLString(GBaseSnippetFeed, xml_string)
class GBaseAttributesFeed(gdata.GDataFeed):
  """A feed containing Google Base Attributes

  A query sent to the attributes feed will return a feed of
  attributes which are present in the items that match the
  query.
  """
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  # Each atom:entry in the feed is parsed as a GBaseAttributeEntry.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [GBaseAttributeEntry])
def GBaseAttributesFeedFromString(xml_string):
  """Parses an XML string into a GBaseAttributesFeed instance."""
  return atom.CreateClassFromXMLString(GBaseAttributesFeed, xml_string)
class GBaseLocalesFeed(gdata.GDataFeed):
  """The locales feed from Google Base.

  This read-only feed defines the permitted locales for Google Base. The
  locale value identifies the language, currency, and date formats used in a
  feed.
  """
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  # No custom entry mapping: entries are plain gdata entries.
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
def GBaseLocalesFeedFromString(xml_string):
  """Parses an XML string into a GBaseLocalesFeed instance."""
  return atom.CreateClassFromXMLString(GBaseLocalesFeed, xml_string)
class GBaseItemTypesFeed(gdata.GDataFeed):
  """A feed from the Google Base item types feed"""
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  # Each atom:entry in the feed is parsed as a GBaseItemTypeEntry.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseItemTypeEntry])
def GBaseItemTypesFeedFromString(xml_string):
  """Parses an XML string into a GBaseItemTypesFeed instance."""
  return atom.CreateClassFromXMLString(GBaseItemTypesFeed, xml_string)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GBaseService extends the GDataService to streamline Google Base operations.
GBaseService: Provides methods to query feeds and manipulate items. Extends
GDataService.
DictionaryToParamList: Function which converts a dictionary into a list of
URL arguments (represented as strings). This is a
utility function used in CRUD operations.
"""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import urllib
import gdata
import atom.service
import gdata.service
import gdata.base
import atom
# URL to which all batch requests are sent (see GBaseService.ExecuteBatch).
BASE_BATCH_URL = 'http://www.google.com/base/feeds/items/batch'
class Error(Exception):
  """Base exception for errors raised by this module."""
  pass
class RequestError(Error):
  """Raised when a request to the Google Base service fails."""
  pass
class GBaseService(gdata.service.GDataService):
  """Client for the Google Base service.

  Extends GDataService with Base-specific query helpers, typed Get*
  converters, item CRUD, and batch execution. The optional developer
  API key is sent with every request as the X-Google-Key header.
  """
  def __init__(self, email=None, password=None, source=None,
               server='base.google.com', api_key=None, additional_headers=None,
               handler=None, **kwargs):
    """Creates a client for the Google Base service.

    Args:
      email: string (optional) The user's email address, used for
          authentication.
      password: string (optional) The user's password.
      source: string (optional) The name of the user's application.
      server: string (optional) The name of the server to which a connection
          will be opened. Default value: 'base.google.com'.
      api_key: string (optional) The Google Base API key to use.
      **kwargs: The other parameters to pass to gdata.service.GDataService
          constructor.
    """
    gdata.service.GDataService.__init__(
        self, email=email, password=password, service='gbase', source=source,
        server=server, additional_headers=additional_headers, handler=handler,
        **kwargs)
    # Assigning through the api_key property stores the key in
    # additional_headers so it rides along on every request.
    self.api_key = api_key
  def _SetAPIKey(self, api_key):
    # Store the key as the X-Google-Key request header, creating the
    # headers dict if the parent class did not.
    if not isinstance(self.additional_headers, dict):
      self.additional_headers = {}
    self.additional_headers['X-Google-Key'] = api_key
  def __SetAPIKey(self, api_key):
    # Name-mangled alias wired into the api_key property below.
    self._SetAPIKey(api_key)
  def _GetAPIKey(self):
    # Returns the stored API key, or None when no key has been set.
    if 'X-Google-Key' not in self.additional_headers:
      return None
    else:
      return self.additional_headers['X-Google-Key']
  def __GetAPIKey(self):
    # Name-mangled alias wired into the api_key property below.
    return self._GetAPIKey()
  api_key = property(__GetAPIKey, __SetAPIKey,
      doc="""Get or set the API key to be included in all requests.""")
  def Query(self, uri, converter=None):
    """Performs a style query and returns a resulting feed or entry.

    Args:
      uri: string The full URI which be queried. Examples include
          '/base/feeds/snippets?bq=digital+camera',
          'http://www.google.com/base/feeds/snippets?bq=digital+camera'
          '/base/feeds/items'
          I recommend creating a URI using a query class.
      converter: func (optional) A function which will be executed on the
          server's response. Examples include GBaseItemFromString, etc.

    Returns:
      If converter was specified, returns the results of calling converter on
      the server's response. If converter was not specified, and the result
      was an Atom Entry, returns a GBaseItem, by default, the method returns
      the result of calling gdata.service's Get method.
    """
    result = self.Get(uri, converter=converter)
    if converter:
      return result
    elif isinstance(result, atom.Entry):
      # No converter given: upgrade a plain Atom entry to a GBaseItem.
      return gdata.base.GBaseItemFromString(result.ToString())
    return result
  def QuerySnippetsFeed(self, uri):
    """Retrieves the given URI as a GBaseSnippetFeed."""
    return self.Get(uri, converter=gdata.base.GBaseSnippetFeedFromString)
  def QueryItemsFeed(self, uri):
    """Retrieves the given URI as a GBaseItemFeed."""
    return self.Get(uri, converter=gdata.base.GBaseItemFeedFromString)
  def QueryAttributesFeed(self, uri):
    """Retrieves the given URI as a GBaseAttributesFeed."""
    return self.Get(uri, converter=gdata.base.GBaseAttributesFeedFromString)
  def QueryItemTypesFeed(self, uri):
    """Retrieves the given URI as a GBaseItemTypesFeed."""
    return self.Get(uri, converter=gdata.base.GBaseItemTypesFeedFromString)
  def QueryLocalesFeed(self, uri):
    """Retrieves the given URI as a GBaseLocalesFeed."""
    return self.Get(uri, converter=gdata.base.GBaseLocalesFeedFromString)
  def GetItem(self, uri):
    """Retrieves a single entry at the given URI as a GBaseItem."""
    return self.Get(uri, converter=gdata.base.GBaseItemFromString)
  def GetSnippet(self, uri):
    """Retrieves a single entry at the given URI as a GBaseSnippet."""
    return self.Get(uri, converter=gdata.base.GBaseSnippetFromString)
  def GetAttribute(self, uri):
    """Retrieves a single entry at the given URI as a GBaseAttributeEntry."""
    return self.Get(uri, converter=gdata.base.GBaseAttributeEntryFromString)
  def GetItemType(self, uri):
    """Retrieves a single entry at the given URI as a GBaseItemTypeEntry."""
    return self.Get(uri, converter=gdata.base.GBaseItemTypeEntryFromString)
  def GetLocale(self, uri):
    """Retrieves a single locale entry at the given URI as a GDataEntry."""
    return self.Get(uri, converter=gdata.base.GDataEntryFromString)
  def InsertItem(self, new_item, url_params=None, escape_params=True,
      converter=None):
    """Adds an item to Google Base.

    Args:
      new_item: atom.Entry or subclass A new item which is to be added to
          Google Base.
      url_params: dict (optional) Additional URL parameters to be included
          in the insertion request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.
      converter: func (optional) Function which is executed on the server's
          response before it is returned. Usually this is a function like
          GBaseItemFromString which will parse the response and turn it into
          an object.

    Returns:
      If converter is defined, the results of running converter on the server's
      response. Otherwise, it will be a GBaseItem.
    """
    response = self.Post(new_item, '/base/feeds/items', url_params=url_params,
                         escape_params=escape_params, converter=converter)
    if not converter and isinstance(response, atom.Entry):
      return gdata.base.GBaseItemFromString(response.ToString())
    return response
  def DeleteItem(self, item_id, url_params=None, escape_params=True):
    """Removes an item with the specified ID from Google Base.

    Args:
      item_id: string The ID of the item to be deleted. Example:
          'http://www.google.com/base/feeds/items/13185446517496042648'
      url_params: dict (optional) Additional URL parameters to be included
          in the deletion request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.

    Returns:
      True if the delete succeeded.
    """
    # NOTE(review): only the literal 'http://www.google.com' prefix is
    # stripped to form the request path; an https or differently-hosted
    # item_id would produce a malformed path — confirm callers always pass
    # the canonical http URL before changing this.
    return self.Delete('%s' % (item_id[len('http://www.google.com'):],),
                       url_params=url_params, escape_params=escape_params)
  def UpdateItem(self, item_id, updated_item, url_params=None,
                 escape_params=True,
                 converter=gdata.base.GBaseItemFromString):
    """Updates an existing item.

    Args:
      item_id: string The ID of the item to be updated.  Example:
          'http://www.google.com/base/feeds/items/13185446517496042648'
      updated_item: atom.Entry, subclass, or string, containing
          the Atom Entry which will replace the base item which is
          stored at the item_id.
      url_params: dict (optional) Additional URL parameters to be included
          in the update request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.
      converter: func (optional) Function which is executed on the server's
          response before it is returned. Usually this is a function like
          GBaseItemFromString which will parse the response and turn it into
          an object.

    Returns:
      If converter is defined, the results of running converter on the server's
      response. Otherwise, it will be a GBaseItem.
    """
    response = self.Put(updated_item,
        item_id, url_params=url_params, escape_params=escape_params,
        converter=converter)
    if not converter and isinstance(response, atom.Entry):
      return gdata.base.GBaseItemFromString(response.ToString())
    return response
  def ExecuteBatch(self, batch_feed,
                   converter=gdata.base.GBaseItemFeedFromString):
    """Sends a batch request feed to the server.

    Args:
      batch_feed: gdata.BatchFeed A feed containing BatchEntry elements which
          contain the desired CRUD operation and any necessary entry data.
      converter: Function (optional) Function to be executed on the server's
          response. This function should take one string as a parameter. The
          default value is GBaseItemFeedFromString which will turn the result
          into a gdata.base.GBaseItem object.

    Returns:
      A gdata.BatchFeed containing the results.
    """
    return self.Post(batch_feed, BASE_BATCH_URL, converter=converter)
class BaseQuery(gdata.service.Query):
  """Query for Google Base feeds, exposing the 'bq' (Base query) parameter."""
  def _GetBaseQuery(self):
    # Query is dict-like: URL parameter name -> value.
    return self['bq']
  def _SetBaseQuery(self, base_query):
    self['bq'] = base_query
  bq = property(_GetBaseQuery, _SetBaseQuery,
      doc="""The bq query parameter""")
| Python |
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to Atom objects used with Google Health."""
__author__ = 'api.eric@google.com (Eric Bidelman)'
import atom
import gdata
# XML namespace of the ASTM Continuity of Care Record (CCR) standard.
CCR_NAMESPACE = 'urn:astm-org:CCR'
# XML namespace for Google Health metadata extension elements.
METADATA_NAMESPACE = 'http://schemas.google.com/health/metadata'
class Ccr(atom.AtomBase):
  """Represents a Google Health <ContinuityOfCareRecord>.

  The CCR payload keeps its clinical data beneath a single <Body> element;
  each category (Alerts, Problems, Procedures, ...) is a container holding
  repeated child elements. The Get* helpers below extract those children.
  """
  _tag = 'ContinuityOfCareRecord'
  _namespace = CCR_NAMESPACE
  _children = atom.AtomBase._children.copy()

  def __init__(self, extension_elements=None,
      extension_attributes=None, text=None):
    """Constructs a Ccr element.

    Args:
      extension_elements: list (optional) Child XML elements.
      extension_attributes: dict (optional) XML attributes of this element.
      text: str (optional) Text content of this element.
    """
    atom.AtomBase.__init__(self, extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)

  def _FindBodyChildren(self, section, child):
    """Returns the <child> elements of Body/<section>, or None when absent.

    Shared helper for all Get* convenience methods; previously this
    try/except block was duplicated in each of them.

    Args:
      section: string Tag of the container under <Body>, e.g. 'Alerts'.
      child: string Tag of the repeated element inside it, e.g. 'Alert'.

    Returns:
      A list of ExtensionElements, or None if the section is missing.
    """
    # IndexError: Body or the section is absent ([0] on an empty result).
    # AttributeError: an intermediate node is not element-like.
    # The original code used a bare `except:`, which also swallowed
    # SystemExit/KeyboardInterrupt and genuine programming errors.
    try:
      body = self.FindExtensions('Body')[0]
      return body.FindChildren(section)[0].FindChildren(child)
    except (IndexError, AttributeError):
      return None

  def GetAlerts(self):
    """Helper for extracting Alert/Allergy data from the CCR.

    Returns:
      A list of ExtensionElements (one for each allergy found) or None if
      no allergies were found in this CCR.
    """
    return self._FindBodyChildren('Alerts', 'Alert')

  def GetAllergies(self):
    """Alias for GetAlerts()."""
    return self.GetAlerts()

  def GetProblems(self):
    """Helper for extracting Problem/Condition data from the CCR.

    Returns:
      A list of ExtensionElements (one for each problem found) or None if
      no problems were found in this CCR.
    """
    return self._FindBodyChildren('Problems', 'Problem')

  def GetConditions(self):
    """Alias for GetProblems()."""
    return self.GetProblems()

  def GetProcedures(self):
    """Helper for extracting Procedure data from the CCR.

    Returns:
      A list of ExtensionElements (one for each procedure found) or None if
      no procedures were found in this CCR.
    """
    return self._FindBodyChildren('Procedures', 'Procedure')

  def GetImmunizations(self):
    """Helper for extracting Immunization data from the CCR.

    Returns:
      A list of ExtensionElements (one for each immunization found) or None
      if no immunizations were found in this CCR.
    """
    return self._FindBodyChildren('Immunizations', 'Immunization')

  def GetMedications(self):
    """Helper for extracting Medication data from the CCR.

    Returns:
      A list of ExtensionElements (one for each medication found) or None if
      no medications were found in this CCR.
    """
    return self._FindBodyChildren('Medications', 'Medication')

  def GetResults(self):
    """Helper for extracting Results/Labresults data from the CCR.

    Returns:
      A list of ExtensionElements (one for each result found) or None if
      no results were found in this CCR.
    """
    return self._FindBodyChildren('Results', 'Result')
class ProfileEntry(gdata.GDataEntry):
  """The Google Health version of an Atom Entry."""
  _tag = gdata.GDataEntry._tag
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  # The embedded CCR document is parsed into a Ccr object on self.ccr.
  _children['{%s}ContinuityOfCareRecord' % CCR_NAMESPACE] = ('ccr', Ccr)
  def __init__(self, ccr=None, author=None, category=None, content=None,
      atom_id=None, link=None, published=None, title=None,
      updated=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Constructs a ProfileEntry.

    Args:
      ccr: Ccr (optional) The entry's ContinuityOfCareRecord payload.
      (remaining arguments are forwarded to gdata.GDataEntry.__init__)
    """
    self.ccr = ccr
    gdata.GDataEntry.__init__(
        self, author=author, category=category, content=content,
        atom_id=atom_id, link=link, published=published, title=title,
        updated=updated, extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)
class ProfileFeed(gdata.GDataFeed):
  """A feed containing a list of Google Health profile entries."""
  _tag = gdata.GDataFeed._tag
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  # Each atom:entry in the feed is parsed as a ProfileEntry.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ProfileEntry])
class ProfileListEntry(gdata.GDataEntry):
  """The Atom Entry in the Google Health profile list feed."""
  _tag = gdata.GDataEntry._tag
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  def GetProfileId(self):
    """Returns the profile id, carried as the entry's content text."""
    return self.content.text
  def GetProfileName(self):
    """Returns the human-readable profile name (the entry's title text)."""
    return self.title.text
class ProfileListFeed(gdata.GDataFeed):
  """A feed containing a list of Google Health profile list entries."""
  _tag = gdata.GDataFeed._tag
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  # Each atom:entry in the feed is parsed as a ProfileListEntry.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ProfileListEntry])
def ProfileEntryFromString(xml_string):
  """Converts an XML string into a ProfileEntry object.

  Args:
    xml_string: string The XML describing a Health profile feed entry.

  Returns:
    A ProfileEntry object corresponding to the given XML.
  """
  return atom.CreateClassFromXMLString(ProfileEntry, xml_string)
def ProfileListEntryFromString(xml_string):
  """Converts an XML string into a ProfileListEntry object.

  Args:
    xml_string: string The XML describing a Health profile list feed entry.

  Returns:
    A ProfileListEntry object corresponding to the given XML.
  """
  return atom.CreateClassFromXMLString(ProfileListEntry, xml_string)
def ProfileFeedFromString(xml_string):
  """Converts an XML string into a ProfileFeed object.

  Args:
    xml_string: string The XML describing a profile feed.

  Returns:
    A ProfileFeed object corresponding to the given XML.
  """
  return atom.CreateClassFromXMLString(ProfileFeed, xml_string)
def ProfileListFeedFromString(xml_string):
  """Converts an XML string into a ProfileListFeed object.

  Args:
    xml_string: string The XML describing a profile list feed.

  Returns:
    A ProfileListFeed object corresponding to the given XML.
  """
  return atom.CreateClassFromXMLString(ProfileListFeed, xml_string)
| Python |
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HealthService extends GDataService to streamline Google Health API access.
HealthService: Provides methods to interact with the profile, profile list,
and register/notices feeds. Extends GDataService.
HealthProfileQuery: Queries the Google Health Profile feed.
HealthProfileListQuery: Queries the Google Health Profile list feed.
"""
__author__ = 'api.eric@google.com (Eric Bidelman)'
import atom
import gdata.health
import gdata.service
class HealthService(gdata.service.GDataService):
  """Client extension for the Google Health service Document List feed."""

  def __init__(self, email=None, password=None, source=None,
               use_h9_sandbox=False, server='www.google.com',
               additional_headers=None, **kwargs):
    """Creates a client for the Google Health service.

    Args:
      email: string (optional) The user's email address, used for
          authentication.
      password: string (optional) The user's password.
      source: string (optional) The name of the user's application.
      use_h9_sandbox: boolean (optional) True to issue requests against the
          /h9 developer's sandbox.
      server: string (optional) The name of the server to which a connection
          will be opened.
      additional_headers: dictionary (optional) Any additional headers which
          should be included with CRUD operations.
      kwargs: The other parameters to pass to gdata.service.GDataService
          constructor.
    """
    # 'weaver' is the ClientLogin service name for the H9 sandbox.
    # (Conditional expression replaces the fragile `cond and a or b` idiom.)
    service = 'weaver' if use_h9_sandbox else 'health'
    gdata.service.GDataService.__init__(
        self, email=email, password=password, service=service, source=source,
        server=server, additional_headers=additional_headers, **kwargs)
    self.ssl = True
    self.use_h9_sandbox = use_h9_sandbox

  def __get_service(self):
    # URL path component for feed URIs. Note it intentionally differs from
    # the ClientLogin service name used in __init__ ('h9' vs 'weaver').
    return 'h9' if self.use_h9_sandbox else 'health'

  def GetProfileFeed(self, query=None, profile_id=None):
    """Fetches the users Google Health profile feed.

    Args:
      query: HealthProfileQuery or string (optional) A query to use on the
          profile feed. If None, a HealthProfileQuery is constructed.
      profile_id: string (optional) The profile id to query the profile feed
          with when using ClientLogin. Note: this parameter is ignored if
          query is set.

    Returns:
      A gdata.health.ProfileFeed object containing the user's Health profile.
    """
    if query is None:
      # ClientLogin (explicit profile id) uses the 'ui' projection.
      projection = 'ui' if profile_id else 'default'
      uri = HealthProfileQuery(
          service=self.__get_service(), projection=projection,
          profile_id=profile_id).ToUri()
    elif isinstance(query, HealthProfileQuery):
      uri = query.ToUri()
    else:
      uri = query
    return self.GetFeed(uri, converter=gdata.health.ProfileFeedFromString)

  def GetProfileListFeed(self, query=None):
    """Fetches the users Google Health profile feed.

    Args:
      query: HealthProfileListQuery or string (optional) A query to use
          on the profile list feed. If None, a HealthProfileListQuery is
          constructed to /health/feeds/profile/list or /h9/feeds/profile/list.

    Returns:
      A gdata.health.ProfileListFeed object containing the user's list
      of profiles.
    """
    if not query:
      uri = HealthProfileListQuery(service=self.__get_service()).ToUri()
    elif isinstance(query, HealthProfileListQuery):
      uri = query.ToUri()
    else:
      uri = query
    return self.GetFeed(uri, converter=gdata.health.ProfileListFeedFromString)

  def SendNotice(self, subject, body=None, content_type='html',
                 ccr=None, profile_id=None):
    """Sends (posts) a notice to the user's Google Health profile.

    Args:
      subject: A string representing the message's subject line.
      body: string (optional) The message body.
      content_type: string (optional) The content type of the notice message
          body.  This parameter is only honored when a message body is
          specified.
      ccr: string (optional) The CCR XML document to reconcile into the
          user's profile.
      profile_id: string (optional) The profile id to work with when using
          ClientLogin. Note: this parameter is ignored if query is set.

    Returns:
      A gdata.health.ProfileEntry object of the posted entry.
    """
    if body:
      content = atom.Content(content_type=content_type, text=body)
    else:
      content = body
    # NOTE(review): ExtensionElementFromString is called unconditionally, so
    # a None ccr would fail here despite being documented optional — confirm
    # callers always supply a CCR document.
    entry = gdata.GDataEntry(
        title=atom.Title(text=subject), content=content,
        extension_elements=[atom.ExtensionElementFromString(ccr)])
    projection = 'ui' if profile_id else 'default'
    query = HealthRegisterQuery(service=self.__get_service(),
                                projection=projection, profile_id=profile_id)
    return self.Post(entry, query.ToUri(),
                     converter=gdata.health.ProfileEntryFromString)
class HealthProfileQuery(gdata.service.Query):
  """Object used to construct a URI to query the Google Health profile feed."""

  def __init__(self, service='health', feed='feeds/profile',
               projection='default', profile_id=None, text_query=None,
               params=None, categories=None):
    """Constructor for Health profile feed query.

    Args:
      service: string (optional) The service to query. Either 'health' or 'h9'.
      feed: string (optional) The path for the feed. The default value is
          'feeds/profile'.
      projection: string (optional) The visibility of the data. Possible
          values are 'default' for AuthSub and 'ui' for ClientLogin. If this
          value is set to 'ui', the profile_id parameter should also be set.
      profile_id: string (optional) The profile id to query. This should only
          be used when using ClientLogin.
      text_query: str (optional) The contents of the q query parameter. The
          contents of the text_query are URL escaped upon conversion to a URI.
          Note: this parameter can only be used on the register feed using
          ClientLogin.
      params: dict (optional) Parameter value string pairs which become URL
          params when translated to a URI. These parameters are added to
          the query's items.
      categories: list (optional) List of category strings which should be
          included as query categories. See gdata.service.Query for
          additional documentation.
    """
    self.service = service
    self.profile_id = profile_id
    self.projection = projection
    gdata.service.Query.__init__(self, feed=feed, text_query=text_query,
                                 params=params, categories=categories)

  def ToUri(self):
    """Generates a URI from the query parameters set in the object.

    Returns:
      A string containing the URI used to retrieve entries from the Health
      profile feed.
    """
    # gdata.service.Query builds the URI from self.feed, so the full path
    # (/service/feed/projection[/profile_id]) is swapped in temporarily.
    old_feed = self.feed
    path = '/'.join([self.service, old_feed, self.projection])
    if self.profile_id:
      path += '/' + self.profile_id
    self.feed = '/%s' % (path,)
    try:
      # Fix: restore self.feed even if URI generation raises; previously an
      # exception here left the query object permanently mutated.
      return gdata.service.Query.ToUri(self)
    finally:
      self.feed = old_feed
class HealthProfileListQuery(gdata.service.Query):
  """Builds URIs for the Google Health profile list feed."""

  def __init__(self, service='health', feed='feeds/profile/list'):
    """Constructor for Health profile list feed query.

    Args:
      service: string (optional) The service to query: 'health' or 'h9'.
      feed: string (optional) The feed path; defaults to
          'feeds/profile/list'.
    """
    gdata.service.Query.__init__(self, feed)
    self.service = service

  def ToUri(self):
    """Generates a URI from the query parameters set in the object.

    Returns:
      A string containing the URI used to retrieve entries from the
      profile list feed, of the form '/<service>/<feed>'.
    """
    return '/%s/%s' % (self.service, self.feed)
class HealthRegisterQuery(gdata.service.Query):
  """Object used to construct a URI to query a Health register/notice feed."""

  def __init__(self, service='health', feed='feeds/register',
               projection='default', profile_id=None):
    """Constructor for Health register/notice feed query.

    Args:
      service: string (optional) The service to query. Either 'health' or 'h9'.
      feed: string (optional) The path for the feed. The default value is
          'feeds/register'.
      projection: string (optional) The visibility of the data. Possible
          values are 'default' for AuthSub and 'ui' for ClientLogin. If this
          value is set to 'ui', the profile_id parameter should also be set.
      profile_id: string (optional) The profile id to query. This should only
          be used when using ClientLogin.
    """
    gdata.service.Query.__init__(self, feed)
    self.service = service
    self.projection = projection
    self.profile_id = profile_id

  def ToUri(self):
    """Generates a URI from the query parameters set in the object.

    Returns:
      A string containing the URI needed to interact with the register feed.
    """
    # gdata.service.Query builds the URI from self.feed, so the composed
    # service/feed/projection path is swapped in temporarily.
    old_feed = self.feed
    self.feed = '/'.join([self.service, old_feed, self.projection])
    try:
      # Fix: restore self.feed even if URI generation raises; previously an
      # exception here left the query object permanently mutated.
      new_feed = gdata.service.Query.ToUri(self)
    finally:
      self.feed = old_feed
    # Unlike HealthProfileQuery, the profile id is appended after query
    # parameter expansion.
    if self.profile_id:
      new_feed += '/' + self.profile_id
    return '/%s' % (new_feed,)
| Python |
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model classes for parsing and generating XML for the Calendar
Resource API."""
__author__ = 'Vic Fryzel <vf@google.com>'
import atom.core
import atom.data
import gdata.apps
import gdata.data
# This alias is required to work around a naming conflict: entry classes in
# this module define a class attribute named 'property' (the apps:property
# elements), which would shadow Python's built-in property() function.
pyproperty = property
# The apps:property name of the resourceId property
RESOURCE_ID_NAME = 'resourceId'
# The apps:property name of the resourceCommonName property
RESOURCE_COMMON_NAME_NAME = 'resourceCommonName'
# The apps:property name of the resourceDescription property
RESOURCE_DESCRIPTION_NAME = 'resourceDescription'
# The apps:property name of the resourceType property
RESOURCE_TYPE_NAME = 'resourceType'
class AppsProperty(atom.core.XmlElement):
  """Represents an <apps:property> element in a Calendar Resource feed."""
  _qname = gdata.apps.APPS_TEMPLATE % 'property'
  # 'name' and 'value' map to the XML attributes of the same names.
  name = 'name'
  value = 'value'
class CalendarResourceEntry(gdata.data.GDEntry):
  """A single calendar resource, exposed through apps:property pairs.

  Each piece of resource data (id, common name, description, type) is
  stored as an <apps:property name="..." value="..."> child element; the
  accessors and pyproperty descriptors below translate between
  attribute-style access and that name/value list.
  """

  property = [AppsProperty]

  def _GetProperty(self, name):
    """Looks up the apps:property value stored under *name*.

    Args:
      name: string Name of the apps:property value to fetch.

    Returns:
      The matching value string, or None when no property has that name.
    """
    for prop in self.property:
      if prop.name == name:
        return prop.value
    return None

  def _SetProperty(self, name, value):
    """Stores *value* under *name*, replacing any existing entry.

    Args:
      name: string Name of the apps:property value to set.
      value: string New value for the named apps:property.
    """
    for prop in self.property:
      if prop.name == name:
        prop.value = value
        return
    # No property with this name yet; append a fresh one.
    self.property.append(AppsProperty(name=name, value=value))

  def GetResourceId(self):
    """Returns this resource's ID string, or None when unset."""
    return self._GetProperty(RESOURCE_ID_NAME)

  def SetResourceId(self, value):
    """Sets this resource's ID to *value*."""
    self._SetProperty(RESOURCE_ID_NAME, value)

  resource_id = pyproperty(GetResourceId, SetResourceId)

  def GetResourceCommonName(self):
    """Returns this resource's common name, or None when unset."""
    return self._GetProperty(RESOURCE_COMMON_NAME_NAME)

  def SetResourceCommonName(self, value):
    """Sets this resource's common name to *value*."""
    self._SetProperty(RESOURCE_COMMON_NAME_NAME, value)

  resource_common_name = pyproperty(GetResourceCommonName,
                                    SetResourceCommonName)

  def GetResourceDescription(self):
    """Returns this resource's description, or None when unset."""
    return self._GetProperty(RESOURCE_DESCRIPTION_NAME)

  def SetResourceDescription(self, value):
    """Sets this resource's description to *value*."""
    self._SetProperty(RESOURCE_DESCRIPTION_NAME, value)

  resource_description = pyproperty(GetResourceDescription,
                                    SetResourceDescription)

  def GetResourceType(self):
    """Returns this resource's type, or None when unset."""
    return self._GetProperty(RESOURCE_TYPE_NAME)

  def SetResourceType(self, value):
    """Sets this resource's type to *value*."""
    self._SetProperty(RESOURCE_TYPE_NAME, value)

  resource_type = pyproperty(GetResourceType, SetResourceType)

  def __init__(self, resource_id=None, resource_common_name=None,
               resource_description=None, resource_type=None, *args, **kwargs):
    """Constructs a CalendarResourceEntry, optionally seeding its properties.

    Args:
      resource_id: string (optional) The resource ID to give this new object.
      resource_common_name: string (optional) The common name to give this
          new object.
      resource_description: string (optional) The description to give this
          new object.
      resource_type: string (optional) The type to give this new object.
      args: The other parameters to pass to gdata.data.GDEntry constructor.
      kwargs: The other parameters to pass to gdata.data.GDEntry constructor.
    """
    super(CalendarResourceEntry, self).__init__(*args, **kwargs)
    # Only truthy values are written; each call routes through the same
    # setters the pyproperty descriptors above use.
    for setter, seed in ((self.SetResourceId, resource_id),
                         (self.SetResourceCommonName, resource_common_name),
                         (self.SetResourceDescription, resource_description),
                         (self.SetResourceType, resource_type)):
      if seed:
        setter(seed)
class CalendarResourceFeed(gdata.data.GDFeed):
  """Represents a feed of CalendarResourceEntry objects."""
  # Parsed <atom:entry> elements materialize as CalendarResourceEntry.
  entry = [CalendarResourceEntry]
| Python |
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CalendarResourceClient simplifies Calendar Resources API calls.
CalendarResourceClient extends gdata.client.GDClient to ease interaction with
the Google Apps Calendar Resources API. These interactions include the ability
to create, retrieve, update, and delete calendar resources in a Google Apps
domain.
"""
__author__ = 'Vic Fryzel <vf@google.com>'
import urllib
import atom.data
import gdata.client
import gdata.calendar_resource.data
# Feed URI template. This must end with a /
# Interpolated as (api_version, domain) by make_resource_feed_uri below.
RESOURCE_FEED_TEMPLATE = '/a/feeds/calendar/resource/%s/%s/'
class CalendarResourceClient(gdata.client.GDClient):
  """Client extension for the Google Calendar Resource API service.

  Attributes:
    host: string The hostname for the Calendar Resource API service.
    api_version: string The version of the Calendar Resource API.
  """

  host = 'apps-apis.google.com'
  api_version = '2.0'
  auth_service = 'apps'
  auth_scopes = gdata.gauth.AUTH_SCOPES['apps']

  def __init__(self, domain, auth_token=None, **kwargs):
    """Constructs a new client for the Calendar Resource API.

    Args:
      domain: string The Google Apps domain with Calendar Resources.
      auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
          OAuthToken which authorizes this client to edit the calendar
          resource data.
      kwargs: The other parameters to pass to gdata.client.GDClient
          constructor.
    """
    gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs)
    self.domain = domain

  def make_resource_feed_uri(self, resource_id=None, params=None):
    """Creates a resource feed URI for the Calendar Resource API.

    Using this client's Google Apps domain, create a feed URI for calendar
    resources in that domain. If a resource_id is provided, return a URI
    for that specific resource. If params are provided, append them as GET
    params.

    Args:
      resource_id: string (optional) The ID of the calendar resource for
          which to make a feed URI.
      params: dict (optional) key -> value params to append as GET vars to
          the URI. Example: params={'start': 'my-resource-id'}

    Returns:
      A string giving the URI for calendar resources for this client's
      Google Apps domain.
    """
    uri = RESOURCE_FEED_TEMPLATE % (self.api_version, self.domain)
    if resource_id:
      uri += resource_id
    if params:
      uri += '?' + urllib.urlencode(params)
    return uri

  MakeResourceFeedUri = make_resource_feed_uri

  def get_resource_feed(self, uri=None, **kwargs):
    """Fetches a ResourceFeed of calendar resources at the given URI.

    Args:
      uri: string The URI of the feed to pull.

    Returns:
      A ResourceFeed object representing the feed at the given URI.
    """
    if uri is None:
      uri = self.MakeResourceFeedUri()
    return self.get_feed(
        uri,
        desired_class=gdata.calendar_resource.data.CalendarResourceFeed,
        **kwargs)

  GetResourceFeed = get_resource_feed

  def get_resource(self, uri=None, resource_id=None, **kwargs):
    """Fetches a single calendar resource by resource ID.

    Args:
      uri: string The base URI of the feed from which to fetch the resource.
      resource_id: string The string ID of the Resource to fetch.

    Returns:
      A Resource object representing the calendar resource with the given
      base URI and resource ID.
    """
    if uri is None:
      # BUG FIX: previously called self.MakeResourceUri, which is not
      # defined on this class and raised AttributeError; the URI helper is
      # MakeResourceFeedUri (which accepts an optional resource_id).
      uri = self.MakeResourceFeedUri(resource_id)
    return self.get_entry(
        uri,
        desired_class=gdata.calendar_resource.data.CalendarResourceEntry,
        **kwargs)

  GetResource = get_resource

  def create_resource(self, resource_id, resource_common_name=None,
                      resource_description=None, resource_type=None, **kwargs):
    """Creates a calendar resource with the given properties.

    Args:
      resource_id: string The resource ID of the calendar resource.
      resource_common_name: string (optional) The common name of the
          resource.
      resource_description: string (optional) The description of the
          resource.
      resource_type: string (optional) The type of the resource.

    Returns:
      gdata.calendar_resource.data.CalendarResourceEntry of the new
      resource.
    """
    new_resource = gdata.calendar_resource.data.CalendarResourceEntry(
        resource_id=resource_id, resource_common_name=resource_common_name,
        resource_description=resource_description,
        resource_type=resource_type)
    return self.post(new_resource, self.MakeResourceFeedUri(), **kwargs)

  CreateResource = create_resource

  def update_resource(self, resource_id, resource_common_name=None,
                      resource_description=None, resource_type=None, **kwargs):
    """Updates the calendar resource with the given resource ID.

    Args:
      resource_id: string The resource ID of the calendar resource to
          update.
      resource_common_name: string (optional) The common name to give the
          resource.
      resource_description: string (optional) The description to give the
          resource.
      resource_type: string (optional) The type to give the resource.

    Returns:
      gdata.calendar_resource.data.CalendarResourceEntry of the updated
      resource.
    """
    new_resource = gdata.calendar_resource.data.CalendarResourceEntry(
        resource_id=resource_id, resource_common_name=resource_common_name,
        resource_description=resource_description,
        resource_type=resource_type)
    return self.update(new_resource,
                       self.MakeResourceFeedUri(resource_id), **kwargs)

  UpdateResource = update_resource

  def delete_resource(self, resource_id, **kwargs):
    """Deletes the calendar resource with the given resource ID.

    Args:
      resource_id: string The resource ID of the calendar resource to
          delete.
      kwargs: Other parameters to pass to gdata.client.delete()

    Returns:
      An HTTP response object. See gdata.client.request().
    """
    return self.delete(self.MakeResourceFeedUri(resource_id), **kwargs)

  DeleteResource = delete_resource
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
"""Provides classes and constants for the XML in the Google Data namespace.
Documentation for the raw XML which these classes represent can be found here:
http://code.google.com/apis/gdata/docs/2.0/elements.html
"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import os
import atom.core
import atom.data
# Clark-notation qualified-name templates for the namespaces used below.
GDATA_TEMPLATE = '{http://schemas.google.com/g/2005}%s'
GD_TEMPLATE = GDATA_TEMPLATE
OPENSEARCH_TEMPLATE_V1 = '{http://a9.com/-/spec/opensearchrss/1.0/}%s'
OPENSEARCH_TEMPLATE_V2 = '{http://a9.com/-/spec/opensearch/1.1/}%s'
BATCH_TEMPLATE = '{http://schemas.google.com/gdata/batch}%s'
# Labels used in batch request entries to specify the desired CRUD operation.
BATCH_INSERT = 'insert'
BATCH_UPDATE = 'update'
BATCH_DELETE = 'delete'
BATCH_QUERY = 'query'
# Event where/kind rel values.
EVENT_LOCATION = 'http://schemas.google.com/g/2005#event'
ALTERNATE_LOCATION = 'http://schemas.google.com/g/2005#event.alternate'
PARKING_LOCATION = 'http://schemas.google.com/g/2005#event.parking'
# Event status values.
CANCELED_EVENT = 'http://schemas.google.com/g/2005#event.canceled'
CONFIRMED_EVENT = 'http://schemas.google.com/g/2005#event.confirmed'
TENTATIVE_EVENT = 'http://schemas.google.com/g/2005#event.tentative'
# Event visibility values.
CONFIDENTIAL_EVENT = 'http://schemas.google.com/g/2005#event.confidential'
DEFAULT_EVENT = 'http://schemas.google.com/g/2005#event.default'
PRIVATE_EVENT = 'http://schemas.google.com/g/2005#event.private'
PUBLIC_EVENT = 'http://schemas.google.com/g/2005#event.public'
# Event transparency (free/busy) values.
OPAQUE_EVENT = 'http://schemas.google.com/g/2005#event.opaque'
TRANSPARENT_EVENT = 'http://schemas.google.com/g/2005#event.transparent'
# Message kinds.
CHAT_MESSAGE = 'http://schemas.google.com/g/2005#message.chat'
INBOX_MESSAGE = 'http://schemas.google.com/g/2005#message.inbox'
SENT_MESSAGE = 'http://schemas.google.com/g/2005#message.sent'
SPAM_MESSAGE = 'http://schemas.google.com/g/2005#message.spam'
STARRED_MESSAGE = 'http://schemas.google.com/g/2005#message.starred'
UNREAD_MESSAGE = 'http://schemas.google.com/g/2005#message.unread'
# Message recipient roles.
BCC_RECIPIENT = 'http://schemas.google.com/g/2005#message.bcc'
CC_RECIPIENT = 'http://schemas.google.com/g/2005#message.cc'
SENDER = 'http://schemas.google.com/g/2005#message.from'
REPLY_TO = 'http://schemas.google.com/g/2005#message.reply-to'
TO_RECIPIENT = 'http://schemas.google.com/g/2005#message.to'
# rel values for phone numbers, addresses and similar contact fields.
ASSISTANT_REL = 'http://schemas.google.com/g/2005#assistant'
CALLBACK_REL = 'http://schemas.google.com/g/2005#callback'
CAR_REL = 'http://schemas.google.com/g/2005#car'
COMPANY_MAIN_REL = 'http://schemas.google.com/g/2005#company_main'
FAX_REL = 'http://schemas.google.com/g/2005#fax'
HOME_REL = 'http://schemas.google.com/g/2005#home'
HOME_FAX_REL = 'http://schemas.google.com/g/2005#home_fax'
ISDN_REL = 'http://schemas.google.com/g/2005#isdn'
MAIN_REL = 'http://schemas.google.com/g/2005#main'
MOBILE_REL = 'http://schemas.google.com/g/2005#mobile'
OTHER_REL = 'http://schemas.google.com/g/2005#other'
OTHER_FAX_REL = 'http://schemas.google.com/g/2005#other_fax'
PAGER_REL = 'http://schemas.google.com/g/2005#pager'
RADIO_REL = 'http://schemas.google.com/g/2005#radio'
TELEX_REL = 'http://schemas.google.com/g/2005#telex'
# NOTE: constant name says TTL but the rel is tty_tdd; kept for
# backward compatibility with existing callers.
TTL_TDD_REL = 'http://schemas.google.com/g/2005#tty_tdd'
WORK_REL = 'http://schemas.google.com/g/2005#work'
WORK_FAX_REL = 'http://schemas.google.com/g/2005#work_fax'
WORK_MOBILE_REL = 'http://schemas.google.com/g/2005#work_mobile'
WORK_PAGER_REL = 'http://schemas.google.com/g/2005#work_pager'
NETMEETING_REL = 'http://schemas.google.com/g/2005#netmeeting'
# Rating rel values.
OVERALL_REL = 'http://schemas.google.com/g/2005#overall'
PRICE_REL = 'http://schemas.google.com/g/2005#price'
QUALITY_REL = 'http://schemas.google.com/g/2005#quality'
# Event link rel values.
EVENT_REL = 'http://schemas.google.com/g/2005#event'
EVENT_ALTERNATE_REL = 'http://schemas.google.com/g/2005#event.alternate'
EVENT_PARKING_REL = 'http://schemas.google.com/g/2005#event.parking'
# Instant-messaging protocol identifiers (for gd:im).
AIM_PROTOCOL = 'http://schemas.google.com/g/2005#AIM'
MSN_PROTOCOL = 'http://schemas.google.com/g/2005#MSN'
YAHOO_MESSENGER_PROTOCOL = 'http://schemas.google.com/g/2005#YAHOO'
SKYPE_PROTOCOL = 'http://schemas.google.com/g/2005#SKYPE'
QQ_PROTOCOL = 'http://schemas.google.com/g/2005#QQ'
GOOGLE_TALK_PROTOCOL = 'http://schemas.google.com/g/2005#GOOGLE_TALK'
ICQ_PROTOCOL = 'http://schemas.google.com/g/2005#ICQ'
JABBER_PROTOCOL = 'http://schemas.google.com/g/2005#JABBER'
# Comments feed rel values.
REGULAR_COMMENTS = 'http://schemas.google.com/g/2005#regular'
REVIEW_COMMENTS = 'http://schemas.google.com/g/2005#reviews'
# Postal address mail usage values.
MAIL_BOTH = 'http://schemas.google.com/g/2005#both'
MAIL_LETTERS = 'http://schemas.google.com/g/2005#letters'
MAIL_PARCELS = 'http://schemas.google.com/g/2005#parcels'
MAIL_NEITHER = 'http://schemas.google.com/g/2005#neither'
GENERAL_ADDRESS = 'http://schemas.google.com/g/2005#general'
LOCAL_ADDRESS = 'http://schemas.google.com/g/2005#local'
# Event attendee types and statuses.  NOTE: ATENDEE spelling (sic) is part
# of the public API and must not be corrected.
OPTIONAL_ATENDEE = 'http://schemas.google.com/g/2005#event.optional'
REQUIRED_ATENDEE = 'http://schemas.google.com/g/2005#event.required'
ATTENDEE_ACCEPTED = 'http://schemas.google.com/g/2005#event.accepted'
ATTENDEE_DECLINED = 'http://schemas.google.com/g/2005#event.declined'
ATTENDEE_INVITED = 'http://schemas.google.com/g/2005#event.invited'
ATTENDEE_TENTATIVE = 'http://schemas.google.com/g/2005#event.tentative'
# Feed projection values.
FULL_PROJECTION = 'full'
VALUES_PROJECTION = 'values'
BASIC_PROJECTION = 'basic'
# Feed visibility values.
PRIVATE_VISIBILITY = 'private'
PUBLIC_VISIBILITY = 'public'
ACL_REL = 'http://schemas.google.com/acl/2007#accessControlList'
class Error(Exception):
  """Base exception for errors raised by this module."""
  pass
class MissingRequiredParameters(Error):
  """Raised when a call omits parameters it cannot proceed without."""
  pass
class LinkFinder(atom.data.LinkFinder):
  """Mixin used in Feed and Entry classes to simplify link lookups by type.

  Provides lookup methods for edit, edit-media, post, ACL and other special
  links which are common across Google Data APIs.
  """

  def find_html_link(self):
    """Finds the first link with rel of alternate and type of text/html."""
    match = self.get_html_link()
    if match is not None:
      return match.href
    return None

  FindHtmlLink = find_html_link

  def get_html_link(self):
    """Returns the first alternate text/html link object, or None."""
    for candidate in self.link:
      if candidate.rel == 'alternate' and candidate.type == 'text/html':
        return candidate
    return None

  GetHtmlLink = get_html_link

  def find_post_link(self):
    """Get the URL to which new entries should be POSTed.

    The POST target URL is used to insert new entries.

    Returns:
      A str for the URL in the link with a rel matching the POST type.
    """
    return self.find_url('http://schemas.google.com/g/2005#post')

  FindPostLink = find_post_link

  def get_post_link(self):
    """Returns the link object with the POST rel, or None."""
    return self.get_link('http://schemas.google.com/g/2005#post')

  GetPostLink = get_post_link

  def find_acl_link(self):
    """Returns the URL of the ACL feed for this resource, or None."""
    acl_link = self.get_acl_link()
    if acl_link:
      return acl_link.href
    return None

  FindAclLink = find_acl_link

  def get_acl_link(self):
    """Searches for a link or feed_link (if present) with the rel for ACL."""
    direct = self.get_link(ACL_REL)
    if direct:
      return direct
    # Some feeds expose the ACL feed as a gd:feedLink instead of atom:link.
    if hasattr(self, 'feed_link'):
      for nested in self.feed_link:
        if nested.rel == ACL_REL:
          return nested
    return None

  GetAclLink = get_acl_link

  def find_feed_link(self):
    """Returns the URL of the related feed link, or None."""
    return self.find_url('http://schemas.google.com/g/2005#feed')

  FindFeedLink = find_feed_link

  def get_feed_link(self):
    """Returns the link object with the feed rel, or None."""
    return self.get_link('http://schemas.google.com/g/2005#feed')

  GetFeedLink = get_feed_link

  def find_previous_link(self):
    """Returns the URL of the previous-page link, or None."""
    return self.find_url('previous')

  FindPreviousLink = find_previous_link

  def get_previous_link(self):
    """Returns the link object with rel of previous, or None."""
    return self.get_link('previous')

  GetPreviousLink = get_previous_link
class TotalResults(atom.core.XmlElement):
  """opensearch:TotalResults for a GData feed."""
  # Both OpenSearch namespace versions are accepted when parsing.
  _qname = (OPENSEARCH_TEMPLATE_V1 % 'totalResults',
            OPENSEARCH_TEMPLATE_V2 % 'totalResults')
class StartIndex(atom.core.XmlElement):
  """The opensearch:startIndex element in GData feed."""
  # Both OpenSearch namespace versions are accepted when parsing.
  _qname = (OPENSEARCH_TEMPLATE_V1 % 'startIndex',
            OPENSEARCH_TEMPLATE_V2 % 'startIndex')
class ItemsPerPage(atom.core.XmlElement):
  """The opensearch:itemsPerPage element in GData feed."""
  # Both OpenSearch namespace versions are accepted when parsing.
  _qname = (OPENSEARCH_TEMPLATE_V1 % 'itemsPerPage',
            OPENSEARCH_TEMPLATE_V2 % 'itemsPerPage')
class ExtendedProperty(atom.core.XmlElement):
  """The Google Data extendedProperty element.

  Used to store arbitrary key-value information specific to your
  application. The value can either be a text string stored as an XML
  attribute (.value), or an XML node (XmlBlob) as a child element.

  This element is used in the Google Calendar data API and the Google
  Contacts data API.
  """
  _qname = GDATA_TEMPLATE % 'extendedProperty'
  name = 'name'
  value = 'value'

  def get_xml_blob(self):
    """Returns the XML blob as an atom.core.XmlElement.

    Returns:
      An XmlElement representing the blob's XML, or None if no blob was
      set.
    """
    if not self._other_elements:
      return None
    return self._other_elements[0]

  GetXmlBlob = get_xml_blob

  def set_xml_blob(self, blob):
    """Sets the contents of the extendedProperty to XML as a child node.

    Since the extendedProperty is only allowed one child element as an XML
    blob, setting the XML blob will erase any preexisting member elements
    in this object.

    Args:
      blob: str or atom.core.XmlElement representing the XML blob stored in
          the extendedProperty.
    """
    if isinstance(blob, atom.core.XmlElement):
      element = blob
    else:
      element = atom.core.parse(str(blob))
    # Rebinding the element list also clears any preexisting children.
    self._other_elements = [element]

  SetXmlBlob = set_xml_blob
class GDEntry(atom.data.Entry, LinkFinder):
  """Extends Atom Entry to provide data processing"""
  etag = '{http://schemas.google.com/g/2005}etag'

  def get_id(self):
    """Returns the stripped text of the atom id, or None if absent."""
    if self.id is None or self.id.text is None:
      return None
    return self.id.text.strip()

  GetId = get_id

  def is_media(self):
    """Returns True when the entry has an edit-media link (media entry)."""
    return bool(self.find_media_edit_link())

  IsMedia = is_media

  def find_media_link(self):
    """Returns the URL to the media content, if the entry is a media entry.

    Otherwise returns None.
    """
    if not self.is_media():
      return None
    return self.content.src

  FindMediaLink = find_media_link
class GDFeed(atom.data.Feed, LinkFinder):
  """A Feed from a GData service."""
  etag = '{http://schemas.google.com/g/2005}etag'
  total_results = TotalResults
  start_index = StartIndex
  items_per_page = ItemsPerPage
  entry = [GDEntry]

  def get_id(self):
    """Returns the stripped text of the feed's atom id, or None."""
    if self.id is None or self.id.text is None:
      return None
    return self.id.text.strip()

  GetId = get_id

  def get_generator(self):
    """Returns the stripped generator text, or None when absent or empty."""
    if self.generator and self.generator.text:
      return self.generator.text.strip()
    return None
class BatchId(atom.core.XmlElement):
  """Identifies a single operation in a batch request."""
  _qname = BATCH_TEMPLATE % 'id'
class BatchOperation(atom.core.XmlElement):
  """The CRUD operation which this batch entry represents."""
  _qname = BATCH_TEMPLATE % 'operation'
  # One of the BATCH_INSERT/UPDATE/DELETE/QUERY labels defined above.
  type = 'type'
class BatchStatus(atom.core.XmlElement):
  """The batch:status element present in a batch response entry.

  A status element contains the code (HTTP response code) and
  reason as elements. In a single request these fields would
  be part of the HTTP response, but in a batch request each
  Entry operation has a corresponding Entry in the response
  feed which includes status information.

  See http://code.google.com/apis/gdata/batch.html#Handling_Errors
  """
  _qname = BATCH_TEMPLATE % 'status'
  code = 'code'
  reason = 'reason'
  content_type = 'content-type'
class BatchEntry(GDEntry):
  """An atom:entry for use in batch requests.

  The BatchEntry contains additional members to specify the operation to be
  performed on this entry and a batch ID so that the server can reference
  individual operations in the response feed. For more information, see:
  http://code.google.com/apis/gdata/batch.html
  """
  batch_operation = BatchOperation
  batch_id = BatchId
  batch_status = BatchStatus
class BatchInterrupted(atom.core.XmlElement):
  """The batch:interrupted element sent if batch request was interrupted.

  Only appears in a feed if some of the batch entries could not be
  processed. See:
  http://code.google.com/apis/gdata/batch.html#Handling_Errors
  """
  _qname = BATCH_TEMPLATE % 'interrupted'
  reason = 'reason'
  success = 'success'
  failures = 'failures'
  parsed = 'parsed'
class BatchFeed(GDFeed):
  """A feed containing a list of batch request entries."""
  interrupted = BatchInterrupted
  entry = [BatchEntry]

  def add_batch_entry(self, entry=None, id_url_string=None,
                      batch_id_string=None, operation_string=None):
    """Populates batch members of an entry and appends it to this feed.

    If the entry is not a BatchEntry, it is converted to a BatchEntry so
    that the batch specific members will be present.

    The id_url_string can be used in place of an entry when the batch
    operation applies to a URL (for example query and delete operations
    send no body in the HTTP request); in that case a BatchEntry is built
    around the URL and added to the feed.

    This method also assigns the desired batch id to the entry so that it
    can be referenced in the server's response. If batch_id_string is None
    and the entry carries no batch id yet, the entry's index in this feed's
    entry list is used as its batch id.

    Args:
      entry: BatchEntry, atom.data.Entry, or another Entry flavor (optional)
          The entry which will be sent to the server as part of the batch
          request. The item must have a valid atom id so that the server
          knows which entry this request references.
      id_url_string: str (optional) The URL of the entry to be acted on.
          You can find this URL in the text member of the atom id for an
          entry. If an entry is not sent, this id is used to construct a
          new BatchEntry which is added to the request feed.
      batch_id_string: str (optional) The batch ID used to reference this
          batch operation in the results feed. Either always specify batch
          ids or never; mixing could produce duplicate batch ids.
      operation_string: str (optional) The desired batch operation, which
          sets batch_operation.type. Options are 'insert', 'update',
          'delete', and 'query'.

    Raises:
      MissingRequiredParameters: Raised if neither an id_url_string nor an
          entry are provided in the request.

    Returns:
      The added entry.
    """
    if entry is None:
      if id_url_string is None:
        raise MissingRequiredParameters('supply either an entry or URL string')
      # Build a minimal entry whose atom id carries the target URL.
      entry = BatchEntry(id=atom.data.Id(text=id_url_string))
    if batch_id_string is not None:
      entry.batch_id = BatchId(text=batch_id_string)
    elif entry.batch_id is None or entry.batch_id.text is None:
      # Default the batch id to this entry's position in the feed.
      entry.batch_id = BatchId(text=str(len(self.entry)))
    if operation_string is not None:
      entry.batch_operation = BatchOperation(type=operation_string)
    self.entry.append(entry)
    return entry

  AddBatchEntry = add_batch_entry

  def add_insert(self, entry, batch_id_string=None):
    """Adds an insert request to the operations in this batch request feed.

    If the entry doesn't yet have an operation or a batch id, these will
    be set to the insert operation and a batch_id specified as a parameter.

    Args:
      entry: BatchEntry The entry which will be sent in the batch feed as
          an insert request.
      batch_id_string: str (optional) The batch ID for this operation; see
          add_batch_entry for defaulting rules.
    """
    self.add_batch_entry(entry=entry, batch_id_string=batch_id_string,
                         operation_string=BATCH_INSERT)

  AddInsert = add_insert

  def add_update(self, entry, batch_id_string=None):
    """Adds an update request to the list of batch operations in this feed.

    Args:
      entry: BatchEntry The entry which will be sent to the server as an
          update (HTTP PUT) request. The item must have a valid atom id
          so that the server knows which entry to replace.
      batch_id_string: str (optional) The batch ID for this operation; see
          add_batch_entry for defaulting rules.
    """
    self.add_batch_entry(entry=entry, batch_id_string=batch_id_string,
                         operation_string=BATCH_UPDATE)

  AddUpdate = add_update

  def add_delete(self, url_string=None, entry=None, batch_id_string=None):
    """Adds a delete request to the batch request feed.

    This method takes either the url_string which is the atom id of the
    item to be deleted, or the entry itself. The atom id of the entry must
    be present so that the server knows which entry should be deleted.

    Args:
      url_string: str (optional) The URL of the entry to be deleted. You
          can find this URL in the text member of the atom id for an entry.
      entry: BatchEntry (optional) The entry to be deleted.
      batch_id_string: str (optional)

    Raises:
      MissingRequiredParameters: Raised if neither a url_string nor an
          entry are provided in the request.
    """
    self.add_batch_entry(entry=entry, id_url_string=url_string,
                         batch_id_string=batch_id_string,
                         operation_string=BATCH_DELETE)

  AddDelete = add_delete

  def add_query(self, url_string=None, entry=None, batch_id_string=None):
    """Adds a query request to the batch request feed.

    This method takes either the url_string which is the query URL whose
    results will be added to the result feed, or a BatchEntry already
    wrapping such a URL.

    Args:
      url_string: str (optional)
      entry: BatchEntry (optional)
      batch_id_string: str (optional)

    Raises:
      MissingRequiredParameters
    """
    self.add_batch_entry(entry=entry, id_url_string=url_string,
                         batch_id_string=batch_id_string,
                         operation_string=BATCH_QUERY)

  AddQuery = add_query

  def find_batch_link(self):
    """Returns the URL of this feed's batch endpoint, or None."""
    return self.find_url('http://schemas.google.com/g/2005#batch')

  FindBatchLink = find_batch_link
class EntryLink(atom.core.XmlElement):
  """The gd:entryLink element.

  Represents a logically nested entry. For example, a <gd:who>
  representing a contact might have a nested entry from a contact feed.
  """
  _qname = GDATA_TEMPLATE % 'entryLink'
  entry = GDEntry
  rel = 'rel'
  read_only = 'readOnly'
  href = 'href'
class FeedLink(atom.core.XmlElement):
  """The gd:feedLink element.

  Represents a logically nested feed. For example, a calendar feed might
  have a nested feed representing all comments on entries.
  """
  _qname = GDATA_TEMPLATE % 'feedLink'
  feed = GDFeed
  rel = 'rel'
  read_only = 'readOnly'
  count_hint = 'countHint'
  href = 'href'
class AdditionalName(atom.core.XmlElement):
  """The gd:additionalName element.

  Specifies additional (eg. middle) name of the person.
  Contains an attribute for the phonetic representaton of the name.
  """
  _qname = GDATA_TEMPLATE % 'additionalName'
  # Yomigana (phonetic reading aid) attribute.
  yomi = 'yomi'
class Comments(atom.core.XmlElement):
  """The gd:comments element.

  Contains a comments feed for the enclosing entry (such as a calendar
  event).
  """
  _qname = GDATA_TEMPLATE % 'comments'
  rel = 'rel'
  feed_link = FeedLink
class Country(atom.core.XmlElement):
  """The gd:country element.

  Country name along with optional country code. The country code is
  given in accordance with ISO 3166-1 alpha-2:
  http://www.iso.org/iso/iso-3166-1_decoding_table
  """
  _qname = GDATA_TEMPLATE % 'country'
  code = 'code'
class EmailImParent(atom.core.XmlElement):
  """Shared attributes of the gd:email and gd:im elements (no qname)."""
  address = 'address'
  label = 'label'
  rel = 'rel'
  primary = 'primary'
class Email(EmailImParent):
  """The gd:email element.

  An email address associated with the containing entity (which is
  usually an entity representing a person or a location).
  """
  _qname = GDATA_TEMPLATE % 'email'
  display_name = 'displayName'
class FamilyName(atom.core.XmlElement):
  """The gd:familyName element.

  Specifies family name of the person, eg. "Smith".
  """
  _qname = GDATA_TEMPLATE % 'familyName'
  # Yomigana (phonetic reading aid) attribute.
  yomi = 'yomi'
class Im(EmailImParent):
  """The gd:im element.

  An instant messaging address associated with the containing entity.
  """
  _qname = GDATA_TEMPLATE % 'im'
  # One of the *_PROTOCOL constants defined above (AIM, MSN, ...).
  protocol = 'protocol'
class GivenName(atom.core.XmlElement):
  """The gd:givenName element.

  Specifies given name of the person, eg. "John".
  """
  _qname = GDATA_TEMPLATE % 'givenName'
  # Yomigana (phonetic reading aid) attribute.
  yomi = 'yomi'
class NamePrefix(atom.core.XmlElement):
  """The gd:namePrefix element.

  Honorific prefix, eg. 'Mr' or 'Mrs'.
  """
  _qname = GDATA_TEMPLATE % 'namePrefix'
class NameSuffix(atom.core.XmlElement):
  """The gd:nameSuffix element.

  Honorific suffix, eg. 'san' or 'III'.
  """
  _qname = GDATA_TEMPLATE % 'nameSuffix'
class FullName(atom.core.XmlElement):
  """The gd:fullName element.

  Unstructured representation of the name.
  """
  _qname = GDATA_TEMPLATE % 'fullName'
class Name(atom.core.XmlElement):
  """The gd:name element.

  Allows storing person's name in a structured way. Consists of
  given name, additional name, family name, prefix, suffix and full name.
  """
  _qname = GDATA_TEMPLATE % 'name'
  given_name = GivenName
  additional_name = AdditionalName
  family_name = FamilyName
  name_prefix = NamePrefix
  name_suffix = NameSuffix
  full_name = FullName
class OrgDepartment(atom.core.XmlElement):
  """The gd:orgDepartment element.

  Describes a department within an organization. Must appear within a
  gd:organization element.
  """
  _qname = GDATA_TEMPLATE % 'orgDepartment'
class OrgJobDescription(atom.core.XmlElement):
  """The gd:orgJobDescription element.

  Describes a job within an organization. Must appear within a
  gd:organization element.
  """
  _qname = GDATA_TEMPLATE % 'orgJobDescription'
class OrgName(atom.core.XmlElement):
  """The gd:orgName element.

  The name of the organization. Must appear within a gd:organization
  element.

  Contains a Yomigana attribute (Japanese reading aid) for the
  organization name.
  """
  _qname = GDATA_TEMPLATE % 'orgName'
  yomi = 'yomi'
class OrgSymbol(atom.core.XmlElement):
  """The gd:orgSymbol element.

  Provides a symbol of an organization. Must appear within a
  gd:organization element.
  """
  _qname = GDATA_TEMPLATE % 'orgSymbol'
class OrgTitle(atom.core.XmlElement):
  """The gd:orgTitle element.

  The title of a person within an organization. Must appear within a
  gd:organization element.
  """
  _qname = GDATA_TEMPLATE % 'orgTitle'
class Organization(atom.core.XmlElement):
  """The gd:organization element.

  An organization, typically associated with a contact.
  """
  _qname = GDATA_TEMPLATE % 'organization'
  label = 'label'
  primary = 'primary'
  rel = 'rel'
  department = OrgDepartment
  job_description = OrgJobDescription
  name = OrgName
  symbol = OrgSymbol
  title = OrgTitle
class When(atom.core.XmlElement):
  """The gd:when element.

  Represents a period of time or an instant.
  """
  _qname = GDATA_TEMPLATE % 'when'
  end = 'endTime'
  start = 'startTime'
  value = 'valueString'
class OriginalEvent(atom.core.XmlElement):
  """The gd:originalEvent element.

  Equivalent to the Recurrence ID property specified in section 4.8.4.4
  of RFC 2445. Appears in every instance of a recurring event, to identify
  the original event.

  Contains a <gd:when> element specifying the original start time of the
  instance that has become an exception.
  """
  _qname = GDATA_TEMPLATE % 'originalEvent'
  id = 'id'
  href = 'href'
  when = When
class PhoneNumber(atom.core.XmlElement):
  """The gd:phoneNumber element.

  A phone number associated with the containing entity (which is usually
  an entity representing a person or a location).
  """
  _qname = GDATA_TEMPLATE % 'phoneNumber'
  label = 'label'
  # One of the *_REL constants defined above (HOME_REL, WORK_REL, ...).
  rel = 'rel'
  uri = 'uri'
  primary = 'primary'
class PostalAddress(atom.core.XmlElement):
  """The gd:postalAddress element."""
  _qname = GDATA_TEMPLATE % 'postalAddress'
  label = 'label'
  rel = 'rel'
  uri = 'uri'
  primary = 'primary'
class Rating(atom.core.XmlElement):
  """The gd:rating element.

  Represents a numeric rating of the enclosing entity, such as a
  comment. Each rating supplies its own scale, although it may be
  normalized by a service; for example, some services might convert all
  ratings to a scale from 1 to 5.
  """
  _qname = GDATA_TEMPLATE % 'rating'
  average = 'average'
  max = 'max'
  min = 'min'
  num_raters = 'numRaters'
  rel = 'rel'
  value = 'value'
class Recurrence(atom.core.XmlElement):
  """The gd:recurrence element.

  Represents the dates and times when a recurring event takes place.

  The string that defines the recurrence consists of a set of properties,
  each of which is defined in the iCalendar standard (RFC 2445).
  Specifically, the string usually begins with a DTSTART property that
  indicates the starting time of the first instance of the event, and
  often a DTEND property or a DURATION property to indicate when the
  first instance ends. Next come RRULE, RDATE, EXRULE, and/or EXDATE
  properties, which collectively define a recurring event and its
  exceptions (but see below). (See section 4.8.5 of RFC 2445 for more
  information about these recurrence component properties.) Last comes a
  VTIMEZONE component, providing detailed timezone rules for any timezone
  ID mentioned in the preceding properties.

  Google services like Google Calendar don't generally generate EXRULE
  and EXDATE properties to represent exceptions to recurring events;
  instead, they generate <gd:recurrenceException> elements. However,
  Google services may include EXRULE and/or EXDATE properties anyway;
  for example, users can import events and exceptions into Calendar, and
  if those imported events contain EXRULE or EXDATE properties, then
  Calendar will provide those properties when it sends a <gd:recurrence>
  element.

  Note that the use of <gd:recurrenceException> means that you can't be
  sure just from examining a <gd:recurrence> element whether there are
  any exceptions to the recurrence description. To ensure that you find
  all exceptions, look for <gd:recurrenceException> elements in the feed,
  and use their <gd:originalEvent> elements to match them up with
  <gd:recurrence> elements.
  """
  _qname = GDATA_TEMPLATE % 'recurrence'
class RecurrenceException(atom.core.XmlElement):
  """The gd:recurrenceException element.

  Represents an event that's an exception to a recurring event - that is,
  an instance of a recurring event in which one or more aspects of the
  recurring event (such as attendance list, time, or location) have been
  changed.

  Contains a <gd:originalEvent> element that specifies the original
  recurring event that this event is an exception to.

  When you change an instance of a recurring event, that instance becomes
  an exception. Depending on what change you made to it, the exception
  behaves in either of two different ways when the original recurring
  event is changed:

  - If you add, change, or remove comments, attendees, or attendee
    responses, then the exception remains tied to the original event, and
    changes to the original event also change the exception.
  - If you make any other changes to the exception (such as changing the
    time or location) then the instance becomes "specialized," which means
    that it's no longer as tightly tied to the original event. If you
    change the original event, specialized exceptions don't change. But
    see below.

  For example, say you have a meeting every Tuesday and Thursday at
  2:00 p.m. If you change the attendance list for this Thursday's meeting
  (but not for the regularly scheduled meeting), then it becomes an
  exception. If you change the time for this Thursday's meeting (but not
  for the regularly scheduled meeting), then it becomes specialized.

  Regardless of whether an exception is specialized or not, if you do
  something that deletes the instance that the exception was derived from,
  then the exception is deleted. Note that changing the day or time of a
  recurring event deletes all instances, and creates new ones.

  For example, after you've specialized this Thursday's meeting, say you
  change the recurring meeting to happen on Monday, Wednesday, and Friday.
  That change deletes all of the recurring instances of the
  Tuesday/Thursday meeting, including the specialized one.

  If a particular instance of a recurring event is deleted, then that
  instance appears as a <gd:recurrenceException> containing a
  <gd:entryLink> that has its <gd:eventStatus> set to
  "http://schemas.google.com/g/2005#event.canceled". (For more
  information about canceled events, see RFC 2445.)
  """
  _qname = GDATA_TEMPLATE % 'recurrenceException'
  # XML attribute: whether this exception has been "specialized" (see above).
  specialized = 'specialized'
  # Child elements.
  entry_link = EntryLink
  original_event = OriginalEvent
class Reminder(atom.core.XmlElement):
  """The gd:reminder element.

  A time interval, indicating how long before the containing entity's start
  time or due time attribute a reminder should be issued. Alternatively,
  may specify an absolute time at which a reminder should be issued. Also
  specifies a notification method, indicating what medium the system
  should use to remind the user.
  """
  _qname = GDATA_TEMPLATE % 'reminder'
  # XML attributes: either an absolute time, or a relative offset expressed
  # in days/hours/minutes; 'method' names the notification medium.
  absolute_time = 'absoluteTime'
  method = 'method'
  days = 'days'
  hours = 'hours'
  minutes = 'minutes'
class Agent(atom.core.XmlElement):
  """The gd:agent element.

  The agent who actually receives the mail. Used in work addresses.
  Also for 'in care of' or 'c/o'.
  """
  _qname = GDATA_TEMPLATE % 'agent'
class HouseName(atom.core.XmlElement):
  """The gd:housename element.

  Used in places where houses or buildings have names (and not
  necessarily numbers), e.g. "The Pillars".
  """
  _qname = GDATA_TEMPLATE % 'housename'
class Street(atom.core.XmlElement):
  """The gd:street element.

  Can be street, avenue, road, etc. This element also includes the
  house number and room/apartment/flat/floor number.
  """
  _qname = GDATA_TEMPLATE % 'street'
class PoBox(atom.core.XmlElement):
  """The gd:pobox element.

  Covers actual P.O. boxes, drawers, locked bags, etc. This is usually
  but not always mutually exclusive with street.
  """
  _qname = GDATA_TEMPLATE % 'pobox'
class Neighborhood(atom.core.XmlElement):
  """The gd:neighborhood element.

  This is used to disambiguate a street address when a city contains more
  than one street with the same name, or to specify a small place whose
  mail is routed through a larger postal town. In China it could be a
  county or a minor city.
  """
  _qname = GDATA_TEMPLATE % 'neighborhood'
class City(atom.core.XmlElement):
  """The gd:city element.

  Can be city, village, town, borough, etc. This is the postal town and
  not necessarily the place of residence or place of business.
  """
  _qname = GDATA_TEMPLATE % 'city'
class Subregion(atom.core.XmlElement):
  """The gd:subregion element.

  Handles administrative districts such as U.S. or U.K. counties that are
  not used for mail addressing purposes. Subregion is not intended for
  delivery addresses.
  """
  _qname = GDATA_TEMPLATE % 'subregion'
class Region(atom.core.XmlElement):
  """The gd:region element.

  A state, province, county (in Ireland), Land (in Germany),
  departement (in France), etc.
  """
  _qname = GDATA_TEMPLATE % 'region'
class Postcode(atom.core.XmlElement):
  """The gd:postcode element.

  Postal code. Usually country-wide, but sometimes specific to the
  city (e.g. "2" in "Dublin 2, Ireland" addresses).
  """
  _qname = GDATA_TEMPLATE % 'postcode'
class Country(atom.core.XmlElement):
  """The gd:country element.

  The name or code of the country.
  """
  _qname = GDATA_TEMPLATE % 'country'
class FormattedAddress(atom.core.XmlElement):
  """The gd:formattedAddress element.

  The full, unstructured postal address.
  """
  _qname = GDATA_TEMPLATE % 'formattedAddress'
class StructuredPostalAddress(atom.core.XmlElement):
  """The gd:structuredPostalAddress element.

  Postal address split into components. It allows to store the address
  in locale independent format. The fields can be interpreted and used
  to generate formatted, locale dependent address. The following elements
  represent parts of the address: agent, house name, street, P.O. box,
  neighborhood, city, subregion, region, postal code, country. The
  subregion element is not used for postal addresses, it is provided for
  extended uses of addresses only. In order to store postal address in an
  unstructured form formatted address field is provided.
  """
  _qname = GDATA_TEMPLATE % 'structuredPostalAddress'
  # XML attributes.
  rel = 'rel'
  mail_class = 'mailClass'
  usage = 'usage'
  label = 'label'
  primary = 'primary'
  # Child elements (the structured address components).
  agent = Agent
  house_name = HouseName
  street = Street
  po_box = PoBox
  neighborhood = Neighborhood
  city = City
  subregion = Subregion
  region = Region
  postcode = Postcode
  country = Country
  formatted_address = FormattedAddress
class Where(atom.core.XmlElement):
  """The gd:where element.

  A place (such as an event location) associated with the containing
  entity. The type of the association is determined by the rel attribute;
  the details of the location are contained in an embedded or linked-to
  Contact entry.

  A <gd:where> element is more general than a <gd:geoPt> element. The
  former identifies a place using a text description and/or a Contact
  entry, while the latter identifies a place using a specific geographic
  location.
  """
  _qname = GDATA_TEMPLATE % 'where'
  # XML attributes; the Python name 'value' maps to the 'valueString'
  # XML attribute.
  label = 'label'
  rel = 'rel'
  value = 'valueString'
  # Child element.
  entry_link = EntryLink
class AttendeeType(atom.core.XmlElement):
  """The gd:attendeeType element."""
  _qname = GDATA_TEMPLATE % 'attendeeType'
  value = 'value'
class AttendeeStatus(atom.core.XmlElement):
  """The gd:attendeeStatus element."""
  _qname = GDATA_TEMPLATE % 'attendeeStatus'
  value = 'value'
class Who(atom.core.XmlElement):
  """The gd:who element.

  A person associated with the containing entity. The type of the
  association is determined by the rel attribute; the details about the
  person are contained in an embedded or linked-to Contact entry.

  The <gd:who> element can be used to specify email senders and
  recipients, calendar event organizers, and so on.
  """
  _qname = GDATA_TEMPLATE % 'who'
  # XML attributes; 'value' maps to the 'valueString' XML attribute.
  email = 'email'
  rel = 'rel'
  value = 'valueString'
  # Child elements.
  attendee_status = AttendeeStatus
  attendee_type = AttendeeType
  entry_link = EntryLink
class Deleted(atom.core.XmlElement):
  """gd:deleted when present, indicates the containing entry is deleted."""
  # Note: this element uses GD_TEMPLATE, unlike most siblings above which
  # use GDATA_TEMPLATE.
  _qname = GD_TEMPLATE % 'deleted'
class Money(atom.core.XmlElement):
  """The gd:money element: a monetary amount with its currency code."""
  _qname = GD_TEMPLATE % 'money'
  # XML attributes.
  amount = 'amount'
  currency_code = 'currencyCode'
class MediaSource(object):
  """Bundles a media payload (file handle plus metadata) for a GData entry.

  GData entries can refer to media sources; this class keeps the file
  handle together with its MIME type, byte length, and file name.
  """

  def __init__(self, file_handle=None, content_type=None, content_length=None,
               file_path=None, file_name=None):
    """Creates an object of type MediaSource.

    Args:
      file_handle: A file handle pointing to the file to be encapsulated
          in the MediaSource.
      content_type: string The MIME type of the file. Required if a
          file_handle is given.
      content_length: int The size of the file. Required if a file_handle
          is given.
      file_path: string (optional) A full path name to the file. Used in
          place of a file_handle.
      file_name: string The name of the file without any path information.
          Required if a file_handle is given.
    """
    self.file_handle = file_handle
    self.content_type = content_type
    self.content_length = content_length
    self.file_name = file_name
    # With no explicit handle, open the file at file_path (when both the
    # path and a MIME type were supplied) and derive length/name from it.
    handle_missing = file_handle is None
    if handle_missing and content_type is not None and file_path is not None:
      self.set_file_handle(file_path, content_type)

  def set_file_handle(self, file_name, content_type):
    """Opens file_name and fills in handle, type, length, and base name.

    Args:
      file_name: string The path and file name to the file containing
          the media.
      content_type: string A MIME type representing the type of the media.
    """
    self.file_handle = open(file_name, 'rb')
    self.content_type = content_type
    self.content_length = os.path.getsize(file_name)
    self.file_name = os.path.basename(file_name)

  SetFileHandle = set_file_handle

  def modify_request(self, http_request):
    """Attaches this media as a body part of http_request and returns it."""
    http_request.add_body_part(self.file_handle, self.content_type,
                               self.content_length)
    return http_request

  ModifyRequest = modify_request
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model classes for parsing and generating XML for the Contacts API."""
__author__ = 'vinces1979@gmail.com (Vince Spicer)'
import atom.core
import gdata
import gdata.data
# Link rel values used to locate a contact's photo and photo-edit links.
PHOTO_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#photo'
PHOTO_EDIT_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#edit-photo'
# Common rel values for external IDs and relations.
EXTERNAL_ID_ORGANIZATION = 'organization'
RELATION_MANAGER = 'manager'
# gContact XML namespace; CONTACTS_TEMPLATE % 'localName' produces a
# '{namespace}localName' qualified name.
CONTACTS_NAMESPACE = 'http://schemas.google.com/contact/2008'
CONTACTS_TEMPLATE = '{%s}%%s' % CONTACTS_NAMESPACE
class BillingInformation(atom.core.XmlElement):
  """The gContact:billingInformation element.

  Specifies billing information of the entity represented by the contact.
  The element cannot be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'billingInformation'
class Birthday(atom.core.XmlElement):
  """The gContact:birthday element.

  Stores birthday date of the person represented by the contact.
  The element cannot be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'birthday'
  # XML attribute holding the date value.
  when = 'when'
class CalendarLink(atom.core.XmlElement):
  """The gContact:calendarLink element.

  Storage for URL of the contact's calendar. The element can be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'calendarLink'
  # XML attributes.
  rel = 'rel'
  label = 'label'
  primary = 'primary'
  href = 'href'
class DirectoryServer(atom.core.XmlElement):
  """The gContact:directoryServer element.

  A directory server associated with this contact.
  May not be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'directoryServer'
class Event(atom.core.XmlElement):
  """The gContact:event element.

  These elements describe events associated with a contact.
  They may be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'event'
  # XML attributes.
  label = 'label'
  rel = 'rel'
  # Child element holding the event time.
  when = gdata.data.When
class ExternalId(atom.core.XmlElement):
  """The gContact:externalId element.

  Describes an ID of the contact in an external system of some kind.
  This element may be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'externalId'
def ExternalIdFromString(xml_string):
  """Deserializes an ExternalId element from its XML string form."""
  return atom.core.parse(ExternalId, xml_string)
class Gender(atom.core.XmlElement):
  """The gContact:gender element.

  Specifies the gender of the person represented by the contact.
  The element cannot be repeated.
  """
  # Bug fix: the qname previously read CONTACTS_TEMPLATE % 'directoryServer'
  # (a copy-paste from the DirectoryServer class above), which made Gender
  # serialize/parse the wrong XML element. gContact:gender is the correct
  # element name.
  _qname = CONTACTS_TEMPLATE % 'gender'
  # XML attribute holding the gender value.
  value = 'value'
class Hobby(atom.core.XmlElement):
  """The gContact:hobby element.

  Describes a hobby of the contact. This element may be repeated.
  (The previous docstring was a copy-paste of ExternalId's.)
  """
  _qname = CONTACTS_TEMPLATE % 'hobby'
class Initials(atom.core.XmlElement):
  """The gContact:initials element.

  Specifies the initials of the person represented by the contact.
  The element cannot be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'initials'
class Jot(atom.core.XmlElement):
  """The gContact:jot element.

  Storage for arbitrary pieces of information about the contact. Each jot
  has a type specified by the rel attribute and a text value.
  The element can be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'jot'
  rel = 'rel'
class Language(atom.core.XmlElement):
  """The gContact:language element.

  Specifies the preferred languages of the contact.
  The element can be repeated.

  The language must be specified using one of two mutually exclusive
  methods: using the freeform @label attribute, or using the @code
  attribute, whose value must conform to the IETF BCP 47 specification.
  """
  _qname = CONTACTS_TEMPLATE % 'language'
  # Mutually exclusive XML attributes (see docstring).
  code = 'code'
  label = 'label'
class MaidenName(atom.core.XmlElement):
  """The gContact:maidenName element.

  Specifies maiden name of the person represented by the contact.
  The element cannot be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'maidenName'
class Mileage(atom.core.XmlElement):
  """The gContact:mileage element.

  Specifies the mileage for the entity represented by the contact.
  Can be used for example to document distance needed for reimbursement
  purposes. The value is not interpreted. The element cannot be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'mileage'
class NickName(atom.core.XmlElement):
  """The gContact:nickname element.

  Specifies the nickname of the person represented by the contact.
  The element cannot be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'nickname'
class Occupation(atom.core.XmlElement):
  """The gContact:occupation element.

  Specifies the occupation/profession of the person specified by the
  contact. The element cannot be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'occupation'
class Priority(atom.core.XmlElement):
  """The gContact:priority element.

  Classifies importance of the contact into 3 categories:
    * Low
    * Normal
    * High

  The priority element cannot be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'priority'
class Relation(atom.core.XmlElement):
  """The gContact:relation element.

  This element describes another entity (usually a person) that is in a
  relation of some kind with the contact.
  """
  _qname = CONTACTS_TEMPLATE % 'relation'
  # XML attributes.
  rel = 'rel'
  label = 'label'
class Sensitivity(atom.core.XmlElement):
  """The gContact:sensitivity element.

  Classifies sensitivity of the contact into the following categories:
    * Confidential
    * Normal
    * Personal
    * Private

  The sensitivity element cannot be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'sensitivity'
  rel = 'rel'
class UserDefinedField(atom.core.XmlElement):
  """The gContact:userDefinedField element.

  Represents an arbitrary key-value pair attached to the contact.
  """
  _qname = CONTACTS_TEMPLATE % 'userDefinedField'
  # XML attributes for the pair.
  key = 'key'
  value = 'value'
def UserDefinedFieldFromString(xml_string):
  """Deserializes a UserDefinedField element from its XML string form."""
  return atom.core.parse(UserDefinedField, xml_string)
class Website(atom.core.XmlElement):
  """The gContact:website element.

  Describes websites associated with the contact, including links.
  May be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'website'
  # XML attributes.
  href = 'href'
  label = 'label'
  primary = 'primary'
  rel = 'rel'
def WebsiteFromString(xml_string):
  """Deserializes a Website element from its XML string form."""
  return atom.core.parse(Website, xml_string)
class HouseName(atom.core.XmlElement):
  """The gContact:housename element.

  Used in places where houses or buildings have names (and
  not necessarily numbers), e.g. "The Pillars".
  """
  _qname = CONTACTS_TEMPLATE % 'housename'
class Street(atom.core.XmlElement):
  """The gContact:street element.

  Can be street, avenue, road, etc. This element also includes the house
  number and room/apartment/flat/floor number.
  """
  _qname = CONTACTS_TEMPLATE % 'street'
class POBox(atom.core.XmlElement):
  """The gContact:pobox element.

  Covers actual P.O. boxes, drawers, locked bags, etc. This is usually
  but not always mutually exclusive with street.
  """
  _qname = CONTACTS_TEMPLATE % 'pobox'
class Neighborhood(atom.core.XmlElement):
  """The gContact:neighborhood element.

  This is used to disambiguate a street address when a city contains more
  than one street with the same name, or to specify a small place whose
  mail is routed through a larger postal town. In China it could be a
  county or a minor city.
  """
  _qname = CONTACTS_TEMPLATE % 'neighborhood'
class City(atom.core.XmlElement):
  """The gContact:city element.

  Can be city, village, town, borough, etc. This is the postal town and
  not necessarily the place of residence or place of business.
  """
  _qname = CONTACTS_TEMPLATE % 'city'
class SubRegion(atom.core.XmlElement):
  """The gContact:subregion element.

  Handles administrative districts such as U.S. or U.K. counties that are
  not used for mail addressing purposes. Subregion is not intended for
  delivery addresses.
  """
  _qname = CONTACTS_TEMPLATE % 'subregion'
class Region(atom.core.XmlElement):
  """The gContact:region element.

  A state, province, county (in Ireland), Land (in Germany),
  departement (in France), etc.
  """
  _qname = CONTACTS_TEMPLATE % 'region'
class PostalCode(atom.core.XmlElement):
  """The gContact:postcode element.

  Postal code. Usually country-wide, but sometimes specific to the
  city (e.g. "2" in "Dublin 2, Ireland" addresses).
  """
  _qname = CONTACTS_TEMPLATE % 'postcode'
class Country(atom.core.XmlElement):
  """The gContact:country element: the name or code of the country."""
  _qname = CONTACTS_TEMPLATE % 'country'
class PersonEntry(gdata.data.BatchEntry):
  """Represents a Google contact (person-level fields of an Atom entry).

  Attributes bound to a list ([...]) accept repeated XML elements;
  scalar attributes may appear at most once.
  """
  billing_information = BillingInformation
  birthday = Birthday
  calendar_link = [CalendarLink]
  directory_server = DirectoryServer
  event = [Event]
  external_id = [ExternalId]
  gender = Gender
  hobby = [Hobby]
  # NOTE(review): 'initals' is a long-standing misspelling of 'initials';
  # renaming it would break existing callers, so it is documented rather
  # than changed.
  initals = Initials
  jot = [Jot]
  language = [Language]
  maiden_name = MaidenName
  mileage = Mileage
  nickname = NickName
  occupation = Occupation
  priority = Priority
  relation = [Relation]
  sensitivity = Sensitivity
  user_defined_field = [UserDefinedField]
  website = [Website]
  # Shared gd:* elements from gdata.data.
  name = gdata.data.Name
  phone_number = [gdata.data.PhoneNumber]
  organization = gdata.data.Organization
  postal_address = [gdata.data.PostalAddress]
  email = [gdata.data.Email]
  im = [gdata.data.Im]
  structured_postal_address = [gdata.data.StructuredPostalAddress]
  extended_property = [gdata.data.ExtendedProperty]
class Deleted(atom.core.XmlElement):
  """If present, indicates that this contact has been deleted."""
  _qname = gdata.GDATA_TEMPLATE % 'deleted'
class GroupMembershipInfo(atom.core.XmlElement):
  """The gContact:groupMembershipInfo element.

  Identifies the group to which the contact belongs or belonged.
  The group is referenced by its id.
  """
  _qname = CONTACTS_TEMPLATE % 'groupMembershipInfo'
  # XML attributes.
  href = 'href'
  deleted = 'deleted'
class ContactEntry(PersonEntry):
  """A Google Contacts flavor of an Atom Entry."""
  deleted = Deleted
  group_membership_info = [GroupMembershipInfo]
  organization = gdata.data.Organization

  def GetPhotoLink(self):
    """Returns this entry's photo link, or None if it has none."""
    for candidate in self.link:
      if candidate.rel == PHOTO_LINK_REL:
        return candidate
    return None

  def GetPhotoEditLink(self):
    """Returns this entry's photo-edit link, or None if it has none."""
    for candidate in self.link:
      if candidate.rel == PHOTO_EDIT_LINK_REL:
        return candidate
    return None
class ContactsFeed(gdata.data.BatchFeed):
  """A collection of Contacts."""
  entry = [ContactEntry]
class SystemGroup(atom.core.XmlElement):
  """The contacts systemGroup element.

  When used within a contact group entry, indicates that the group in
  question is one of the predefined system groups.
  """
  _qname = CONTACTS_TEMPLATE % 'systemGroup'
  id = 'id'
class GroupEntry(gdata.data.BatchEntry):
  """Represents a contact group."""
  extended_property = [gdata.data.ExtendedProperty]
  system_group = SystemGroup
class GroupsFeed(gdata.data.BatchFeed):
  """A Google contact groups feed flavor of an Atom Feed."""
  entry = [GroupEntry]
class ProfileEntry(PersonEntry):
  """A Google Profiles flavor of an Atom Entry."""
def ProfileEntryFromString(xml_string):
  """Converts an XML string into a ProfileEntry object.

  Args:
    xml_string: string The XML describing a Profile entry.

  Returns:
    A ProfileEntry object corresponding to the given XML.
  """
  return atom.core.parse(ProfileEntry, xml_string)
class ProfilesFeed(gdata.data.BatchFeed):
  """A Google Profiles feed flavor of an Atom Feed."""
  # NOTE(review): atom.data is not imported by this module's import block;
  # this presumably resolves via a transitive import (e.g. through
  # gdata.data) — confirm.
  _qname = atom.data.ATOM_TEMPLATE % 'feed'
  entry = [ProfileEntry]
def ProfilesFeedFromString(xml_string):
  """Converts an XML string into a ProfilesFeed object.

  Args:
    xml_string: string The XML describing a Profiles feed.

  Returns:
    A ProfilesFeed object corresponding to the given XML.
  """
  return atom.core.parse(ProfilesFeed, xml_string)
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a client to communicate with the Contacts servers.

For documentation on the Contacts API, see:
http://code.google.com/apis/contacts/
"""

__author__ = 'vinces1979@gmail.com (Vince Spicer)'

# Bug fix: the 'from types import ...' line previously preceded the
# docstring above, so that string was never bound as the module docstring
# (__doc__ stayed unset). The docstring must be the module's first
# statement; imports follow it.
from types import ListType, DictionaryType

import atom.data
import atom.http_core
import gdata.client
import gdata.contacts.data
import gdata.gauth
class ContactsClient(gdata.client.GDClient):
api_version = '3'
auth_service = 'cp'
server = "www.google.com"
contact_list = "default"
auth_scopes = gdata.gauth.AUTH_SCOPES['cp']
def get_feed_uri(self, kind='contacts', contact_list=None, projection='full',
scheme="http"):
"""Builds a feed URI.
Args:
kind: The type of feed to return, typically 'groups' or 'contacts'.
Default value: 'contacts'.
contact_list: The contact list to return a feed for.
Default value: self.contact_list.
projection: The projection to apply to the feed contents, for example
'full', 'base', 'base/12345', 'full/batch'. Default value: 'full'.
scheme: The URL scheme such as 'http' or 'https', None to return a
relative URI without hostname.
Returns:
A feed URI using the given kind, contact list, and projection.
Example: '/m8/feeds/contacts/default/full'.
"""
contact_list = contact_list or self.contact_list
if kind == 'profiles':
contact_list = 'domain/%s' % contact_list
prefix = scheme and '%s://%s' % (scheme, self.server) or ''
return '%s/m8/feeds/%s/%s/%s' % (prefix, kind, contact_list, projection)
GetFeedUri = get_feed_uri
def get_contact(self, uri, desired_class=gdata.contacts.data.ContactEntry,
auth_token=None, **kwargs):
return self.get_feed(uri, auth_token=auth_token,
desired_class=desired_class, **kwargs)
GetContact = get_contact
def create_contact(self, new_contact, insert_uri=None, auth_token=None, **kwargs):
"""Adds an new contact to Google Contacts.
Args:
new_contact: atom.Entry or subclass A new contact which is to be added to
Google Contacts.
insert_uri: the URL to post new contacts to the feed
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful insert, an entry containing the contact created
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
insert_uri = insert_uri or self.GetFeedUri()
return self.Post(new_contact, insert_uri,
auth_token=auth_token, **kwargs)
CreateContact = create_contact
def add_contact(self, new_contact, insert_uri=None, auth_token=None,
billing_information=None, birthday=None, calendar_link=None, **kwargs):
"""Adds an new contact to Google Contacts.
Args:
new_contact: atom.Entry or subclass A new contact which is to be added to
Google Contacts.
insert_uri: the URL to post new contacts to the feed
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful insert, an entry containing the contact created
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
contact = gdata.contacts.data.ContactEntry()
if billing_information is not None:
if not isinstance(billing_information, gdata.contacts.data.BillingInformation):
billing_information = gdata.contacts.data.BillingInformation(text=billing_information)
contact.billing_information = billing_information
if birthday is not None:
if not isinstance(birthday, gdata.contacts.data.Birthday):
birthday = gdata.contacts.data.Birthday(when=birthday)
contact.birthday = birthday
if calendar_link is not None:
if type(calendar_link) is not ListType:
calendar_link = [calendar_link]
for link in calendar_link:
if not isinstance(link, gdata.contacts.data.CalendarLink):
if type(link) is not DictionaryType:
raise TypeError, "calendar_link Requires dictionary not %s" % type(link)
link = gdata.contacts.data.CalendarLink(
rel=link.get("rel", None),
label=link.get("label", None),
primary=link.get("primary", None),
href=link.get("href", None),
)
contact.calendar_link.append(link)
insert_uri = insert_uri or self.GetFeedUri()
return self.Post(contact, insert_uri,
auth_token=auth_token, **kwargs)
AddContact = add_contact
def get_contacts(self, desired_class=gdata.contacts.data.ContactsFeed,
auth_token=None, **kwargs):
"""Obtains a feed with the contacts belonging to the current user.
Args:
auth_token: An object which sets the Authorization HTTP header in its
modify_request method. Recommended classes include
gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken
among others. Represents the current user. Defaults to None
and if None, this method will look for a value in the
auth_token member of SpreadsheetsClient.
desired_class: class descended from atom.core.XmlElement to which a
successful response should be converted. If there is no
converter function specified (desired_class=None) then the
desired_class will be used in calling the
atom.core.parse function. If neither
the desired_class nor the converter is specified, an
HTTP reponse object will be returned. Defaults to
gdata.spreadsheets.data.SpreadsheetsFeed.
"""
return self.get_feed(self.GetFeedUri(), auth_token=auth_token,
desired_class=desired_class, **kwargs)
GetContacts = get_contacts
def get_group(self, uri=None, desired_class=gdata.contacts.data.GroupEntry,
auth_token=None, **kwargs):
""" Get a single groups details
Args:
uri: the group uri or id
"""
return self.get(uri, desired_class=desired_class, auth_token=auth_token, **kwargs)
GetGroup = get_group
def get_groups(self, uri=None, desired_class=gdata.contacts.data.GroupsFeed,
auth_token=None, **kwargs):
uri = uri or self.GetFeedUri('groups')
return self.get_feed(uri, desired_class=desired_class, auth_token=auth_token, **kwargs)
GetGroups = get_groups
def create_group(self, new_group, insert_uri=None, url_params=None,
desired_class=None):
insert_uri = insert_uri or self.GetFeedUri('groups')
return self.Post(new_group, insert_uri, url_params=url_params,
desired_class=desired_class)
CreateGroup = create_group
def update_group(self, edit_uri, updated_group, url_params=None,
escape_params=True, desired_class=None):
return self.Put(updated_group, self._CleanUri(edit_uri),
url_params=url_params,
escape_params=escape_params,
desired_class=desired_class)
UpdateGroup = update_group
def delete_group(self, edit_uri, extra_headers=None,
url_params=None, escape_params=True):
return self.Delete(self._CleanUri(edit_uri),
url_params=url_params, escape_params=escape_params)
DeleteGroup = delete_group
def change_photo(self, media, contact_entry_or_url, content_type=None,
content_length=None):
"""Change the photo for the contact by uploading a new photo.
Performs a PUT against the photo edit URL to send the binary data for the
photo.
Args:
media: filename, file-like-object, or a gdata.MediaSource object to send.
contact_entry_or_url: ContactEntry or str If it is a ContactEntry, this
method will search for an edit photo link URL and
perform a PUT to the URL.
content_type: str (optional) the mime type for the photo data. This is
necessary if media is a file or file name, but if media
is a MediaSource object then the media object can contain
the mime type. If media_type is set, it will override the
mime type in the media object.
content_length: int or str (optional) Specifying the content length is
only required if media is a file-like object. If media
is a filename, the length is determined using
os.path.getsize. If media is a MediaSource object, it is
assumed that it already contains the content length.
"""
if isinstance(contact_entry_or_url, gdata.contacts.data.ContactEntry):
url = contact_entry_or_url.GetPhotoEditLink().href
else:
url = contact_entry_or_url
if isinstance(media, gdata.MediaSource):
payload = media
# If the media object is a file-like object, then use it as the file
# handle in the in the MediaSource.
elif hasattr(media, 'read'):
payload = gdata.MediaSource(file_handle=media,
content_type=content_type, content_length=content_length)
# Assume that the media object is a file name.
else:
payload = gdata.MediaSource(content_type=content_type,
content_length=content_length, file_path=media)
return self.Put(payload, url)
ChangePhoto = change_photo
def get_photo(self, contact_entry_or_url):
"""Retrives the binary data for the contact's profile photo as a string.
Args:
contact_entry_or_url: a gdata.contacts.ContactEntry objecr or a string
containing the photo link's URL. If the contact entry does not
contain a photo link, the image will not be fetched and this method
will return None.
"""
# TODO: add the ability to write out the binary image data to a file,
# reading and writing a chunk at a time to avoid potentially using up
# large amounts of memory.
url = None
if isinstance(contact_entry_or_url, gdata.contacts.data.ContactEntry):
photo_link = contact_entry_or_url.GetPhotoLink()
if photo_link:
url = photo_link.href
else:
url = contact_entry_or_url
if url:
return self.Get(url, desired_class=str)
else:
return None
GetPhoto = get_photo
def delete_photo(self, contact_entry_or_url):
url = None
if isinstance(contact_entry_or_url, gdata.contacts.data.ContactEntry):
url = contact_entry_or_url.GetPhotoEditLink().href
else:
url = contact_entry_or_url
if url:
self.Delete(url)
DeletePhoto = delete_photo
def get_profiles_feed(self, uri=None):
"""Retrieves a feed containing all domain's profiles.
Args:
uri: string (optional) the URL to retrieve the profiles feed,
for example /m8/feeds/profiles/default/full
Returns:
On success, a ProfilesFeed containing the profiles.
On failure, raises a RequestError.
"""
uri = uri or self.GetFeedUri('profiles')
return self.Get(uri,
desired_class=gdata.contacts.data.ProfilesFeedFromString)
GetProfilesFeed = get_profiles_feed
def get_profile(self, uri):
"""Retrieves a domain's profile for the user.
Args:
uri: string the URL to retrieve the profiles feed,
for example /m8/feeds/profiles/default/full/username
Returns:
On success, a ProfileEntry containing the profile for the user.
On failure, raises a RequestError
"""
return self.Get(uri,
desired_class=gdata.contacts.data.ProfileEntryFromString)
GetProfile = get_profile
def update_profile(self, edit_uri, updated_profile, auth_token=None, **kwargs):
"""Updates an existing profile.
Args:
edit_uri: string The edit link URI for the element being updated
updated_profile: string atom.Entry or subclass containing
the Atom Entry which will replace the profile which is
stored at the edit_url.
url_params: dict (optional) Additional URL parameters to be included
in the update request.
escape_params: boolean (optional) If true, the url_params will be
escaped before they are included in the request.
Returns:
On successful update, a httplib.HTTPResponse containing the server's
response to the PUT request.
On failure, raises a RequestError.
"""
return self.Put(updated_profile, self._CleanUri(edit_uri),
desired_class=gdata.contacts.data.ProfileEntryFromString)
UpdateProfile = update_profile
def execute_batch(self, batch_feed, url, desired_class=None):
  """Sends a batch request feed to the server.

  Args:
    batch_feed: gdata.contacts.ContactFeed A feed containing batch
        request entries. Each entry contains the operation to be performed
        on the data contained in the entry. For example an entry with an
        operation type of insert will be used as if the individual entry
        had been inserted.
    url: str The batch URL to which these operations should be applied.
    desired_class: Function (optional) The function used to convert the
        server's response to an object.

  Returns:
    The results of the batch request's execution on the server. If the
    default converter is used, this is stored in a ContactsFeed.
  """
  results = self.Post(batch_feed, url, desired_class=desired_class)
  return results
ExecuteBatch = execute_batch
def execute_batch_profiles(self, batch_feed, url,
                           desired_class=gdata.contacts.data.ProfilesFeedFromString):
  """Sends a batch request feed of profile operations to the server.

  Args:
    batch_feed: gdata.profiles.ProfilesFeed A feed containing batch
        request entries, each describing the operation to perform on the
        data contained in that entry.
    url: string The batch URL to which these operations should be applied.
    desired_class: Function (optional) Converter for the server's response;
        defaults to gdata.contacts.data.ProfilesFeedFromString.

  Returns:
    The results of the batch request's execution on the server. With the
    default converter this is a ProfilesFeed.
  """
  results = self.Post(batch_feed, url, desired_class=desired_class)
  return results
ExecuteBatchProfiles = execute_batch_profiles
class ContactsQuery(gdata.client.Query):
  """
  Create a custom Contacts Query

  Full specs can be found at: U{Contacts query parameters reference
  <http://code.google.com/apis/contacts/docs/3.0/reference.html#Parameters>}
  """

  def __init__(self, feed=None, group=None, orderby=None, showdeleted=None,
               sortorder=None, requirealldeleted=None, **kwargs):
    """
    @param max_results: The maximum number of entries to return. If you want
        to receive all of the contacts, rather than only the default maximum,
        you can specify a very large number for max-results.
    @param start-index: The 1-based index of the first result to be retrieved.
    @param updated-min: The lower bound on entry update dates.
    @param group: Constrains the results to only the contacts belonging to the
        group specified. Value of this parameter specifies group ID
    @param orderby: Sorting criterion. The only supported value is
        lastmodified.
    @param showdeleted: Include deleted contacts in the returned contacts feed
    @param sortorder: Sorting order direction. Can be either ascending or
        descending.
    @param requirealldeleted: Only relevant if showdeleted and updated-min
        are also provided. It dictates the behavior of the server in case it
        detects that placeholders of some entries deleted since the point in
        time specified as updated-min may have been lost.
    """
    gdata.client.Query.__init__(self, **kwargs)
    self.group = group
    self.orderby = orderby
    self.sortorder = sortorder
    self.showdeleted = showdeleted
    # Bug fix: requirealldeleted was accepted and documented above but never
    # stored, so it could not be applied to the request.
    self.requirealldeleted = requirealldeleted

  def modify_request(self, http_request):
    # Append each set query attribute as a URL parameter, then let the base
    # class add its own standard parameters.
    if self.group:
      gdata.client._add_query_param('group', self.group, http_request)
    if self.orderby:
      gdata.client._add_query_param('orderby', self.orderby, http_request)
    if self.sortorder:
      gdata.client._add_query_param('sortorder', self.sortorder, http_request)
    if self.showdeleted:
      gdata.client._add_query_param('showdeleted', self.showdeleted, http_request)
    if self.requirealldeleted:
      gdata.client._add_query_param('requirealldeleted', self.requirealldeleted,
                                    http_request)
    gdata.client.Query.modify_request(self, http_request)

  ModifyRequest = modify_request
class ProfilesQuery(gdata.client.Query):
  # Query object for the domain profiles feed.
  #
  # NOTE(review): unlike ContactsQuery, __init__ never calls
  # gdata.client.Query.__init__, so attributes the base class normally
  # initializes may be missing here — confirm against gdata.client.Query.

  def __init__(self, feed=None):
    # Default to the full profiles feed when no feed URI is supplied.
    self.feed = feed or 'http://www.google.com/m8/feeds/profiles/default/full'

  def _CleanUri(self, uri):
    """Sanitizes a feed URI.
    Args:
      uri: The URI to sanitize, can be relative or absolute.
    Returns:
      The given URI without its http://server prefix, if any.
      Keeps the leading slash of the URI.
    """
    # NOTE(review): relies on self.server, which this class never sets —
    # presumably this method belongs on the client class; verify placement
    # against the upstream gdata.contacts.client module.
    url_prefix = 'http://%s' % self.server
    if uri.startswith(url_prefix):
      uri = uri[len(url_prefix):]
    return uri
| Python |
#!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to ElementWrapper objects used with Google Contacts."""
__author__ = 'dbrattli (Dag Brattli)'
import atom
import gdata
## Constants from http://code.google.com/apis/gdata/elements.html ##
REL_HOME = 'http://schemas.google.com/g/2005#home'
REL_WORK = 'http://schemas.google.com/g/2005#work'
REL_OTHER = 'http://schemas.google.com/g/2005#other'
# AOL Instant Messenger protocol
IM_AIM = 'http://schemas.google.com/g/2005#AIM'
IM_MSN = 'http://schemas.google.com/g/2005#MSN' # MSN Messenger protocol
IM_YAHOO = 'http://schemas.google.com/g/2005#YAHOO' # Yahoo Messenger protocol
IM_SKYPE = 'http://schemas.google.com/g/2005#SKYPE' # Skype protocol
IM_QQ = 'http://schemas.google.com/g/2005#QQ' # QQ protocol
# Google Talk protocol
IM_GOOGLE_TALK = 'http://schemas.google.com/g/2005#GOOGLE_TALK'
IM_ICQ = 'http://schemas.google.com/g/2005#ICQ' # ICQ protocol
IM_JABBER = 'http://schemas.google.com/g/2005#JABBER' # Jabber protocol
IM_NETMEETING = 'http://schemas.google.com/g/2005#netmeeting' # NetMeeting
PHOTO_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#photo'
PHOTO_EDIT_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#edit-photo'
# Different phone types, for more info see:
# http://code.google.com/apis/gdata/docs/2.0/elements.html#gdPhoneNumber
PHONE_CAR = 'http://schemas.google.com/g/2005#car'
PHONE_FAX = 'http://schemas.google.com/g/2005#fax'
PHONE_GENERAL = 'http://schemas.google.com/g/2005#general'
PHONE_HOME = REL_HOME
PHONE_HOME_FAX = 'http://schemas.google.com/g/2005#home_fax'
PHONE_INTERNAL = 'http://schemas.google.com/g/2005#internal-extension'
PHONE_MOBILE = 'http://schemas.google.com/g/2005#mobile'
PHONE_OTHER = REL_OTHER
PHONE_PAGER = 'http://schemas.google.com/g/2005#pager'
PHONE_SATELLITE = 'http://schemas.google.com/g/2005#satellite'
PHONE_VOIP = 'http://schemas.google.com/g/2005#voip'
PHONE_WORK = REL_WORK
PHONE_WORK_FAX = 'http://schemas.google.com/g/2005#work_fax'
PHONE_WORK_MOBILE = 'http://schemas.google.com/g/2005#work_mobile'
PHONE_WORK_PAGER = 'http://schemas.google.com/g/2005#work_pager'
PHONE_MAIN = 'http://schemas.google.com/g/2005#main'
PHONE_ASSISTANT = 'http://schemas.google.com/g/2005#assistant'
PHONE_CALLBACK = 'http://schemas.google.com/g/2005#callback'
PHONE_COMPANY_MAIN = 'http://schemas.google.com/g/2005#company_main'
PHONE_ISDN = 'http://schemas.google.com/g/2005#isdn'
PHONE_OTHER_FAX = 'http://schemas.google.com/g/2005#other_fax'
PHONE_RADIO = 'http://schemas.google.com/g/2005#radio'
PHONE_TELEX = 'http://schemas.google.com/g/2005#telex'
PHONE_TTY_TDD = 'http://schemas.google.com/g/2005#tty_tdd'
EXTERNAL_ID_ORGANIZATION = 'organization'
RELATION_MANAGER = 'manager'
CONTACTS_NAMESPACE = 'http://schemas.google.com/contact/2008'
class GDataBase(atom.AtomBase):
  """The Google Contacts intermediate class from atom.AtomBase."""
  # All gd:* elements in this module serialize under the GData namespace and
  # inherit atom.AtomBase's child/attribute maps via copies, so subclasses
  # can extend them without mutating the base class's dicts.
  _namespace = gdata.GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()

  def __init__(self, text=None,
               extension_elements=None, extension_attributes=None):
    # Initializes the text node and extension containers directly; the
    # `or []` / `or {}` defaults give each instance fresh mutable containers.
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
class ContactsBase(GDataBase):
  """The Google Contacts intermediate class for Contacts namespace."""
  # Identical behavior to GDataBase, but elements serialize under the
  # gContact (2008) namespace instead of the generic GData namespace.
  _namespace = CONTACTS_NAMESPACE
class OrgName(GDataBase):
  """The Google Contacts OrgName element."""
  # Text-only child of <gd:organization>; the company/organization name.
  _tag = 'orgName'
class OrgTitle(GDataBase):
  """The Google Contacts OrgTitle element."""
  # Text-only child of <gd:organization>; the person's job title.
  _tag = 'orgTitle'
class OrgDepartment(GDataBase):
  """The Google Contacts OrgDepartment element."""
  # Text-only child of <gd:organization>; the department within the org.
  _tag = 'orgDepartment'
class OrgJobDescription(GDataBase):
  """The Google Contacts OrgJobDescription element."""
  # Text-only child of <gd:organization>; free-form job description.
  _tag = 'orgJobDescription'
class Where(GDataBase):
  """The Google Contacts Where element (<gd:where>)."""
  _tag = 'where'
  _children = GDataBase._children.copy()
  _attributes = GDataBase._attributes.copy()
  # XML attribute name -> python attribute name.
  _attributes.update({
      'rel': 'rel',
      'label': 'label',
      'valueString': 'value_string',
  })

  def __init__(self, value_string=None, rel=None, label=None,
               text=None, extension_elements=None, extension_attributes=None):
    """Constructs a Where element with optional rel/label/valueString."""
    GDataBase.__init__(self, text=text,
                       extension_elements=extension_elements,
                       extension_attributes=extension_attributes)
    self.value_string = value_string
    self.label = label
    self.rel = rel
class When(GDataBase):
  """The Google Contacts When element (<gd:when>)."""
  _tag = 'when'
  _children = GDataBase._children.copy()
  _attributes = GDataBase._attributes.copy()
  # XML attribute name -> python attribute name.
  _attributes.update({
      'startTime': 'start_time',
      'endTime': 'end_time',
      'label': 'label',
  })

  def __init__(self, start_time=None, end_time=None, label=None,
               text=None, extension_elements=None, extension_attributes=None):
    """Constructs a When element carrying start/end times and a label."""
    GDataBase.__init__(self, text=text,
                       extension_elements=extension_elements,
                       extension_attributes=extension_attributes)
    self.label = label
    self.start_time = start_time
    self.end_time = end_time
class Organization(GDataBase):
  """The Google Contacts Organization element."""
  _tag = 'organization'
  _children = GDataBase._children.copy()
  _attributes = GDataBase._attributes.copy()
  _attributes['label'] = 'label'
  _attributes['rel'] = 'rel'
  _attributes['primary'] = 'primary'
  # Sub-elements of <gd:organization>: name, title, department, description.
  _children['{%s}orgName' % GDataBase._namespace] = (
      'org_name', OrgName)
  _children['{%s}orgTitle' % GDataBase._namespace] = (
      'org_title', OrgTitle)
  _children['{%s}orgDepartment' % GDataBase._namespace] = (
      'org_department', OrgDepartment)
  _children['{%s}orgJobDescription' % GDataBase._namespace] = (
      'org_job_description', OrgJobDescription)
  #_children['{%s}where' % GDataBase._namespace] = ('where', Where)

  def __init__(self, label=None, rel=None, primary='false', org_name=None,
               org_title=None, org_department=None, org_job_description=None,
               where=None, text=None,
               extension_elements=None, extension_attributes=None):
    # NOTE(review): the `where` child mapping above is commented out, so
    # self.where is stored but will not round-trip through XML
    # parsing/serialization — presumably intentional; confirm upstream.
    GDataBase.__init__(self, text=text, extension_elements=extension_elements,
                       extension_attributes=extension_attributes)
    self.label = label
    # Unspecified rel defaults to the "other" relationship URI.
    self.rel = rel or REL_OTHER
    self.primary = primary
    self.org_name = org_name
    self.org_title = org_title
    self.org_department = org_department
    self.org_job_description = org_job_description
    self.where = where
class PostalAddress(GDataBase):
  """The Google Contacts PostalAddress element (<gd:postalAddress>)."""
  _tag = 'postalAddress'
  _children = GDataBase._children.copy()
  _attributes = GDataBase._attributes.copy()
  # XML attribute name -> python attribute name.
  _attributes.update({
      'rel': 'rel',
      'primary': 'primary',
  })

  def __init__(self, primary=None, rel=None, text=None,
               extension_elements=None, extension_attributes=None):
    """Constructs a PostalAddress; rel defaults to the "other" URI."""
    GDataBase.__init__(self, text=text,
                       extension_elements=extension_elements,
                       extension_attributes=extension_attributes)
    self.primary = primary
    self.rel = rel or REL_OTHER
class FormattedAddress(GDataBase):
  """The Google Contacts FormattedAddress element."""
  # Text-only child of <gd:structuredPostalAddress>: the full address string.
  _tag = 'formattedAddress'
class StructuredPostalAddress(GDataBase):
  """The Google Contacts StructuredPostalAddress element."""
  _tag = 'structuredPostalAddress'
  _children = GDataBase._children.copy()
  _attributes = GDataBase._attributes.copy()
  # XML attribute name -> python attribute name.
  _attributes.update({
      'rel': 'rel',
      'primary': 'primary',
  })
  # The only mapped child element: the formatted (single-string) address.
  _children['{%s}formattedAddress' % GDataBase._namespace] = (
      'formatted_address', FormattedAddress)

  def __init__(self, rel=None, primary=None,
               formatted_address=None, text=None,
               extension_elements=None, extension_attributes=None):
    """Constructs a StructuredPostalAddress; rel defaults to "other"."""
    GDataBase.__init__(self, text=text,
                       extension_elements=extension_elements,
                       extension_attributes=extension_attributes)
    self.formatted_address = formatted_address
    self.primary = primary
    self.rel = rel or REL_OTHER
class IM(GDataBase):
  """The Google Contacts IM element (<gd:im>)."""
  _tag = 'im'
  _children = GDataBase._children.copy()
  _attributes = GDataBase._attributes.copy()
  # XML attribute name -> python attribute name.
  _attributes.update({
      'address': 'address',
      'primary': 'primary',
      'protocol': 'protocol',
      'label': 'label',
      'rel': 'rel',
  })

  def __init__(self, primary='false', rel=None, address=None, protocol=None,
               label=None, text=None,
               extension_elements=None, extension_attributes=None):
    """Constructs an IM address; protocol is one of the IM_* constants."""
    GDataBase.__init__(self, text=text,
                       extension_elements=extension_elements,
                       extension_attributes=extension_attributes)
    self.address = address
    self.protocol = protocol
    self.label = label
    self.primary = primary
    self.rel = rel or REL_OTHER
class Email(GDataBase):
  """The Google Contacts Email element (<gd:email>)."""
  _tag = 'email'
  _children = GDataBase._children.copy()
  _attributes = GDataBase._attributes.copy()
  # XML attribute name -> python attribute name.
  _attributes.update({
      'address': 'address',
      'primary': 'primary',
      'rel': 'rel',
      'label': 'label',
  })

  def __init__(self, label=None, rel=None, address=None, primary='false',
               text=None, extension_elements=None, extension_attributes=None):
    """Constructs an Email address; rel defaults to the "other" URI."""
    GDataBase.__init__(self, text=text,
                       extension_elements=extension_elements,
                       extension_attributes=extension_attributes)
    self.address = address
    self.primary = primary
    self.label = label
    self.rel = rel or REL_OTHER
class PhoneNumber(GDataBase):
  """The Google Contacts PhoneNumber element (<gd:phoneNumber>)."""
  _tag = 'phoneNumber'
  _children = GDataBase._children.copy()
  _attributes = GDataBase._attributes.copy()
  # XML attribute name -> python attribute name.
  _attributes.update({
      'label': 'label',
      'rel': 'rel',
      'uri': 'uri',
      'primary': 'primary',
  })

  def __init__(self, label=None, rel=None, uri=None, primary='false',
               text=None, extension_elements=None, extension_attributes=None):
    """Constructs a PhoneNumber; rel defaults to the "other" URI."""
    GDataBase.__init__(self, text=text,
                       extension_elements=extension_elements,
                       extension_attributes=extension_attributes)
    self.uri = uri
    self.primary = primary
    self.label = label
    self.rel = rel or REL_OTHER
class Nickname(ContactsBase):
  """The Google Contacts Nickname element."""
  # Text-only gContact element; the contact's nickname.
  _tag = 'nickname'
class Occupation(ContactsBase):
  """The Google Contacts Occupation element."""
  # Text-only gContact element; the contact's occupation.
  _tag = 'occupation'
class Gender(ContactsBase):
  """The Google Contacts Gender element (<gContact:gender>)."""
  _tag = 'gender'
  _children = ContactsBase._children.copy()
  _attributes = ContactsBase._attributes.copy()
  _attributes.update({'value': 'value'})

  def __init__(self, value=None,
               text=None, extension_elements=None, extension_attributes=None):
    """Constructs a Gender element carrying its value attribute."""
    ContactsBase.__init__(self, text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
    self.value = value
class Birthday(ContactsBase):
  """The Google Contacts Birthday element (<gContact:birthday>)."""
  _tag = 'birthday'
  _children = ContactsBase._children.copy()
  _attributes = ContactsBase._attributes.copy()
  _attributes.update({'when': 'when'})

  def __init__(self, when=None,
               text=None, extension_elements=None, extension_attributes=None):
    """Constructs a Birthday element carrying its `when` attribute."""
    ContactsBase.__init__(self, text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
    self.when = when
class Relation(ContactsBase):
  """The Google Contacts Relation element (<gContact:relation>)."""
  _tag = 'relation'
  _children = ContactsBase._children.copy()
  _attributes = ContactsBase._attributes.copy()
  # XML attribute name -> python attribute name.
  _attributes.update({
      'label': 'label',
      'rel': 'rel',
  })

  def __init__(self, label=None, rel=None,
               text=None, extension_elements=None, extension_attributes=None):
    """Constructs a Relation with an optional rel (e.g. 'manager') or label."""
    ContactsBase.__init__(self, text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
    self.rel = rel
    self.label = label
def RelationFromString(xml_string):
  """Deserializes a Relation element from its XML string form."""
  parsed = atom.CreateClassFromXMLString(Relation, xml_string)
  return parsed
class UserDefinedField(ContactsBase):
  """The Google Contacts UserDefinedField element."""
  _tag = 'userDefinedField'
  _children = ContactsBase._children.copy()
  _attributes = ContactsBase._attributes.copy()
  # XML attribute name -> python attribute name.
  _attributes.update({
      'key': 'key',
      'value': 'value',
  })

  def __init__(self, key=None, value=None,
               text=None, extension_elements=None, extension_attributes=None):
    """Constructs a custom key/value field attached to a contact."""
    ContactsBase.__init__(self, text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
    self.value = value
    self.key = key
def UserDefinedFieldFromString(xml_string):
  """Deserializes a UserDefinedField element from its XML string form."""
  parsed = atom.CreateClassFromXMLString(UserDefinedField, xml_string)
  return parsed
class Website(ContactsBase):
  """The Google Contacts Website element (<gContact:website>)."""
  _tag = 'website'
  _children = ContactsBase._children.copy()
  _attributes = ContactsBase._attributes.copy()
  # XML attribute name -> python attribute name.
  _attributes.update({
      'href': 'href',
      'label': 'label',
      'primary': 'primary',
      'rel': 'rel',
  })

  def __init__(self, href=None, label=None, primary='false', rel=None,
               text=None, extension_elements=None, extension_attributes=None):
    """Constructs a Website link for a contact."""
    ContactsBase.__init__(self, text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
    self.href = href
    self.rel = rel
    self.primary = primary
    self.label = label
def WebsiteFromString(xml_string):
  """Deserializes a Website element from its XML string form."""
  parsed = atom.CreateClassFromXMLString(Website, xml_string)
  return parsed
class ExternalId(ContactsBase):
  """The Google Contacts ExternalId element (<gContact:externalId>)."""
  _tag = 'externalId'
  _children = ContactsBase._children.copy()
  _attributes = ContactsBase._attributes.copy()
  # XML attribute name -> python attribute name.
  _attributes.update({
      'label': 'label',
      'rel': 'rel',
      'value': 'value',
  })

  def __init__(self, label=None, rel=None, value=None,
               text=None, extension_elements=None, extension_attributes=None):
    """Constructs an external identifier attached to a contact."""
    ContactsBase.__init__(self, text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
    self.value = value
    self.rel = rel
    self.label = label
def ExternalIdFromString(xml_string):
  """Deserializes an ExternalId element from its XML string form."""
  parsed = atom.CreateClassFromXMLString(ExternalId, xml_string)
  return parsed
class Event(ContactsBase):
  """The Google Contacts Event element (<gContact:event>)."""
  _tag = 'event'
  _children = ContactsBase._children.copy()
  _attributes = ContactsBase._attributes.copy()
  # XML attribute name -> python attribute name.
  _attributes.update({
      'label': 'label',
      'rel': 'rel',
  })
  # The event's date/time is carried by a nested <gd:when> child.
  _children['{%s}when' % ContactsBase._namespace] = ('when', When)

  def __init__(self, label=None, rel=None, when=None,
               text=None, extension_elements=None, extension_attributes=None):
    """Constructs an Event with an optional rel/label and a When child."""
    ContactsBase.__init__(self, text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
    self.when = when
    self.rel = rel
    self.label = label
def EventFromString(xml_string):
  """Deserializes an Event element from its XML string form."""
  parsed = atom.CreateClassFromXMLString(Event, xml_string)
  return parsed
class Deleted(GDataBase):
  """The Google Contacts Deleted element."""
  # Empty marker element; its presence on an entry flags it as deleted.
  _tag = 'deleted'
class GroupMembershipInfo(ContactsBase):
  """The Google Contacts GroupMembershipInfo element."""
  _tag = 'groupMembershipInfo'
  _children = ContactsBase._children.copy()
  _attributes = ContactsBase._attributes.copy()
  # XML attribute name -> python attribute name.
  _attributes.update({
      'deleted': 'deleted',
      'href': 'href',
  })

  def __init__(self, deleted=None, href=None, text=None,
               extension_elements=None, extension_attributes=None):
    """Constructs a membership record linking a contact to a group entry."""
    ContactsBase.__init__(self, text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
    self.href = href
    self.deleted = deleted
class PersonEntry(gdata.BatchEntry):
  """Base class for ContactEntry and ProfileEntry."""
  # Child-element map: namespaced XML tag -> (python attribute, type).
  # A type wrapped in a one-element list marks a repeatable element.
  _children = gdata.BatchEntry._children.copy()
  _children['{%s}organization' % gdata.GDATA_NAMESPACE] = (
      'organization', [Organization])
  _children['{%s}phoneNumber' % gdata.GDATA_NAMESPACE] = (
      'phone_number', [PhoneNumber])
  _children['{%s}nickname' % CONTACTS_NAMESPACE] = ('nickname', Nickname)
  _children['{%s}occupation' % CONTACTS_NAMESPACE] = ('occupation', Occupation)
  _children['{%s}gender' % CONTACTS_NAMESPACE] = ('gender', Gender)
  _children['{%s}birthday' % CONTACTS_NAMESPACE] = ('birthday', Birthday)
  _children['{%s}postalAddress' % gdata.GDATA_NAMESPACE] = ('postal_address',
                                                            [PostalAddress])
  # NOTE(review): 'structured_pstal_address' is misspelled (missing 'o') but
  # it is part of the public attribute/keyword interface of this class and
  # its subclasses; renaming it would break existing callers.
  _children['{%s}structuredPostalAddress' % gdata.GDATA_NAMESPACE] = (
      'structured_pstal_address', [StructuredPostalAddress])
  _children['{%s}email' % gdata.GDATA_NAMESPACE] = ('email', [Email])
  _children['{%s}im' % gdata.GDATA_NAMESPACE] = ('im', [IM])
  _children['{%s}relation' % CONTACTS_NAMESPACE] = ('relation', [Relation])
  _children['{%s}userDefinedField' % CONTACTS_NAMESPACE] = (
      'user_defined_field', [UserDefinedField])
  _children['{%s}website' % CONTACTS_NAMESPACE] = ('website', [Website])
  _children['{%s}externalId' % CONTACTS_NAMESPACE] = (
      'external_id', [ExternalId])
  _children['{%s}event' % CONTACTS_NAMESPACE] = ('event', [Event])
  # The following line should be removed once the Python support
  # for GData 2.0 is mature.
  _attributes = gdata.BatchEntry._attributes.copy()
  _attributes['{%s}etag' % gdata.GDATA_NAMESPACE] = 'etag'

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None, organization=None, phone_number=None,
               nickname=None, occupation=None, gender=None, birthday=None,
               postal_address=None, structured_pstal_address=None, email=None,
               im=None, relation=None, user_defined_field=None, website=None,
               external_id=None, event=None, batch_operation=None,
               batch_id=None, batch_status=None, text=None,
               extension_elements=None, extension_attributes=None, etag=None):
    # Atom/batch plumbing is delegated to the base class; the contact-specific
    # fields below are stored directly on this instance.
    gdata.BatchEntry.__init__(self, author=author, category=category,
                              content=content, atom_id=atom_id, link=link,
                              published=published,
                              batch_operation=batch_operation,
                              batch_id=batch_id, batch_status=batch_status,
                              title=title, updated=updated)
    # Repeatable elements default to fresh empty lists per instance.
    self.organization = organization or []
    self.phone_number = phone_number or []
    self.nickname = nickname
    self.occupation = occupation
    self.gender = gender
    self.birthday = birthday
    self.postal_address = postal_address or []
    self.structured_pstal_address = structured_pstal_address or []
    self.email = email or []
    self.im = im or []
    self.relation = relation or []
    self.user_defined_field = user_defined_field or []
    self.website = website or []
    self.external_id = external_id or []
    self.event = event or []
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    # The following line should be removed once the Python support
    # for GData 2.0 is mature.
    self.etag = etag
class ContactEntry(PersonEntry):
  """A Google Contact flavor of an Atom Entry."""
  _children = PersonEntry._children.copy()
  _children['{%s}deleted' % gdata.GDATA_NAMESPACE] = ('deleted', Deleted)
  _children['{%s}groupMembershipInfo' % CONTACTS_NAMESPACE] = (
      'group_membership_info', [GroupMembershipInfo])
  _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = (
      'extended_property', [gdata.ExtendedProperty])
  # Overwrite the organization rule in PersonEntry so that a ContactEntry
  # may only contain one <gd:organization> element.
  _children['{%s}organization' % gdata.GDATA_NAMESPACE] = (
      'organization', Organization)

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None, organization=None, phone_number=None,
               nickname=None, occupation=None, gender=None, birthday=None,
               postal_address=None, structured_pstal_address=None, email=None,
               im=None, relation=None, user_defined_field=None, website=None,
               external_id=None, event=None, batch_operation=None,
               batch_id=None, batch_status=None, text=None,
               extension_elements=None, extension_attributes=None, etag=None,
               deleted=None, extended_property=None,
               group_membership_info=None):
    # All shared person fields are threaded straight through to PersonEntry;
    # only the contact-specific attributes are stored here.
    PersonEntry.__init__(self, author=author, category=category,
                         content=content, atom_id=atom_id, link=link,
                         published=published, title=title, updated=updated,
                         organization=organization, phone_number=phone_number,
                         nickname=nickname, occupation=occupation,
                         gender=gender, birthday=birthday,
                         postal_address=postal_address,
                         structured_pstal_address=structured_pstal_address,
                         email=email, im=im, relation=relation,
                         user_defined_field=user_defined_field,
                         website=website, external_id=external_id, event=event,
                         batch_operation=batch_operation, batch_id=batch_id,
                         batch_status=batch_status, text=text,
                         extension_elements=extension_elements,
                         extension_attributes=extension_attributes, etag=etag)
    self.deleted = deleted
    self.extended_property = extended_property or []
    self.group_membership_info = group_membership_info or []

  def GetPhotoLink(self):
    # Returns the entry's photo link, or None when no such link exists.
    for a_link in self.link:
      if a_link.rel == PHOTO_LINK_REL:
        return a_link
    return None

  def GetPhotoEditLink(self):
    # Returns the entry's photo *edit* link, or None when no such link exists.
    for a_link in self.link:
      if a_link.rel == PHOTO_EDIT_LINK_REL:
        return a_link
    return None
def ContactEntryFromString(xml_string):
  """Deserializes a ContactEntry from its XML string form."""
  parsed = atom.CreateClassFromXMLString(ContactEntry, xml_string)
  return parsed
class ContactsFeed(gdata.BatchFeed, gdata.LinkFinder):
  """A Google Contacts feed flavor of an Atom Feed."""
  # Feed entries parse as ContactEntry objects.
  _children = gdata.BatchFeed._children.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ContactEntry])

  def __init__(self, author=None, category=None, contributor=None,
               generator=None, icon=None, atom_id=None, link=None, logo=None,
               rights=None, subtitle=None, title=None, updated=None,
               entry=None, total_results=None, start_index=None,
               items_per_page=None, extension_elements=None,
               extension_attributes=None, text=None):
    # Pure pass-through constructor: all arguments are forwarded unchanged
    # to gdata.BatchFeed.__init__.
    gdata.BatchFeed.__init__(self, author=author, category=category,
                             contributor=contributor, generator=generator,
                             icon=icon, atom_id=atom_id, link=link,
                             logo=logo, rights=rights, subtitle=subtitle,
                             title=title, updated=updated, entry=entry,
                             total_results=total_results,
                             start_index=start_index,
                             items_per_page=items_per_page,
                             extension_elements=extension_elements,
                             extension_attributes=extension_attributes,
                             text=text)
def ContactsFeedFromString(xml_string):
  """Deserializes a ContactsFeed from its XML string form."""
  parsed = atom.CreateClassFromXMLString(ContactsFeed, xml_string)
  return parsed
class GroupEntry(gdata.BatchEntry):
  """Represents a contact group."""
  _children = gdata.BatchEntry._children.copy()
  _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = (
      'extended_property', [gdata.ExtendedProperty])

  def __init__(self, author=None, category=None, content=None,
               contributor=None, atom_id=None, link=None, published=None,
               rights=None, source=None, summary=None, control=None,
               title=None, updated=None,
               extended_property=None, batch_operation=None, batch_id=None,
               batch_status=None,
               extension_elements=None, extension_attributes=None, text=None):
    # NOTE(review): contributor, rights, source, summary, control,
    # extension_elements, extension_attributes and text are accepted here but
    # neither forwarded to gdata.BatchEntry.__init__ nor stored — they are
    # silently discarded. Confirm against gdata.BatchEntry before relying on
    # any of them.
    gdata.BatchEntry.__init__(self, author=author, category=category,
                              content=content,
                              atom_id=atom_id, link=link, published=published,
                              batch_operation=batch_operation,
                              batch_id=batch_id, batch_status=batch_status,
                              title=title, updated=updated)
    self.extended_property = extended_property or []
def GroupEntryFromString(xml_string):
  """Deserializes a GroupEntry from its XML string form."""
  parsed = atom.CreateClassFromXMLString(GroupEntry, xml_string)
  return parsed
class GroupsFeed(gdata.BatchFeed):
  """A Google contact groups feed flavor of an Atom Feed."""
  # Feed entries parse as GroupEntry objects; everything else is inherited.
  _children = gdata.BatchFeed._children.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GroupEntry])
def GroupsFeedFromString(xml_string):
  """Deserializes a GroupsFeed from its XML string form."""
  parsed = atom.CreateClassFromXMLString(GroupsFeed, xml_string)
  return parsed
class ProfileEntry(PersonEntry):
  """A Google Profiles flavor of an Atom Entry."""
  # Inherits all person fields and XML mappings from PersonEntry unchanged.
def ProfileEntryFromString(xml_string):
  """Converts an XML string into a ProfileEntry object.

  Args:
    xml_string: string The XML describing a Profile entry.

  Returns:
    A ProfileEntry object corresponding to the given XML.
  """
  parsed = atom.CreateClassFromXMLString(ProfileEntry, xml_string)
  return parsed
class ProfilesFeed(gdata.BatchFeed, gdata.LinkFinder):
  """A Google Profiles feed flavor of an Atom Feed."""
  # Feed entries parse as ProfileEntry objects.
  _children = gdata.BatchFeed._children.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ProfileEntry])

  def __init__(self, author=None, category=None, contributor=None,
               generator=None, icon=None, atom_id=None, link=None, logo=None,
               rights=None, subtitle=None, title=None, updated=None,
               entry=None, total_results=None, start_index=None,
               items_per_page=None, extension_elements=None,
               extension_attributes=None, text=None):
    # Pure pass-through constructor: all arguments are forwarded unchanged
    # to gdata.BatchFeed.__init__.
    gdata.BatchFeed.__init__(self, author=author, category=category,
                             contributor=contributor, generator=generator,
                             icon=icon, atom_id=atom_id, link=link,
                             logo=logo, rights=rights, subtitle=subtitle,
                             title=title, updated=updated, entry=entry,
                             total_results=total_results,
                             start_index=start_index,
                             items_per_page=items_per_page,
                             extension_elements=extension_elements,
                             extension_attributes=extension_attributes,
                             text=text)
def ProfilesFeedFromString(xml_string):
  """Converts an XML string into a ProfilesFeed object.

  Args:
    xml_string: string The XML describing a Profiles feed.

  Returns:
    A ProfilesFeed object corresponding to the given XML.
  """
  parsed = atom.CreateClassFromXMLString(ProfilesFeed, xml_string)
  return parsed
| Python |
#!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ContactsService extends the GDataService for Google Contacts operations.
ContactsService: Provides methods to query feeds and manipulate items.
Extends GDataService.
DictionaryToParamList: Function which converts a dictionary into a list of
URL arguments (represented as strings). This is a
utility function used in CRUD operations.
"""
__author__ = 'dbrattli (Dag Brattli)'
import gdata
import gdata.calendar
import gdata.service
DEFAULT_BATCH_URL = ('http://www.google.com/m8/feeds/contacts/default/full'
'/batch')
DEFAULT_PROFILES_BATCH_URL = ('http://www.google.com'
'/m8/feeds/profiles/default/full/batch')
GDATA_VER_HEADER = 'GData-Version'
class Error(Exception):
  # Base exception for this module; callers can catch it to handle any
  # error raised here.
  pass
class RequestError(Error):
  # Raised for failed HTTP requests; see the raising methods' docstrings
  # for the status/reason/body payload shape.
  pass
class ContactsService(gdata.service.GDataService):
"""Client for the Google Contacts service."""
def __init__(self, email=None, password=None, source=None,
             server='www.google.com', additional_headers=None,
             contact_list='default', **kwargs):
  """Creates a client for the Contacts service.

  Args:
    email: string (optional) The user's email address, used for
        authentication.
    password: string (optional) The user's password.
    source: string (optional) The name of the user's application.
    server: string (optional) The name of the server to which a connection
        will be opened. Default value: 'www.google.com'.
    additional_headers: dict (optional) Extra HTTP headers, passed through
        to gdata.service.GDataService.
    contact_list: string (optional) The name of the default contact list to
        use when no URI is specified to the methods of the service.
        Default value: 'default' (the logged in user's contact list).
    **kwargs: The other parameters to pass to gdata.service.GDataService
        constructor.
  """
  # Remember the default list before delegating; 'cp' is the Contacts
  # service code used for authentication.
  self.contact_list = contact_list
  gdata.service.GDataService.__init__(
      self, email=email, password=password, service='cp', source=source,
      server=server, additional_headers=additional_headers, **kwargs)
def GetFeedUri(self, kind='contacts', contact_list=None, projection='full',
               scheme=None):
  """Builds a feed URI.

  Args:
    kind: The type of feed to return, typically 'groups' or 'contacts'.
        Default value: 'contacts'.
    contact_list: The contact list to return a feed for.
        Default value: self.contact_list.
    projection: The projection to apply to the feed contents, for example
        'full', 'base', 'base/12345', 'full/batch'. Default value: 'full'.
    scheme: The URL scheme such as 'http' or 'https', None to return a
        relative URI without hostname.

  Returns:
    A feed URI using the given kind, contact list, and projection.
    Example: '/m8/feeds/contacts/default/full'.
  """
  contact_list = contact_list or self.contact_list
  if kind == 'profiles':
    # Profile feeds are addressed per domain rather than per user list.
    contact_list = 'domain/%s' % contact_list
  # Idiom fix: replaced the fragile `scheme and X or Y` trick with an
  # explicit branch (the old form silently breaks if X can be falsy).
  if scheme:
    prefix = '%s://%s' % (scheme, self.server)
  else:
    prefix = ''
  return '%s/m8/feeds/%s/%s/%s' % (prefix, kind, contact_list, projection)
def GetContactsFeed(self, uri=None):
  """Fetches the contacts feed at `uri` (default: the client's full feed)."""
  feed_uri = uri or self.GetFeedUri()
  return self.Get(feed_uri, converter=gdata.contacts.ContactsFeedFromString)
def GetContact(self, uri):
  """Fetches the single contact entry stored at `uri`."""
  entry = self.Get(uri, converter=gdata.contacts.ContactEntryFromString)
  return entry
def CreateContact(self, new_contact, insert_uri=None, url_params=None,
escape_params=True):
"""Adds an new contact to Google Contacts.
Args:
new_contact: atom.Entry or subclass A new contact which is to be added to
Google Contacts.
insert_uri: the URL to post new contacts to the feed
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful insert, an entry containing the contact created
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
insert_uri = insert_uri or self.GetFeedUri()
return self.Post(new_contact, insert_uri, url_params=url_params,
escape_params=escape_params,
converter=gdata.contacts.ContactEntryFromString)
def UpdateContact(self, edit_uri, updated_contact, url_params=None,
escape_params=True):
"""Updates an existing contact.
Args:
edit_uri: string The edit link URI for the element being updated
updated_contact: string, atom.Entry or subclass containing
the Atom Entry which will replace the contact which is
stored at the edit_url
url_params: dict (optional) Additional URL parameters to be included
in the update request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful update, a httplib.HTTPResponse containing the server's
response to the PUT request.
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
return self.Put(updated_contact, self._CleanUri(edit_uri),
url_params=url_params,
escape_params=escape_params,
converter=gdata.contacts.ContactEntryFromString)
def DeleteContact(self, edit_uri, extra_headers=None,
url_params=None, escape_params=True):
"""Removes an contact with the specified ID from Google Contacts.
Args:
edit_uri: string The edit URL of the entry to be deleted. Example:
'/m8/feeds/contacts/default/full/xxx/yyy'
url_params: dict (optional) Additional URL parameters to be included
in the deletion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful delete, a httplib.HTTPResponse containing the server's
response to the DELETE request.
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
return self.Delete(self._CleanUri(edit_uri),
url_params=url_params, escape_params=escape_params)
def GetGroupsFeed(self, uri=None):
uri = uri or self.GetFeedUri('groups')
return self.Get(uri, converter=gdata.contacts.GroupsFeedFromString)
def CreateGroup(self, new_group, insert_uri=None, url_params=None,
escape_params=True):
insert_uri = insert_uri or self.GetFeedUri('groups')
return self.Post(new_group, insert_uri, url_params=url_params,
escape_params=escape_params,
converter=gdata.contacts.GroupEntryFromString)
def UpdateGroup(self, edit_uri, updated_group, url_params=None,
escape_params=True):
return self.Put(updated_group, self._CleanUri(edit_uri),
url_params=url_params,
escape_params=escape_params,
converter=gdata.contacts.GroupEntryFromString)
def DeleteGroup(self, edit_uri, extra_headers=None,
url_params=None, escape_params=True):
return self.Delete(self._CleanUri(edit_uri),
url_params=url_params, escape_params=escape_params)
def ChangePhoto(self, media, contact_entry_or_url, content_type=None,
content_length=None):
"""Change the photo for the contact by uploading a new photo.
Performs a PUT against the photo edit URL to send the binary data for the
photo.
Args:
media: filename, file-like-object, or a gdata.MediaSource object to send.
contact_entry_or_url: ContactEntry or str If it is a ContactEntry, this
method will search for an edit photo link URL and
perform a PUT to the URL.
content_type: str (optional) the mime type for the photo data. This is
necessary if media is a file or file name, but if media
is a MediaSource object then the media object can contain
the mime type. If media_type is set, it will override the
mime type in the media object.
content_length: int or str (optional) Specifying the content length is
only required if media is a file-like object. If media
is a filename, the length is determined using
os.path.getsize. If media is a MediaSource object, it is
assumed that it already contains the content length.
"""
if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry):
url = contact_entry_or_url.GetPhotoEditLink().href
else:
url = contact_entry_or_url
if isinstance(media, gdata.MediaSource):
payload = media
# If the media object is a file-like object, then use it as the file
# handle in the in the MediaSource.
elif hasattr(media, 'read'):
payload = gdata.MediaSource(file_handle=media,
content_type=content_type, content_length=content_length)
# Assume that the media object is a file name.
else:
payload = gdata.MediaSource(content_type=content_type,
content_length=content_length, file_path=media)
return self.Put(payload, url)
def GetPhoto(self, contact_entry_or_url):
"""Retrives the binary data for the contact's profile photo as a string.
Args:
contact_entry_or_url: a gdata.contacts.ContactEntry objecr or a string
containing the photo link's URL. If the contact entry does not
contain a photo link, the image will not be fetched and this method
will return None.
"""
# TODO: add the ability to write out the binary image data to a file,
# reading and writing a chunk at a time to avoid potentially using up
# large amounts of memory.
url = None
if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry):
photo_link = contact_entry_or_url.GetPhotoLink()
if photo_link:
url = photo_link.href
else:
url = contact_entry_or_url
if url:
return self.Get(url, converter=str)
else:
return None
def DeletePhoto(self, contact_entry_or_url):
url = None
if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry):
url = contact_entry_or_url.GetPhotoEditLink().href
else:
url = contact_entry_or_url
if url:
self.Delete(url)
def GetProfilesFeed(self, uri=None):
"""Retrieves a feed containing all domain's profiles.
Args:
uri: string (optional) the URL to retrieve the profiles feed,
for example /m8/feeds/profiles/default/full
Returns:
On success, a ProfilesFeed containing the profiles.
On failure, raises a RequestError.
"""
uri = uri or self.GetFeedUri('profiles')
return self.Get(uri,
converter=gdata.contacts.ProfilesFeedFromString)
def GetProfile(self, uri):
"""Retrieves a domain's profile for the user.
Args:
uri: string the URL to retrieve the profiles feed,
for example /m8/feeds/profiles/default/full/username
Returns:
On success, a ProfileEntry containing the profile for the user.
On failure, raises a RequestError
"""
return self.Get(uri,
converter=gdata.contacts.ProfileEntryFromString)
def UpdateProfile(self, edit_uri, updated_profile, url_params=None,
escape_params=True):
"""Updates an existing profile.
Args:
edit_uri: string The edit link URI for the element being updated
updated_profile: string atom.Entry or subclass containing
the Atom Entry which will replace the profile which is
stored at the edit_url.
url_params: dict (optional) Additional URL parameters to be included
in the update request.
escape_params: boolean (optional) If true, the url_params will be
escaped before they are included in the request.
Returns:
On successful update, a httplib.HTTPResponse containing the server's
response to the PUT request.
On failure, raises a RequestError.
"""
return self.Put(updated_profile, self._CleanUri(edit_uri),
url_params=url_params, escape_params=escape_params,
converter=gdata.contacts.ProfileEntryFromString)
def ExecuteBatch(self, batch_feed, url,
converter=gdata.contacts.ContactsFeedFromString):
"""Sends a batch request feed to the server.
Args:
batch_feed: gdata.contacts.ContactFeed A feed containing batch
request entries. Each entry contains the operation to be performed
on the data contained in the entry. For example an entry with an
operation type of insert will be used as if the individual entry
had been inserted.
url: str The batch URL to which these operations should be applied.
converter: Function (optional) The function used to convert the server's
response to an object. The default value is ContactsFeedFromString.
Returns:
The results of the batch request's execution on the server. If the
default converter is used, this is stored in a ContactsFeed.
"""
return self.Post(batch_feed, url, converter=converter)
def ExecuteBatchProfiles(self, batch_feed, url,
converter=gdata.contacts.ProfilesFeedFromString):
"""Sends a batch request feed to the server.
Args:
batch_feed: gdata.profiles.ProfilesFeed A feed containing batch
request entries. Each entry contains the operation to be performed
on the data contained in the entry. For example an entry with an
operation type of insert will be used as if the individual entry
had been inserted.
url: string The batch URL to which these operations should be applied.
converter: Function (optional) The function used to convert the server's
response to an object. The default value is
gdata.profiles.ProfilesFeedFromString.
Returns:
The results of the batch request's execution on the server. If the
default converter is used, this is stored in a ProfilesFeed.
"""
return self.Post(batch_feed, url, converter=converter)
def _CleanUri(self, uri):
"""Sanitizes a feed URI.
Args:
uri: The URI to sanitize, can be relative or absolute.
Returns:
The given URI without its http://server prefix, if any.
Keeps the leading slash of the URI.
"""
url_prefix = 'http://%s' % self.server
if uri.startswith(url_prefix):
uri = uri[len(url_prefix):]
return uri
class ContactsQuery(gdata.service.Query):
  """A query against the contacts feed.

  Supports the 'group' query parameter for restricting results to the
  contacts belonging to a single group.
  """

  def __init__(self, feed=None, text_query=None, params=None,
               categories=None, group=None):
    if not feed:
      feed = '/m8/feeds/contacts/default/full'
    self.feed = feed
    # Record the group filter before delegating to the base initializer,
    # matching the original initialization order.
    if group:
      self._SetGroup(group)
    gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query,
                                 params=params, categories=categories)

  def _GetGroup(self):
    return self['group'] if 'group' in self else None

  def _SetGroup(self, group_id):
    self['group'] = group_id

  group = property(_GetGroup, _SetGroup,
      doc='The group query parameter to find only contacts in this group')
class GroupsQuery(gdata.service.Query):
  """Constructs a query object for the contact groups feed."""
  def __init__(self, feed=None, text_query=None, params=None,
               categories=None):
    # Default to the authenticated user's full groups feed.
    self.feed = feed or '/m8/feeds/groups/default/full'
    gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query,
                                 params=params, categories=categories)
class ProfilesQuery(gdata.service.Query):
  """Constructs a query object for the profiles feed."""

  def __init__(self, feed=None, text_query=None, params=None,
               categories=None):
    # Default to the authenticated user's full profiles feed.
    default_feed = '/m8/feeds/profiles/default/full'
    self.feed = feed or default_feed
    gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query,
                                 params=params, categories=categories)
| Python |
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model classes for parsing and generating XML for the DocList Data API"""
__author__ = 'e.bidelman (Eric Bidelman)'
import re
import atom.core
import atom.data
import gdata.acl.data
import gdata.data
# Namespaces, qname template, and feed-link rel values for the DocList API.
DOCUMENTS_NS = 'http://schemas.google.com/docs/2007'
DOCUMENTS_TEMPLATE = '{http://schemas.google.com/docs/2007}%s'
ACL_FEEDLINK_REL = 'http://schemas.google.com/acl/2007#accessControlList'
REVISION_FEEDLINK_REL = DOCUMENTS_NS + '/revisions'
# Category scheme identifying the "kind" category of a Google Data entry.
DATA_KIND_SCHEME = 'http://schemas.google.com/g/2005#kind'
# Kind labels used with DATA_KIND_SCHEME to classify DocList entries.
DOCUMENT_LABEL = 'document'
SPREADSHEET_LABEL = 'spreadsheet'
PRESENTATION_LABEL = 'presentation'
FOLDER_LABEL = 'folder'
PDF_LABEL = 'pdf'
# Label category scheme and its terms (starred, trashed, hidden, ...).
LABEL_SCHEME = 'http://schemas.google.com/g/2005/labels'
STARRED_LABEL_TERM = LABEL_SCHEME + '#starred'
TRASHED_LABEL_TERM = LABEL_SCHEME + '#trashed'
HIDDEN_LABEL_TERM = LABEL_SCHEME + '#hidden'
MINE_LABEL_TERM = LABEL_SCHEME + '#mine'
PRIVATE_LABEL_TERM = LABEL_SCHEME + '#private'
SHARED_WITH_DOMAIN_LABEL_TERM = LABEL_SCHEME + '#shared-with-domain'
VIEWED_LABEL_TERM = LABEL_SCHEME + '#viewed'
# Link rel values for an entry's parent folder and its published copy.
DOCS_PARENT_LINK_REL = DOCUMENTS_NS + '#parent'
DOCS_PUBLISH_LINK_REL = DOCUMENTS_NS + '#publish'
# Captures a trailing file extension of three or more letters.
FILE_EXT_PATTERN = re.compile('.*\.([a-zA-Z]{3,}$)')
# Splits a resource id such as 'document:12345' (or with the URL-encoded
# separator, 'document%3A12345') into (label, separator, id) groups.
RESOURCE_ID_PATTERN = re.compile('^([a-z]*)(:|%3A)([\w-]*)$')
# File extension/mimetype pairs of common formats.
MIMETYPES = {
  'CSV': 'text/csv',
  'TSV': 'text/tab-separated-values',
  'TAB': 'text/tab-separated-values',
  'DOC': 'application/msword',
  'DOCX': ('application/vnd.openxmlformats-officedocument.'
           'wordprocessingml.document'),
  'ODS': 'application/x-vnd.oasis.opendocument.spreadsheet',
  'ODT': 'application/vnd.oasis.opendocument.text',
  'RTF': 'application/rtf',
  'SXW': 'application/vnd.sun.xml.writer',
  'TXT': 'text/plain',
  'XLS': 'application/vnd.ms-excel',
  'XLSX': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
  'PDF': 'application/pdf',
  'PNG': 'image/png',
  'PPT': 'application/vnd.ms-powerpoint',
  'PPS': 'application/vnd.ms-powerpoint',
  'HTM': 'text/html',
  'HTML': 'text/html',
  'ZIP': 'application/zip',
  'SWF': 'application/x-shockwave-flash'
  }
def make_kind_category(label):
  """Builds an atom.data.Category identifying the given document kind.

  Args:
    label: str The kind label (e.g. 'document', 'folder'), or None.

  Returns:
    An atom.data.Category for the label, or None when label is None.
  """
  if label is None:
    return None
  term = '%s#%s' % (DOCUMENTS_NS, label)
  return atom.data.Category(scheme=DATA_KIND_SCHEME, term=term, label=label)

MakeKindCategory = make_kind_category
def make_content_link_from_resource_id(resource_id):
  """Constructs the export URL for a given resource.

  Args:
    resource_id: str The document/item's resource id. Example presentation:
        'presentation%3A0A1234567890'.

  Returns:
    str The download/export URL for the resource (relative for documents
    and presentations, absolute for spreadsheets).

  Raises:
    ValueError: if the resource_id is not a valid format, or if a download
        URL cannot be constructed manually for this type of document.
        (The original docstring named gdata.client.ValueError, but the
        builtin ValueError is what is raised.)
  """
  match = RESOURCE_ID_PATTERN.match(resource_id)
  if match:
    label = match.group(1)
    doc_id = match.group(3)
    if label == DOCUMENT_LABEL:
      return '/feeds/download/documents/Export?docId=%s' % doc_id
    if label == PRESENTATION_LABEL:
      return '/feeds/download/presentations/Export?docId=%s' % doc_id
    if label == SPREADSHEET_LABEL:
      return ('http://spreadsheets.google.com/feeds/download/spreadsheets/'
              'Export?key=%s' % doc_id)
  # Folders, PDFs, and malformed ids all fall through to here.
  raise ValueError('Invalid resource id: %s, or manually creating the '
                   'download url for this type of doc is not possible'
                   % resource_id)

MakeContentLinkFromResourceId = make_content_link_from_resource_id
# Declarative XML bindings (atom.core.XmlElement convention): _qname gives
# each element's fully qualified name; a 'value' class attribute maps the
# element's XML attribute of that name.
class ResourceId(atom.core.XmlElement):
  """The DocList gd:resourceId element."""
  _qname = gdata.data.GDATA_TEMPLATE % 'resourceId'
class LastModifiedBy(atom.data.Person):
  """The DocList gd:lastModifiedBy element."""
  _qname = gdata.data.GDATA_TEMPLATE % 'lastModifiedBy'
class LastViewed(atom.data.Person):
  """The DocList gd:lastViewed element."""
  _qname = gdata.data.GDATA_TEMPLATE % 'lastViewed'
class WritersCanInvite(atom.core.XmlElement):
  """The DocList docs:writersCanInvite element."""
  _qname = DOCUMENTS_TEMPLATE % 'writersCanInvite'
  value = 'value'  # the element's 'value' XML attribute
class QuotaBytesUsed(atom.core.XmlElement):
  """The DocList gd:quotaBytesUsed element."""
  _qname = gdata.data.GDATA_TEMPLATE % 'quotaBytesUsed'
class Publish(atom.core.XmlElement):
  """The DocList docs:publish element."""
  _qname = DOCUMENTS_TEMPLATE % 'publish'
  value = 'value'  # the element's 'value' XML attribute
class PublishAuto(atom.core.XmlElement):
  """The DocList docs:publishAuto element."""
  _qname = DOCUMENTS_TEMPLATE % 'publishAuto'
  value = 'value'  # the element's 'value' XML attribute
class PublishOutsideDomain(atom.core.XmlElement):
  """The DocList docs:publishOutsideDomain element."""
  _qname = DOCUMENTS_TEMPLATE % 'publishOutsideDomain'
  value = 'value'  # the element's 'value' XML attribute
class DocsEntry(gdata.data.GDEntry):
  """A DocList version of an Atom Entry."""

  last_viewed = LastViewed
  last_modified_by = LastModifiedBy
  resource_id = ResourceId
  writers_can_invite = WritersCanInvite
  quota_bytes_used = QuotaBytesUsed
  feed_link = [gdata.data.FeedLink]

  def get_document_type(self):
    """Extracts the type of document this DocsEntry is.

    Possible values are document, presentation, spreadsheet, folder, or pdf.

    Returns:
      A string representing the type of document, or None when no kind
      category is present.
    """
    if not self.category:
      return None
    for category in self.category:
      if category.scheme == DATA_KIND_SCHEME:
        return category.label
    return None

  GetDocumentType = get_document_type

  def _find_feed_link(self, rel):
    # Helper: first <gd:feedLink> carrying the given rel, or None.
    for feed_link in self.feed_link:
      if feed_link.rel == rel:
        return feed_link
    return None

  def get_acl_feed_link(self):
    """Extracts the DocsEntry's ACL feed <gd:feedLink>.

    Returns:
      A gdata.data.FeedLink object, or None if absent.
    """
    return self._find_feed_link(ACL_FEEDLINK_REL)

  GetAclFeedLink = get_acl_feed_link

  def get_revisions_feed_link(self):
    """Extracts the DocsEntry's revisions feed <gd:feedLink>.

    Returns:
      A gdata.data.FeedLink object, or None if absent.
    """
    return self._find_feed_link(REVISION_FEEDLINK_REL)

  GetRevisionsFeedLink = get_revisions_feed_link

  def in_folders(self):
    """Returns the parents link(s) (folders) of this entry."""
    return [link for link in self.link
            if link.rel == DOCS_PARENT_LINK_REL and link.href]

  InFolders = in_folders
class Acl(gdata.acl.data.AclEntry):
  """A document ACL entry."""
class DocList(gdata.data.GDFeed):
  """The main DocList feed containing a list of Google Documents."""
  entry = [DocsEntry]  # parse feed entries as DocsEntry objects
class AclFeed(gdata.acl.data.AclFeed):
  """A DocList ACL feed."""
  entry = [Acl]  # parse feed entries as Acl objects
class Revision(gdata.data.GDEntry):
  """A document Revision entry."""

  publish = Publish
  publish_auto = PublishAuto
  publish_outside_domain = PublishOutsideDomain

  def find_publish_link(self):
    """Gets the URL of the link pointing at the published document.

    Returns:
      A str for the URL in the link whose rel ends in #publish.
    """
    return self.find_url(DOCS_PUBLISH_LINK_REL)

  FindPublishLink = find_publish_link

  def get_publish_link(self):
    """Gets the link object pointing at the published document.

    Returns:
      A gdata.data.Link for the link whose rel ends in #publish.
    """
    return self.get_link(DOCS_PUBLISH_LINK_REL)

  GetPublishLink = get_publish_link
class RevisionFeed(gdata.data.GDFeed):
  """A DocList Revision feed."""
  entry = [Revision]  # parse feed entries as Revision objects
| Python |
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DocsClient extends gdata.client.GDClient to streamline DocList API calls."""
__author__ = 'e.bidelman (Eric Bidelman)'
import mimetypes
import urllib
import atom.data
import atom.http_core
import gdata.client
import gdata.docs.data
import gdata.gauth
# Feed URI templates; each '%s' slot takes a resource id such as
# 'document%3A12345'.
DOCLIST_FEED_URI = '/feeds/default/private/full/'
FOLDERS_FEED_TEMPLATE = DOCLIST_FEED_URI + '%s/contents'
ACL_FEED_TEMPLATE = DOCLIST_FEED_URI + '%s/acl'
REVISIONS_FEED_TEMPLATE = DOCLIST_FEED_URI + '%s/revisions'
class DocsClient(gdata.client.GDClient):
  """Client extension for the Google Documents List API."""
  host = 'docs.google.com'  # default server for the API
  api_version = '3.0'  # default major version for the service.
  auth_service = 'writely'  # service name for auth ('writely' is Docs)
  auth_scopes = gdata.gauth.AUTH_SCOPES['writely']  # auth scopes for Docs
def __init__(self, auth_token=None, **kwargs):
"""Constructs a new client for the DocList API.
Args:
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: The other parameters to pass to gdata.client.GDClient constructor.
"""
gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs)
def get_file_content(self, uri, auth_token=None, **kwargs):
"""Fetches the file content from the specified uri.
This method is useful for downloading/exporting a file within enviornments
like Google App Engine, where the user does not have the ability to write
the file to a local disk.
Args:
uri: str The full URL to fetch the file contents from.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.request().
Returns:
The binary file content.
Raises:
gdata.client.RequestError: on error response from server.
"""
server_response = self.request('GET', uri, auth_token=auth_token, **kwargs)
if server_response.status != 200:
raise gdata.client.RequestError, {'status': server_response.status,
'reason': server_response.reason,
'body': server_response.read()}
return server_response.read()
GetFileContent = get_file_content
def _download_file(self, uri, file_path, auth_token=None, **kwargs):
"""Downloads a file to disk from the specified URI.
Note: to download a file in memory, use the GetFileContent() method.
Args:
uri: str The full URL to download the file from.
file_path: str The full path to save the file to.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.get_file_content().
Raises:
gdata.client.RequestError: on error response from server.
"""
f = open(file_path, 'wb')
try:
f.write(self.get_file_content(uri, auth_token=auth_token, **kwargs))
except gdata.client.RequestError, e:
f.close()
raise e
f.flush()
f.close()
_DownloadFile = _download_file
def get_doclist(self, uri=None, limit=None, auth_token=None, **kwargs):
"""Retrieves the main doclist feed containing the user's items.
Args:
uri: str (optional) A URI to query the doclist feed.
limit: int (optional) A maximum cap for the number of results to
return in the feed. By default, the API returns a maximum of 100
per page. Thus, if you set limit=5000, you will get <= 5000
documents (guarenteed no more than 5000), and will need to follow the
feed's next links (feed.GetNextLink()) to the rest. See
get_everything(). Similarly, if you set limit=50, only <= 50
documents are returned. Note: if the max-results parameter is set in
the uri parameter, it is chosen over a value set for limit.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.get_feed().
Returns:
gdata.docs.data.DocList feed.
"""
if uri is None:
uri = DOCLIST_FEED_URI
if isinstance(uri, (str, unicode)):
uri = atom.http_core.Uri.parse_uri(uri)
# Add max-results param if it wasn't included in the uri.
if limit is not None and not 'max-results' in uri.query:
uri.query['max-results'] = limit
return self.get_feed(uri, desired_class=gdata.docs.data.DocList,
auth_token=auth_token, **kwargs)
GetDocList = get_doclist
def get_doc(self, resource_id, etag=None, auth_token=None, **kwargs):
"""Retrieves a particular document given by its resource id.
Args:
resource_id: str The document/item's resource id. Example spreadsheet:
'spreadsheet%3A0A1234567890'.
etag: str (optional) The document/item's etag value to be used in a
conditional GET. See http://code.google.com/apis/documents/docs/3.0/
developers_guide_protocol.html#RetrievingCached.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.get_entry().
Returns:
A gdata.docs.data.DocsEntry object representing the retrieved entry.
Raises:
ValueError if the resource_id is not a valid format.
"""
match = gdata.docs.data.RESOURCE_ID_PATTERN.match(resource_id)
if match is None:
raise ValueError, 'Invalid resource id: %s' % resource_id
return self.get_entry(
DOCLIST_FEED_URI + resource_id, etag=etag,
desired_class=gdata.docs.data.DocsEntry,
auth_token=auth_token, **kwargs)
GetDoc = get_doc
def get_everything(self, uri=None, auth_token=None, **kwargs):
"""Retrieves the user's entire doc list.
The method makes multiple HTTP requests (by following the feed's next links)
in order to fetch the user's entire document list.
Args:
uri: str (optional) A URI to query the doclist feed with.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.GetDocList().
Returns:
A list of gdata.docs.data.DocsEntry objects representing the retrieved
entries.
"""
if uri is None:
uri = DOCLIST_FEED_URI
feed = self.GetDocList(uri=uri, auth_token=auth_token, **kwargs)
entries = feed.entry
while feed.GetNextLink() is not None:
feed = self.GetDocList(
feed.GetNextLink().href, auth_token=auth_token, **kwargs)
entries.extend(feed.entry)
return entries
GetEverything = get_everything
def get_acl_permissions(self, resource_id, auth_token=None, **kwargs):
"""Retrieves a the ACL sharing permissions for a document.
Args:
resource_id: str The document/item's resource id. Example for pdf:
'pdf%3A0A1234567890'.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.get_feed().
Returns:
A gdata.docs.data.AclFeed object representing the document's ACL entries.
Raises:
ValueError if the resource_id is not a valid format.
"""
match = gdata.docs.data.RESOURCE_ID_PATTERN.match(resource_id)
if match is None:
raise ValueError, 'Invalid resource id: %s' % resource_id
return self.get_feed(
ACL_FEED_TEMPLATE % resource_id, desired_class=gdata.docs.data.AclFeed,
auth_token=auth_token, **kwargs)
GetAclPermissions = get_acl_permissions
def get_revisions(self, resource_id, auth_token=None, **kwargs):
"""Retrieves the revision history for a document.
Args:
resource_id: str The document/item's resource id. Example for pdf:
'pdf%3A0A1234567890'.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.get_feed().
Returns:
A gdata.docs.data.RevisionFeed representing the document's revisions.
Raises:
ValueError if the resource_id is not a valid format.
"""
match = gdata.docs.data.RESOURCE_ID_PATTERN.match(resource_id)
if match is None:
raise ValueError, 'Invalid resource id: %s' % resource_id
return self.get_feed(
REVISIONS_FEED_TEMPLATE % resource_id,
desired_class=gdata.docs.data.RevisionFeed, auth_token=auth_token,
**kwargs)
GetRevisions = get_revisions
def create(self, doc_type, title, folder_or_id=None, writers_can_invite=None,
auth_token=None, **kwargs):
"""Creates a new item in the user's doclist.
Args:
doc_type: str The type of object to create. For example: 'document',
'spreadsheet', 'folder', 'presentation'.
title: str A title for the document.
folder_or_id: gdata.docs.data.DocsEntry or str (optional) Folder entry or
the resouce id of a folder to create the object under. Note: A valid
resource id for a folder is of the form: folder%3Afolder_id.
writers_can_invite: bool (optional) False prevents collaborators from
being able to invite others to edit or view the document.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.post().
Returns:
gdata.docs.data.DocsEntry containing information newly created item.
"""
entry = gdata.docs.data.DocsEntry(title=atom.data.Title(text=title))
entry.category.append(gdata.docs.data.make_kind_category(doc_type))
if isinstance(writers_can_invite, gdata.docs.data.WritersCanInvite):
entry.writers_can_invite = writers_can_invite
elif isinstance(writers_can_invite, bool):
entry.writers_can_invite = gdata.docs.data.WritersCanInvite(
value=str(writers_can_invite).lower())
uri = DOCLIST_FEED_URI
if folder_or_id is not None:
if isinstance(folder_or_id, gdata.docs.data.DocsEntry):
# Verify that we're uploading the resource into to a folder.
if folder_or_id.get_document_type() == gdata.docs.data.FOLDER_LABEL:
uri = folder_or_id.content.src
else:
raise gdata.client.Error, 'Trying to upload item to a non-folder.'
else:
uri = FOLDERS_FEED_TEMPLATE % folder_or_id
return self.post(entry, uri, auth_token=auth_token, **kwargs)
Create = create
def copy(self, source_entry, title, auth_token=None, **kwargs):
"""Copies a native Google document, spreadsheet, or presentation.
Note: arbitrary file types and PDFs do not support this feature.
Args:
source_entry: gdata.docs.data.DocsEntry An object representing the source
document/folder.
title: str A title for the new document.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.post().
Returns:
A gdata.docs.data.DocsEntry of the duplicated document.
"""
entry = gdata.docs.data.DocsEntry(
title=atom.data.Title(text=title),
id=atom.data.Id(text=source_entry.GetSelfLink().href))
return self.post(entry, DOCLIST_FEED_URI, auth_token=auth_token, **kwargs)
Copy = copy
def move(self, source_entry, folder_entry=None,
keep_in_folders=False, auth_token=None, **kwargs):
"""Moves an item into a different folder (or to the root document list).
Args:
source_entry: gdata.docs.data.DocsEntry An object representing the source
document/folder.
folder_entry: gdata.docs.data.DocsEntry (optional) An object representing
the destination folder. If None, set keep_in_folders to
True to remove the item from all parent folders.
keep_in_folders: boolean (optional) If True, the source entry
is not removed from any existing parent folders it is in.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.post().
Returns:
A gdata.docs.data.DocsEntry of the moved entry or True if just moving the
item out of all folders (e.g. Move(source_entry)).
"""
entry = gdata.docs.data.DocsEntry(id=source_entry.id)
# Remove the item from any folders it is already in.
if not keep_in_folders:
for folder in source_entry.InFolders():
self.delete(
'%s/contents/%s' % (folder.href, source_entry.resource_id.text),
force=True)
# If we're moving the resource into a folder, verify it is a folder entry.
if folder_entry is not None:
if folder_entry.get_document_type() == gdata.docs.data.FOLDER_LABEL:
return self.post(entry, folder_entry.content.src,
auth_token=auth_token, **kwargs)
else:
raise gdata.client.Error, 'Trying to move item into a non-folder.'
return True
Move = move
def upload(self, media, title, folder_or_uri=None, content_type=None,
auth_token=None, **kwargs):
"""Uploads a file to Google Docs.
Args:
media: A gdata.data.MediaSource object containing the file to be
uploaded or a string of the filepath.
title: str The title of the document on the server after being
uploaded.
folder_or_uri: gdata.docs.data.DocsEntry or str (optional) An object with
a link to the folder or the uri to upload the file to.
Note: A valid uri for a folder is of the form:
/feeds/default/private/full/folder%3Afolder_id/contents
content_type: str (optional) The file's mimetype. If not provided, the
one in the media source object is used or the mimetype is inferred
from the filename (if media is a string). When media is a filename,
it is always recommended to pass in a content type.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.post().
Returns:
A gdata.docs.data.DocsEntry containing information about uploaded doc.
"""
uri = None
if folder_or_uri is not None:
if isinstance(folder_or_uri, gdata.docs.data.DocsEntry):
# Verify that we're uploading the resource into to a folder.
if folder_or_uri.get_document_type() == gdata.docs.data.FOLDER_LABEL:
uri = folder_or_uri.content.src
else:
raise gdata.client.Error, 'Trying to upload item to a non-folder.'
else:
uri = folder_or_uri
else:
uri = DOCLIST_FEED_URI
# Create media source if media is a filepath.
if isinstance(media, (str, unicode)):
mimetype = mimetypes.guess_type(media)[0]
if mimetype is None and content_type is None:
raise ValueError, ("Unknown mimetype. Please pass in the file's "
"content_type")
else:
media = gdata.data.MediaSource(file_path=media,
content_type=content_type)
entry = gdata.docs.data.DocsEntry(title=atom.data.Title(text=title))
return self.post(entry, uri, media_source=media,
desired_class=gdata.docs.data.DocsEntry,
auth_token=auth_token, **kwargs)
Upload = upload
def download(self, entry_or_id_or_url, file_path, extra_params=None,
auth_token=None, **kwargs):
"""Downloads a file from the Document List to local disk.
Note: to download a file in memory, use the GetFileContent() method.
Args:
entry_or_id_or_url: gdata.docs.data.DocsEntry or string representing a
resource id or URL to download the document from (such as the content
src link).
file_path: str The full path to save the file to.
extra_params: dict (optional) A map of any further parameters to control
how the document is downloaded/exported. For example, exporting a
spreadsheet as a .csv: extra_params={'gid': 0, 'exportFormat': 'csv'}
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self._download_file().
Raises:
gdata.client.RequestError if the download URL is malformed or the server's
response was not successful.
ValueError if entry_or_id_or_url was a resource id for a filetype
in which the download link cannot be manually constructed (e.g. pdf).
"""
if isinstance(entry_or_id_or_url, gdata.docs.data.DocsEntry):
url = entry_or_id_or_url.content.src
else:
if gdata.docs.data.RESOURCE_ID_PATTERN.match(entry_or_id_or_url):
url = gdata.docs.data.make_content_link_from_resource_id(
entry_or_id_or_url)
else:
url = entry_or_id_or_url
if extra_params is not None:
if 'exportFormat' in extra_params and url.find('/Export?') == -1:
raise gdata.client.Error, ('This entry type cannot be exported '
'as a different format.')
if 'gid' in extra_params and url.find('spreadsheets') == -1:
raise gdata.client.Error, 'gid param is not valid for this doc type.'
url += '&' + urllib.urlencode(extra_params)
self._download_file(url, file_path, auth_token=auth_token, **kwargs)
Download = download
def export(self, entry_or_id_or_url, file_path, gid=None, auth_token=None,
           **kwargs):
  """Exports a document from the Document List in a different format.

  The export format is inferred from the extension of file_path.

  Args:
    entry_or_id_or_url: gdata.docs.data.DocsEntry, or a string holding a
        resource id or a download URL (such as the content src link).
    file_path: str The full path to save the file to.
    gid: str (optional) Grid id for downloading a single grid of a
        spreadsheet. Should only be used for .csv and .tsv exports.
    auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
        OAuthToken which authorizes this client to edit the user's data.
    kwargs: Other parameters to pass to self.download().

  Raises:
    gdata.client.RequestError if the download URL is malformed or the
        server's response was not successful.
  """
  params = {}
  ext_match = gdata.docs.data.FILE_EXT_PATTERN.match(file_path)
  if ext_match:
    # The file extension names the export format (e.g. 'csv', 'pdf').
    params['exportFormat'] = ext_match.group(1)
  if gid is not None:
    params['gid'] = gid
  self.download(entry_or_id_or_url, file_path, params,
                auth_token=auth_token, **kwargs)
Export = export
class DocsQuery(gdata.client.Query):
  """Query-parameter builder for Google Documents List API request URLs."""

  def __init__(self, title=None, title_exact=None, opened_min=None,
               opened_max=None, edited_min=None, edited_max=None, owner=None,
               writer=None, reader=None, show_folders=None,
               show_deleted=None, ocr=None, target_language=None,
               source_language=None, convert=None, **kwargs):
    """Constructs a query URL for the Google Documents List API.

    Args:
      title: str (optional) Search terms for a document's title. Without
          title_exact this submits a partial, not exact, query.
      title_exact: str (optional) 'true' or 'false'; only meaningful
          together with title. Matches are case-insensitive.
      opened_min: str (optional) Lower bound (RFC 3339 timestamp) on the
          last time a document was opened by the current user. Example:
          opened_min='2005-08-09T09:57:00-08:00'.
      opened_max: str (optional) Upper bound on the last-opened time
          (see opened_min).
      edited_min: str (optional) Lower bound (RFC 3339 timestamp) on the
          last time a document was edited by the current user; corresponds
          to the edited.text value of the doc's entry, covering changes to
          content or metadata.
      edited_max: str (optional) Upper bound on the last-edited time
          (see edited_min).
      owner: str (optional) Email address of the document owner to search
          for, e.g. owner='user@gmail.com'.
      writer: str (optional) Email address, or comma separated list of
          addresses, of users with write access to match, e.g.
          writer='user1@gmail.com,user@example.com'.
      reader: str (optional) Like writer, but for read access.
      show_folders: str (optional) 'true' or 'false'; whether folders are
          returned alongside documents. Default is false.
      show_deleted: str (optional) 'true' or 'false'; whether trashed
          documents are included. Default is false.
      ocr: str (optional) 'true' or 'false'; whether to attempt OCR on a
          .jpg, .png, or .gif upload. Default is false. See OCR in the
          Protocol Guide:
          http://code.google.com/apis/documents/docs/3.0/developers_guide_protocol.html#OCR
      target_language: str (optional) Language to translate a document
          into. See Document Translation in the Protocol Guide:
          http://code.google.com/apis/documents/docs/3.0/developers_guide_protocol.html#DocumentTranslation
      source_language: str (optional) Source language of the original
          document; when omitted, Google attempts auto-detection. See
          Document Translation in the Protocol Guide (link above).
      convert: str (optional) 'true' or 'false'; whether document-type
          uploads of arbitrary file types convert to a native Google Docs
          format. Default is 'true'.
    """
    gdata.client.Query.__init__(self, **kwargs)
    self.convert = convert
    self.title = title
    self.title_exact = title_exact
    self.opened_min = opened_min
    self.opened_max = opened_max
    self.edited_min = edited_min
    self.edited_max = edited_max
    self.owner = owner
    self.writer = writer
    self.reader = reader
    self.show_folders = show_folders
    self.show_deleted = show_deleted
    self.ocr = ocr
    self.target_language = target_language
    self.source_language = source_language

  def modify_request(self, http_request):
    """Adds each set attribute as a query parameter on http_request."""
    # (URL parameter name, attribute value) pairs, applied in the same
    # order as the API documents them; unset (None) values are skipped
    # by _add_query_param.
    for param, value in (('convert', self.convert),
                         ('title', self.title),
                         ('title-exact', self.title_exact),
                         ('opened-min', self.opened_min),
                         ('opened-max', self.opened_max),
                         ('edited-min', self.edited_min),
                         ('edited-max', self.edited_max),
                         ('owner', self.owner),
                         ('writer', self.writer),
                         ('reader', self.reader),
                         ('showfolders', self.show_folders),
                         ('showdeleted', self.show_deleted),
                         ('ocr', self.ocr),
                         ('targetLanguage', self.target_language),
                         ('sourceLanguage', self.source_language)):
      gdata.client._add_query_param(param, value, http_request)
    gdata.client.Query.modify_request(self, http_request)

  ModifyRequest = modify_request
| Python |
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to Atom objects used with Google Documents."""
__author__ = ('api.jfisher (Jeff Fisher), '
'api.eric@google.com (Eric Bidelman)')
import atom
import gdata
# XML namespace for Documents-List-specific elements (the docs: prefix).
DOCUMENTS_NAMESPACE = 'http://schemas.google.com/docs/2007'
class Scope(atom.AtomBase):
  """The gAcl:scope element of a DocList ACL entry."""

  _tag = 'scope'
  _namespace = gdata.GACL_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['value'] = 'value'
  _attributes['type'] = 'type'

  def __init__(self, value=None, type=None, extension_elements=None,
               extension_attributes=None, text=None):
    self.text = text
    self.value = value
    self.type = type
    if extension_elements:
      self.extension_elements = extension_elements
    else:
      self.extension_elements = []
    if extension_attributes:
      self.extension_attributes = extension_attributes
    else:
      self.extension_attributes = {}
class Role(atom.AtomBase):
  """The gAcl:role element of a DocList ACL entry."""

  _tag = 'role'
  _namespace = gdata.GACL_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['value'] = 'value'

  def __init__(self, value=None, extension_elements=None,
               extension_attributes=None, text=None):
    self.text = text
    self.value = value
    if extension_elements:
      self.extension_elements = extension_elements
    else:
      self.extension_elements = []
    if extension_attributes:
      self.extension_attributes = extension_attributes
    else:
      self.extension_attributes = {}
class FeedLink(atom.AtomBase):
  """The gd:feedLink element of a DocList entry."""

  _tag = 'feedLink'
  _namespace = gdata.GDATA_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['rel'] = 'rel'
  _attributes['href'] = 'href'

  def __init__(self, href=None, rel=None, text=None, extension_elements=None,
               extension_attributes=None):
    self.rel = rel
    self.href = href
    # Text and extensions are handled by the AtomBase constructor.
    atom.AtomBase.__init__(
        self, text=text, extension_elements=extension_elements,
        extension_attributes=extension_attributes)
class ResourceId(atom.AtomBase):
  """The gd:resourceId element of a DocList entry."""

  _tag = 'resourceId'
  _namespace = gdata.GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['value'] = 'value'

  def __init__(self, value=None, extension_elements=None,
               extension_attributes=None, text=None):
    self.text = text
    self.value = value
    if extension_elements:
      self.extension_elements = extension_elements
    else:
      self.extension_elements = []
    if extension_attributes:
      self.extension_attributes = extension_attributes
    else:
      self.extension_attributes = {}
class LastModifiedBy(atom.Person):
  """The DocList gd:lastModifiedBy element.

  An atom Person naming who last modified the document.
  """
  _tag = 'lastModifiedBy'
  _namespace = gdata.GDATA_NAMESPACE
class LastViewed(atom.Person):
  """The DocList gd:lastViewed element.

  An atom Person recording who last viewed the document.
  """
  _tag = 'lastViewed'
  _namespace = gdata.GDATA_NAMESPACE
class WritersCanInvite(atom.AtomBase):
  """The DocList docs:writersCanInvite element.

  The boolean setting is carried in the element's 'value' attribute.
  """
  _tag = 'writersCanInvite'
  _namespace = DOCUMENTS_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['value'] = 'value'
class DocumentListEntry(gdata.GDataEntry):
  """The Google Documents version of an Atom Entry."""
  _tag = gdata.GDataEntry._tag
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  # Register the gd:/docs: child elements so atom's XML (de)serialization
  # machinery maps them onto the typed attributes below automatically.
  _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feedLink', FeedLink)
  _children['{%s}resourceId' % gdata.GDATA_NAMESPACE] = ('resourceId',
                                                         ResourceId)
  _children['{%s}lastModifiedBy' % gdata.GDATA_NAMESPACE] = ('lastModifiedBy',
                                                             LastModifiedBy)
  _children['{%s}lastViewed' % gdata.GDATA_NAMESPACE] = ('lastViewed',
                                                         LastViewed)
  _children['{%s}writersCanInvite' % DOCUMENTS_NAMESPACE] = (
      'writersCanInvite', WritersCanInvite)

  def __init__(self, resourceId=None, feedLink=None, lastViewed=None,
               lastModifiedBy=None, writersCanInvite=None, author=None,
               category=None, content=None, atom_id=None, link=None,
               published=None, title=None, updated=None, text=None,
               extension_elements=None, extension_attributes=None):
    # DocList-specific children; the remaining arguments are standard Atom
    # entry fields forwarded to the GDataEntry base class.
    self.feedLink = feedLink
    self.lastViewed = lastViewed
    self.lastModifiedBy = lastModifiedBy
    self.resourceId = resourceId
    self.writersCanInvite = writersCanInvite
    gdata.GDataEntry.__init__(
        self, author=author, category=category, content=content,
        atom_id=atom_id, link=link, published=published, title=title,
        updated=updated, extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)

  def GetAclLink(self):
    """Extracts the DocListEntry's <gd:feedLink>.

    Returns:
      A FeedLink object (or None if the entry had no feedLink child).
    """
    return self.feedLink

  def GetDocumentType(self):
    """Extracts the type of document from the DocListEntry.

    This method returns the type of document the DocListEntry
    represents. Possible values are document, presentation,
    spreadsheet, folder, or pdf.

    Returns:
      A string representing the type of document, or None when the entry
      has no categories (or no '#kind' category, in which case the loop
      falls through and the implicit return value is also None).
    """
    if self.category:
      for category in self.category:
        # The category whose scheme ends in '#kind' labels the doc type.
        if category.scheme == gdata.GDATA_NAMESPACE + '#kind':
          return category.label
    else:
      return None
def DocumentListEntryFromString(xml_string):
  """Deserializes a Document List feed entry from its XML text.

  Args:
    xml_string: string The XML describing a Document List feed entry.

  Returns:
    A DocumentListEntry object corresponding to the given XML.
  """
  entry = atom.CreateClassFromXMLString(DocumentListEntry, xml_string)
  return entry
class DocumentListAclEntry(gdata.GDataEntry):
  """A DocList ACL Entry flavor of an Atom Entry."""
  _tag = gdata.GDataEntry._tag
  _namespace = gdata.GDataEntry._namespace
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  # gAcl children: whom the rule applies to (scope) and what they may
  # do (role).
  _children['{%s}scope' % gdata.GACL_NAMESPACE] = ('scope', Scope)
  _children['{%s}role' % gdata.GACL_NAMESPACE] = ('role', Role)

  def __init__(self, category=None, atom_id=None, link=None,
               title=None, updated=None, scope=None, role=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Constructs an ACL entry.

    Args:
      category, atom_id, link, title, updated: standard Atom entry fields,
          forwarded to gdata.GDataEntry.__init__.
      scope: Scope (optional) The user/domain the ACL rule applies to.
      role: Role (optional) The access level granted to the scope.
      extension_elements, extension_attributes, text: forwarded to the
          base class.
    """
    # Fix: text, extension_elements and extension_attributes were
    # previously accepted but silently discarded (text=None was
    # hard-coded and the extension arguments were never forwarded).
    # Forward them like DocumentListEntry does.
    gdata.GDataEntry.__init__(self, author=None, category=category,
                              content=None, atom_id=atom_id, link=link,
                              published=None, title=title,
                              updated=updated, text=text,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes)
    self.scope = scope
    self.role = role
def DocumentListAclEntryFromString(xml_string):
  """Deserializes a Document List ACL feed entry from its XML text.

  Args:
    xml_string: string The XML describing a Document List ACL feed entry.

  Returns:
    A DocumentListAclEntry object corresponding to the given XML.
  """
  acl_entry = atom.CreateClassFromXMLString(DocumentListAclEntry, xml_string)
  return acl_entry
class DocumentListFeed(gdata.GDataFeed):
  """A feed containing a list of Google Documents Items."""
  _tag = gdata.GDataFeed._tag
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  # Atom entries in this feed deserialize as DocumentListEntry objects.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [DocumentListEntry])
def DocumentListFeedFromString(xml_string):
  """Deserializes a Document List feed from its XML text.

  Args:
    xml_string: string The XML describing a DocumentList feed.

  Returns:
    A DocumentListFeed object corresponding to the given XML.
  """
  feed = atom.CreateClassFromXMLString(DocumentListFeed, xml_string)
  return feed
class DocumentListAclFeed(gdata.GDataFeed):
  """A DocList ACL feed flavor of an Atom feed."""
  _tag = gdata.GDataFeed._tag
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  # Atom entries in this feed deserialize as DocumentListAclEntry objects.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [DocumentListAclEntry])
def DocumentListAclFeedFromString(xml_string):
  """Deserializes a Document List ACL feed from its XML text.

  Args:
    xml_string: string The XML describing a DocumentList ACL feed.

  Returns:
    A DocumentListAclFeed object corresponding to the given XML.
  """
  acl_feed = atom.CreateClassFromXMLString(DocumentListAclFeed, xml_string)
  return acl_feed
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.