code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
# Populate the database for the SQL injection demo,
# it creates a DBS directory in the current working dir.
# Requires Gadfly on the python path, and a --allworkingmodules --oldstyle
# pypy-c.
# Passwords for the demo are just the reverse of user names.
import md5
import sys, os
import random

# Fresh Gadfly database directory; mkdir fails if DBS already exists,
# which keeps us from clobbering a previous demo database.
os.mkdir("DBS")

import gadfly
conn = gadfly.gadfly()
conn.startup("db0", "DBS")   # create database "db0" inside DBS/

# Demo users; each user's password is derived from the name (see make_pwd).
names = ['bob', 'jamie', 'david', 'monica', 'rose', 'anna']
def make_pwd(name):
    """Demo password for `name`: the MD5 hex digest of the reversed
    user name (see the header comment of this script)."""
    reversed_name = name[::-1]
    return md5.new(reversed_name).hexdigest()
# Hashed password for every demo user, index-aligned with `names`.
pwds = [make_pwd(name) for name in names]

# (product name, unit price, maximum quantity in stock)
products = [('superglue', 10.0, 5),
            ('pink wallpaper', 25.0, 20),
            ('red wallpaper', 20.0, 20),
            ('gray wallpaper', 15.0, 20),
            ('white wallpaper', 15.0, 20),
            ('green wallpaper', 20.0, 20) ]

cursor = conn.cursor()
cursor.execute("""create table purchases (pwd varchar, user varchar,
month integer, year integer,
product varchar,
qty integer,
amount float)
""")

# Parameterized insert (year hard-wired to 2007); using placeholders here
# is deliberate -- the injection in the demo happens elsewhere.
ins = "insert into purchases values (?,?,?,2007,?,?,?)"

# Fill the table with 15 random purchases.
for i in range(15):
    uid = random.randrange(0, len(names))
    pwd = pwds[uid]
    name = names[uid]
    month = random.randrange(1, 13)
    product, unitprice, maxqty = random.choice(products)
    # NOTE(review): randrange excludes the upper bound, so qty is at most
    # maxqty - 1 -- presumably intentional for the demo, but worth confirming.
    qty = random.randrange(1, maxqty)
    data = (pwd, name, month, product, qty, qty*unitprice)
    cursor.execute(ins, data)

conn.commit()
print "Done"
| Python |
"""
This example transparently intercepts and shows operations on
builtin objects. Requires the "--objspace-std-withtproxy" option.
"""
from tputil import make_proxy
def make_show_proxy(instance):
def controller(operation):
print "proxy sees:", operation
res = operation.delegate()
return res
tproxy = make_proxy(controller, obj=instance)
return tproxy
if __name__ == '__main__':
    mydict = make_show_proxy({})
    assert type(mydict) is dict            # this looks exactly like a dict
    mydict['hello'] = 'world'              # will print __setitem__
    mydict[42] = 23                        # will print __setitem__
    assert mydict.pop('hello') == 'world'  # will print pop
    assert mydict.popitem() == (42,23)     # will print popitem
| Python |
"""
This small example implements a basic orthogonal persistence
mechanism on top of PyPy's transparent proxies.
"""
from tputil import make_proxy
# Operation names that mutate a list in place.
# BUG FIX: the implicitly-concatenated string fragments were missing a
# separating space, fusing '__setattr__' and '__delslice__' into the bogus
# token '__setattr____delslice__' (likewise '__setattr__' + 'clear' below),
# so those operations were never recognized as changing operations.
list_changeops = set('__iadd__ __imul__ __delitem__ __setitem__ __setattr__ '
                     '__delslice__ __setslice__ '
                     'append extend insert pop remove reverse sort'.split())

# Operation names that mutate a dict in place.
dict_changeops = set('__delitem__ __setitem__ __setattr__ '
                     'clear pop popitem setdefault update'.split())
def ischangeop(operation):
    """ return True if this operation is a changing operation
    on known builtins (dicts, lists).
    """
    target = operation.obj
    if isinstance(target, list):
        return operation.opname in list_changeops
    if isinstance(target, dict):
        return operation.opname in dict_changeops
    return False
def make_persistent_proxy(instance, storage):
    """Wrap `instance` in a proxy that re-dumps it to `storage` after
    every mutating operation, so the on-disk pickle tracks memory.
    """
    def perform(operation):
        res = operation.delegate()
        if ischangeop(operation):
            print "persisting after:", operation
            storage.dump(instance)
        # Container results get proxied too, so mutations reached
        # through them (e.g. a list stored in the dict) also persist.
        if res is not operation.proxyobj and isinstance(res, (dict, list)):
            res = make_proxy(perform, obj=res)
        return res
    return make_proxy(perform, obj=instance)
def load(storage):
    """Unpickle the object saved in `storage` and re-wrap it in a
    fresh persistence proxy."""
    return make_persistent_proxy(storage.load(), storage)
if __name__ == '__main__':
    import py
    storage = py.path.local("/tmp/dictpickle")
    pdict = make_persistent_proxy({}, storage)
    # the code below is not aware of pdict being a proxy
    assert type(pdict) is dict
    pdict['hello'] = 'world'
    pdict['somelist'] = []
    del pdict

    # Reload from disk: the mutations above were persisted automatically.
    newdict = load(storage)
    assert newdict == {'hello': 'world', 'somelist': []}
    l = newdict['somelist']
    l.append(1)        # this triggers persisting the whole dict
    l.extend([2,3])    # this triggers persisting the whole dict
    del newdict, l

    newdict = load(storage)
    print newdict['somelist']   # will show [1,2,3]
| Python |
#!/usr/bin/env python
"""
Translator Demo
Run this file -- over regular Python! -- to analyse and type-annotate
the functions and class defined in this module, starting from the
entry point function demo().
Requires Pygame.
"""
# Back-Propagation Neural Networks
#
# Written in Python. See http://www.python.org/
#
# Neil Schemenauer <nascheme@enme.ucalgary.ca>
#
# Modifications to the original (Armin Rigo):
# * import random from PyPy's lib, which is Python 2.2's plain
# Python implementation
# * starts the Translator instead of the demo by default.
import sys
import math
import time
import autopath
from pypy.rlib import rrandom
# Silence per-iteration prints; flip to True for verbose training output.
PRINT_IT = False
# RNG with a fixed seed (1) so every run trains identically.
random = rrandom.Random(1)
def rand(a, b):
    """Return a pseudo-random float in the half-open interval [a, b)."""
    return a + (b - a) * random.random()
def makeMatrix(I, J, fill=0.0):
    """Build an I-by-J matrix as a list of I independent row lists,
    every entry initialized to `fill` (we could use NumPy to speed
    this up)."""
    return [[fill] * J for _ in range(I)]
class NN:
    """Fully-connected feed-forward network with one hidden layer,
    trained by back-propagation with momentum; sigmoid activations."""

    def __init__(self, ni, nh, no):
        # number of input, hidden, and output nodes
        self.ni = ni + 1 # +1 for bias node
        self.nh = nh
        self.no = no

        # activations for nodes
        self.ai = [1.0]*self.ni
        self.ah = [1.0]*self.nh
        self.ao = [1.0]*self.no

        # create weights
        self.wi = makeMatrix(self.ni, self.nh)
        self.wo = makeMatrix(self.nh, self.no)
        # set them to random values
        for i in range(self.ni):
            for j in range(self.nh):
                self.wi[i][j] = rand(-2.0, 2.0)
        for j in range(self.nh):
            for k in range(self.no):
                self.wo[j][k] = rand(-2.0, 2.0)

        # last change in weights for momentum
        self.ci = makeMatrix(self.ni, self.nh)
        self.co = makeMatrix(self.nh, self.no)

    def update(self, inputs):
        """Forward pass: propagate `inputs` through the net and return
        a copy of the output activations."""
        if len(inputs) != self.ni-1:
            raise ValueError, 'wrong number of inputs'

        # input activations (the bias node, initialized to 1.0, is
        # never overwritten because the loop stops at ni-1)
        for i in range(self.ni-1):
            #self.ai[i] = 1.0/(1.0+math.exp(-inputs[i]))
            self.ai[i] = inputs[i]

        # hidden activations
        for j in range(self.nh):
            sum = 0.0
            for i in range(self.ni):
                sum = sum + self.ai[i] * self.wi[i][j]
            self.ah[j] = 1.0/(1.0+math.exp(-sum))

        # output activations
        for k in range(self.no):
            sum = 0.0
            for j in range(self.nh):
                sum = sum + self.ah[j] * self.wo[j][k]
            self.ao[k] = 1.0/(1.0+math.exp(-sum))

        return self.ao[:]

    def backPropagate(self, targets, N, M):
        """One backward pass against `targets`.  N is the learning
        rate, M the momentum factor; returns the summed squared error."""
        if len(targets) != self.no:
            raise ValueError, 'wrong number of target values'

        # calculate error terms for output
        output_deltas = [0.0] * self.no
        for k in range(self.no):
            ao = self.ao[k]
            # ao*(1-ao) is the derivative of the sigmoid at ao
            output_deltas[k] = ao*(1-ao)*(targets[k]-ao)

        # calculate error terms for hidden
        hidden_deltas = [0.0] * self.nh
        for j in range(self.nh):
            sum = 0.0
            for k in range(self.no):
                sum = sum + output_deltas[k]*self.wo[j][k]
            hidden_deltas[j] = self.ah[j]*(1-self.ah[j])*sum

        # update output weights
        for j in range(self.nh):
            for k in range(self.no):
                change = output_deltas[k]*self.ah[j]
                self.wo[j][k] = self.wo[j][k] + N*change + M*self.co[j][k]
                self.co[j][k] = change
                #print N*change, M*self.co[j][k]

        # update input weights
        for i in range(self.ni):
            for j in range(self.nh):
                change = hidden_deltas[j]*self.ai[i]
                self.wi[i][j] = self.wi[i][j] + N*change + M*self.ci[i][j]
                self.ci[i][j] = change

        # calculate error
        error = 0.0
        for k in range(len(targets)):
            error = error + 0.5*(targets[k]-self.ao[k])**2
        return error

    def test(self, patterns):
        """Run the forward pass on each pattern; prints only if PRINT_IT."""
        for p in patterns:
            if PRINT_IT:
                print p[0], '->', self.update(p[0])

    def weights(self):
        """Dump both weight matrices (only when PRINT_IT is set)."""
        if PRINT_IT:
            print 'Input weights:'
            for i in range(self.ni):
                print self.wi[i]
            print
            print 'Output weights:'
            for j in range(self.nh):
                print self.wo[j]

    def train(self, patterns, iterations=2000, N=0.5, M=0.1):
        # N: learning rate
        # M: momentum factor
        for i in xrange(iterations):
            error = 0.0
            for p in patterns:
                inputs = p[0]
                targets = p[1]
                self.update(inputs)
                error = error + self.backPropagate(targets, N, M)
            if PRINT_IT and i % 100 == 0:
                print 'error %f' % error
def demo():
    """Teach the network the XOR function, then evaluate it on the
    training patterns."""
    pat = [
        [[0,0], [0]],
        [[0,1], [1]],
        [[1,0], [1]],
        [[1,1], [0]]
    ]
    # Two inputs, three hidden nodes, one output.
    net = NN(2, 3, 1)
    net.train(pat, 2000)
    net.test(pat)
if __name__ == '__main__':
    print 'Loading...'
    from pypy.translator.interactive import Translation
    t = Translation(demo)
    print 'Annotating...'
    t.annotate([])
    t.viewcg()    # opens the flow-graph viewer (this is why Pygame is required)
    print 'Specializing...'
    t.rtype()                         # enable this to see (some) lower-level Cish operations
    print 'Compiling...'
    f = t.compile_c()
    print 'Running...'
    # Time 10 runs of the compiled function...
    T = time.time()
    for i in range(10):
        f()
    t1 = time.time() - T
    print "that took", t1
    # ...against 10 runs of the plain interpreted demo().
    T = time.time()
    for i in range(10):
        demo()
    t2 = time.time() - T
    print "compared to", t2
    print "a speed-up of", t2/t1
| Python |
#!/usr/bin/env python
"""
Translator Demo
Run this file -- over regular Python! -- to analyse and type-annotate
the functions and class defined in this module, starting from the
entry point function demo().
Requires Pygame.
"""
# Back-Propagation Neural Networks
#
# Written in Python. See http://www.python.org/
#
# Neil Schemenauer <nascheme@enme.ucalgary.ca>
#
# Modifications to the original (Armin Rigo):
# * import random from PyPy's lib, which is Python 2.2's plain
# Python implementation
# * starts the Translator instead of the demo by default.
import sys
import math
import time
import autopath
from pypy.rlib import rrandom
# Silence per-iteration prints; flip to True for verbose training output.
PRINT_IT = False
# RNG with a fixed seed (1) so every run trains identically.
random = rrandom.Random(1)
def rand(a, b):
    """Return a pseudo-random float in the half-open interval [a, b)."""
    return a + (b - a) * random.random()
def makeMatrix(I, J, fill=0.0):
    """Build an I-by-J matrix as a list of I independent row lists,
    every entry initialized to `fill` (we could use NumPy to speed
    this up)."""
    return [[fill] * J for _ in range(I)]
class NN:
    """Fully-connected feed-forward network with one hidden layer,
    trained by back-propagation with momentum; sigmoid activations."""

    def __init__(self, ni, nh, no):
        # number of input, hidden, and output nodes
        self.ni = ni + 1 # +1 for bias node
        self.nh = nh
        self.no = no

        # activations for nodes
        self.ai = [1.0]*self.ni
        self.ah = [1.0]*self.nh
        self.ao = [1.0]*self.no

        # create weights
        self.wi = makeMatrix(self.ni, self.nh)
        self.wo = makeMatrix(self.nh, self.no)
        # set them to random values
        for i in range(self.ni):
            for j in range(self.nh):
                self.wi[i][j] = rand(-2.0, 2.0)
        for j in range(self.nh):
            for k in range(self.no):
                self.wo[j][k] = rand(-2.0, 2.0)

        # last change in weights for momentum
        self.ci = makeMatrix(self.ni, self.nh)
        self.co = makeMatrix(self.nh, self.no)

    def update(self, inputs):
        """Forward pass: propagate `inputs` through the net and return
        a copy of the output activations."""
        if len(inputs) != self.ni-1:
            raise ValueError, 'wrong number of inputs'

        # input activations (the bias node, initialized to 1.0, is
        # never overwritten because the loop stops at ni-1)
        for i in range(self.ni-1):
            #self.ai[i] = 1.0/(1.0+math.exp(-inputs[i]))
            self.ai[i] = inputs[i]

        # hidden activations
        for j in range(self.nh):
            sum = 0.0
            for i in range(self.ni):
                sum = sum + self.ai[i] * self.wi[i][j]
            self.ah[j] = 1.0/(1.0+math.exp(-sum))

        # output activations
        for k in range(self.no):
            sum = 0.0
            for j in range(self.nh):
                sum = sum + self.ah[j] * self.wo[j][k]
            self.ao[k] = 1.0/(1.0+math.exp(-sum))

        return self.ao[:]

    def backPropagate(self, targets, N, M):
        """One backward pass against `targets`.  N is the learning
        rate, M the momentum factor; returns the summed squared error."""
        if len(targets) != self.no:
            raise ValueError, 'wrong number of target values'

        # calculate error terms for output
        output_deltas = [0.0] * self.no
        for k in range(self.no):
            ao = self.ao[k]
            # ao*(1-ao) is the derivative of the sigmoid at ao
            output_deltas[k] = ao*(1-ao)*(targets[k]-ao)

        # calculate error terms for hidden
        hidden_deltas = [0.0] * self.nh
        for j in range(self.nh):
            sum = 0.0
            for k in range(self.no):
                sum = sum + output_deltas[k]*self.wo[j][k]
            hidden_deltas[j] = self.ah[j]*(1-self.ah[j])*sum

        # update output weights
        for j in range(self.nh):
            for k in range(self.no):
                change = output_deltas[k]*self.ah[j]
                self.wo[j][k] = self.wo[j][k] + N*change + M*self.co[j][k]
                self.co[j][k] = change
                #print N*change, M*self.co[j][k]

        # update input weights
        for i in range(self.ni):
            for j in range(self.nh):
                change = hidden_deltas[j]*self.ai[i]
                self.wi[i][j] = self.wi[i][j] + N*change + M*self.ci[i][j]
                self.ci[i][j] = change

        # calculate error
        error = 0.0
        for k in range(len(targets)):
            error = error + 0.5*(targets[k]-self.ao[k])**2
        return error

    def test(self, patterns):
        """Run the forward pass on each pattern; prints only if PRINT_IT."""
        for p in patterns:
            if PRINT_IT:
                print p[0], '->', self.update(p[0])

    def weights(self):
        """Dump both weight matrices (only when PRINT_IT is set)."""
        if PRINT_IT:
            print 'Input weights:'
            for i in range(self.ni):
                print self.wi[i]
            print
            print 'Output weights:'
            for j in range(self.nh):
                print self.wo[j]

    def train(self, patterns, iterations=2000, N=0.5, M=0.1):
        # N: learning rate
        # M: momentum factor
        for i in xrange(iterations):
            error = 0.0
            for p in patterns:
                inputs = p[0]
                targets = p[1]
                self.update(inputs)
                error = error + self.backPropagate(targets, N, M)
            if PRINT_IT and i % 100 == 0:
                print 'error %f' % error
def demo():
    """Teach the network the XOR function, then evaluate it on the
    training patterns."""
    pat = [
        [[0,0], [0]],
        [[0,1], [1]],
        [[1,0], [1]],
        [[1,1], [0]]
    ]
    # Two inputs, three hidden nodes, one output.
    net = NN(2, 3, 1)
    net.train(pat, 2000)
    net.test(pat)
if __name__ == '__main__':
    print 'Loading...'
    from pypy.translator.interactive import Translation
    t = Translation(demo)
    print 'Annotating...'
    t.annotate([])
    t.viewcg()    # opens the flow-graph viewer (this is why Pygame is required)
    print 'Specializing...'
    t.rtype()                         # enable this to see (some) lower-level Cish operations
    print 'Compiling...'
    f = t.compile_c()
    print 'Running...'
    # Time 10 runs of the compiled function...
    T = time.time()
    for i in range(10):
        f()
    t1 = time.time() - T
    print "that took", t1
    # ...against 10 runs of the plain interpreted demo().
    T = time.time()
    for i in range(10):
        demo()
    t2 = time.time() - T
    print "compared to", t2
    print "a speed-up of", t2/t1
| Python |
import time
def f(n):
    """Recursive factorial: n! for n > 1, otherwise 1."""
    return n * f(n - 1) if n > 1 else 1
import pypyjit
# Enable the (prototype) PyPy JIT for f's code object only.
pypyjit.enable(f.func_code)
print f(7)   # 5040
| Python |
import time

# Unused alternative for the literal 0 in f1 (see the '#ZERO' hint there).
ZERO = 0
def f1(n):
    """Arbitrary test function: same arithmetic as the original nested
    while-loops, written with for-loops.  For each i in 0..n-1, j runs
    over 1..i+1 and contributes i&j to the accumulator."""
    x = 1
    for i in range(n):
        for j in range(1, i + 2):
            x += i & j
    return x
try:
    import pypyjit
except ImportError:
    print "No jit"
else:
    pypyjit.enable(f1.func_code)   # JIT only f1's code object

# Warm-up run; also the reference result for the assertions below.
res = f1(2117)
print res

# Time N further runs and report the per-iteration average.
N = 5
start = time.time()
for i in range(N):
    assert f1(2117) == res
end = time.time()
print '%d iterations, time per iteration: %s' % (N, (end-start)/N)
| Python |
import time
def f(n):
    """Iterative factorial: the product n*(n-1)*...*2, or 1 when n <= 1."""
    result = 1
    while n > 1:
        result, n = result * n, n - 1
    return result
import pypyjit
# Enable the (prototype) PyPy JIT for f's code object only.
pypyjit.enable(f.func_code)
print f(7)   # 5040
| Python |
from dbc import ContractAspect, ContractError
# NOTE(review): instantiating ContractAspect presumably installs the
# contract-checking aspect (the _pre_*/_post_* hooks) globally before
# Stack is imported -- confirm against the dbc module.
ContractAspect()
from contract_stack import Stack
def run():
"""This is an example of how contracts work
"""
print "*"*30
print "Creating an empty stack (max_size = 3)"
stack = Stack(3)
try:
print "Empty stack, pop() should fail"
stack.pop()
except ContractError, excpt:
print "\t failed with %s, (OK)" % excpt
else:
print "\t did not failed, (XXX)"
print "\n\n\n"
stack.push(1)
print "push 1 done"
stack.push(2)
print "push 2 done"
stack.push(3)
print "push 3 done"
try:
print "The stack is full, push() should fail"
stack.push(4)
except ContractError, excpt:
print "\t failed with %s, (OK)" % excpt
else:
print "\t did not failed, (XXX)"
print "\n\n\n"
# Run the demo when executed as a script.
if __name__ == '__main__':
    run()
| Python |
import sys, os

# Make the grandparent directory of this file importable -- an
# autopath-style bootstrap so the examples run from a source checkout.
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
| Python |
"""
This is an example usage of the 'thunk' object space of PyPy.
It implements transparent distributed object manipulation.
Start a server on a local port, say port 8888, with:
$ py.py -o thunk sharedref.py 8888
Waiting for connection on port 8888
Then start and connect a client from the same or another machine:
$ py.py -o thunk sharedref.py ip_or_name:8888
Connecting to ('...', 8888)
Ok
>>> l = [1,2,3]
>>> chan.send(l) # send the list to the server over the connexion
On the server-side:
Connected from ('...', 1046)
>>> l = chan.recv() # receive the list sent above
>>> l
[1, 2, 3]
>>> l.append(4)
Back on the client-side:
>>> l
[1, 2, 3, 4]
The list behaves like a single distributed object, which both sides can
modify and access without needing further explicit synchronization.
There is no difference between who was the original sender or receiver of
the object, nor between which side was originally 'server' or 'client'.
"""
import sys, marshal
from __pypy__ import thunk, become
from socket import *
from select import select
class Channel:
    """One side of a socket connection over which objects are shared.

    Wire format: marshalled tuples, each prefixed by a 10-digit
    zero-padded length header.  A 3-tuple (n, obj, None) announces a
    newly sent object, a 2-tuple (n, obj) answers a fetch request, and
    a 1-tuple (n,) requests object n from the peer.
    """
    def __init__(self, s, serverside):
        # invariants: a shared object 'obj' is
        # - either remote, and a thunk, and not a value in self.cache
        # - or local (or at least on "our" side of this Channel), and
        #   then it has a corresponding key in self.cache
        self.s = s
        self.cache = {}
        self.inputfifo = []
        # Object ids: server uses even numbers, client odd ones
        # (count advances by 2), so the two sides never collide.
        self.count = int(not serverside)

##    def _check(self, obj):
##        print '%s: cache=%r' % (self, self.cache.keys()),
##        if is_thunk(obj):
##            print 'THUNK'
##        else:
##            print obj

    def sendraw(self, obj):
        """Marshal `obj` and send it with the 10-byte length header."""
        data = marshal.dumps(obj)
        hdr = str(len(data))
        hdr = '0'*(10-len(hdr)) + hdr
        self.s.sendall(hdr + data)

    def _readbytes(self, count):
        """Read exactly `count` bytes; raise EOFError if the peer closes."""
        data = ''
        while len(data) < count:
            t = self.s.recv(count - len(data))
            if not t:
                raise EOFError
            data += t
        return data

    def recvraw(self):
        """Receive and unmarshal one length-prefixed message."""
        datasize = int(self._readbytes(10))
        data = self._readbytes(datasize)
        return marshal.loads(data)

    def send(self, obj, n=None):
        """Ship `obj` to the peer, then turn the local binding into a
        lazy thunk that fetches the object back on first use."""
        #print 'send', n,; self._check(obj)
        if n is None:
            # Fresh object: allocate an id and announce with a 3-tuple.
            n = self.count
            self.count += 2
            data = (n, obj, None)
        else:
            # Answer to a fetch request for an existing id.
            data = (n, obj)
        self.sendraw(data)
        become(obj, thunk(self._resume, n))
        #print 'done', n,; self._check(obj)

    def recv(self):
        """Return the oldest object announced by the peer."""
        obj = self.inputfifo.pop(0)
        #print 'recv',; self._check(obj)
        return obj

    def _resume(self, n):
        """Thunk target: ask the peer for object n and pump incoming
        messages until it arrives."""
        #print 'resume', n,; sys.stdout.flush()
        assert n not in self.cache
        self.sendraw((n,))
        while n not in self.cache:
            self.handle_once()
        obj = self.cache[n]
        #self._check(obj)
        return obj

    def handle_once(self):
        """Process one incoming message: an announcement/answer
        (len > 1) or a fetch request (len == 1)."""
        input = self.recvraw()
        if len(input) > 1:
            obj = input[1]
            self.cache[input[0]] = obj
            if len(input) > 2:
                self.inputfifo.append(obj)
        else:
            # Peer wants object n back: send it and forget our copy.
            n = input[0]
            obj = self.cache[n]
            self.send(obj, n)
            del self.cache[n]
def mainloop(channels):
    """Interactive prompt that also serves channel traffic: between
    keystrokes, incoming messages on any channel are handled; typed
    lines are compiled and executed in the module globals."""
    stdin = sys.stdin.fileno()
    sockfd = [chan.s.fileno() for chan in channels]
    while True:
        sys.stdout.write('>>> ')
        sys.stdout.flush()
        # Serve channel messages until there is input on stdin.
        while True:
            iwtd, owtd, ewtd = select([stdin] + sockfd, [], [stdin])
            if stdin in iwtd or stdin in ewtd: break
            for chan in channels:
                if chan.s.fileno() in iwtd:
                    chan.handle_once()
        code = raw_input()
        if not code: break   # empty line quits
        try:
            co = compile(code, '<input>', 'single')
            exec co in globals()
        except Exception, e:
            print e.__class__.__name__, str(e)
def server(port):
s = socket(AF_INET, SOCK_STREAM)
s.bind(('', port))
s.listen(1)
print 'Waiting for connection on port', port
s, addr = s.accept()
print 'Connected from', addr
return Channel(s, True)
def client(addr):
s = socket(AF_INET, SOCK_STREAM)
print 'Connecting to', addr
s.connect(addr)
print 'Ok'
return Channel(s, False)
if __name__ == '__main__':
    try:
        thunk, become # only available in 'py.py -o thunk'
    except NameError:
        print __doc__
        raise SystemExit(2)
    channels = []
    # Each argument is either a bare port (act as server) or a
    # host:port pair (connect as client).
    for a in sys.argv[1:]:
        try:
            port = int(a)
        except ValueError:
            host, port = a.split(':')
            port = int(port)
            chan = client((host, port))
        else:
            chan = server(port)
        channels.append(chan)
    mainloop(channels)
| Python |
class Stack:
    """A very simple bounded LIFO stack backed by a Python list.

    The _pre_*/_post_* methods are design-by-contract hooks (evaluated
    by the dbc ContractAspect); they return booleans instead of raising.
    """

    def __init__(self, max_size=10):
        self.max_size = max_size
        self.elements = []

    def _pre_pop(self):
        # pop() requires a non-empty stack.
        return not self.is_empty()

    def _post_pop(self, old, ret):
        # pop() must return the previous top and shrink the stack by one.
        if ret != old.top():
            return False
        return self.size() == old.size() - 1

    def pop(self):
        return self.elements.pop()

    def _pre_push(self, obj):
        # push() requires a real value and room for it.
        if obj is None:
            return False
        return not self.is_full()

    def _post_push(self, old, ret, obj):
        # After push() the stack is non-empty and obj sits on top.
        return (not self.is_empty()) and self.top() == obj

    def push(self, obj):
        self.elements.append(obj)

    def top(self):
        """Returns the top element of the stack
        """
        return self.elements[-1]

    def is_empty(self):
        """Tells whether or not the stack is empty
        """
        return len(self.elements) == 0

    def is_full(self):
        """Tells whether or not the stack is full
        """
        return len(self.elements) == self.max_size

    def size(self):
        """Returns the current size of the stack
        """
        return len(self.elements)

    def __str__(self):
        return "elements = %s, max_size = %s" % (self.elements, self.max_size)
| Python |
"""This is an example that uses the (prototype) Logic Object Space. To run,
you have to set USE_GREENLETS in pypy.objspace.logic to True and do:
$ py.py -o logic producerconsumer.py
newvar creates a new unbound logical variable. If you try to access an unbound
variable, the current uthread is blocked, until the variable is bound.
"""
def generate(n, limit):
print "generate", n, limit
if n < limit:
return (n, generate(n + 1, limit))
return None
def sum(L, a):
    """Consumer: walk the logic-variable list L, accumulating into a.

    Accessing an unbound cell blocks this uthread until the producer
    binds it (see the module docstring), which synchronizes the two.
    """
    print "sum", a
    Head, Tail = newvar(), newvar()
    unify(L, (Head, Tail))
    if Tail != None:
        return sum(Tail, Head + a)
    return a + Head
print "eager producer consummer"
print "before"
X = newvar()
S = newvar()
bind(S, uthread(sum, X, 0))
unify(X, uthread(generate, 0, 10))
print "after"
assert S == 45
print S # needs a special treatment
| Python |
"""
Stackless demo.
This example only works on top of a pypy-c compiled with stackless features
and the signal module:
translate.py --stackless targetpypystandalone --withmod-signal
Usage:
pypy-c pickle_coroutine.py --start demo.pickle
Start the computation. You can interrupt it at any time by
pressing Ctrl-C; at this point, the state of the computing
coroutine is saved in demo.pickle.
pypy-c pickle_coroutine.py --resume demo.pickle
Reload the coroutine from demo.pickle and continue running it.
(It can be interrupted again with Ctrl-C.)
This demo is documented in detail in pypy/doc/stackless.txt.
"""
try:
import sys, pickle, signal
from stackless import coroutine
except ImportError:
print __doc__
sys.exit(2)
def ackermann(x, y):
    """Ackermann's function; calls check() first so a pending Ctrl-C
    can suspend the computation at any recursion step."""
    check()
    if x == 0:
        return y + 1
    elif y == 0:
        return ackermann(x - 1, 1)
    else:
        return ackermann(x - 1, ackermann(x, y - 1))
# ____________________________________________________________

main = coroutine.getcurrent()     # coroutine to switch back to on Ctrl-C
sys.setrecursionlimit(100000)     # ackermann recursion gets very deep
interrupt_flag = False
def interrupt_handler(*args):
    """SIGINT handler: only raise the flag; check() does the switching."""
    global interrupt_flag
    interrupt_flag = True
def check():
    """Called from inside ackermann(): if Ctrl-C was pressed, switch
    back to the main coroutine, freezing the computation mid-flight."""
    if interrupt_flag:
        main.switch()
def execute(coro):
    """Run (or resume) `coro`; on Ctrl-C pickle its frozen state to
    the global `filename`, otherwise print its result."""
    signal.signal(signal.SIGINT, interrupt_handler)
    res = coro.switch()
    if res is None and coro.is_alive: # interrupted!
        print "interrupted! writing %s..." % (filename,)
        f = open(filename, 'w')
        pickle.dump(coro, f)
        f.close()
        print "done"
    else:
        print "result:", res
try:
    operation, filename = sys.argv[1:]
except ValueError:
    print __doc__
    sys.exit(2)

if operation == '--start':
    # Fresh run: bind ackermann(3, 7) to a new coroutine.
    coro = coroutine()
    coro.bind(ackermann, 3, 7)
    print "running from the start..."
    execute(coro)
elif operation == '--resume':
    # Reload a previously interrupted coroutine and continue it.
    print "reloading %s..." % (filename,)
    f = open(filename)
    coro = pickle.load(f)
    f.close()
    print "done, running now..."
    execute(coro)
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 The RE2 Authors. All Rights Reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Unittest for the util/regexp/re2/unicode.py module."""
import os
import StringIO
from google3.pyglib import flags
from google3.testing.pybase import googletest
from google3.util.regexp.re2 import unicode
# Location of the Unicode Character Database tables used by these tests.
_UNICODE_DIR = os.path.join(flags.FLAGS.test_srcdir, "google3", "third_party",
                            "unicode", "ucd-5.1.0")
class ConvertTest(googletest.TestCase):
  """Test the conversion functions."""

  def testUInt(self):
    self.assertEquals(0x0000, unicode._UInt("0000"))
    self.assertEquals(0x263A, unicode._UInt("263A"))
    self.assertEquals(0x10FFFF, unicode._UInt("10FFFF"))
    # Too short, too long, and above the Unicode range must all fail.
    self.assertRaises(unicode.InputError, unicode._UInt, "263")
    self.assertRaises(unicode.InputError, unicode._UInt, "263AAAA")
    self.assertRaises(unicode.InputError, unicode._UInt, "110000")

  def testURange(self):
    self.assertEquals([1, 2, 3], unicode._URange("0001..0003"))
    self.assertEquals([1], unicode._URange("0001"))
    # Malformed, reversed, and degenerate (lo == hi) ranges must fail.
    self.assertRaises(unicode.InputError, unicode._URange, "0001..0003..0005")
    self.assertRaises(unicode.InputError, unicode._URange, "0003..0001")
    self.assertRaises(unicode.InputError, unicode._URange, "0001..0001")

  def testUStr(self):
    self.assertEquals("0x263A", unicode._UStr(0x263a))
    self.assertEquals("0x10FFFF", unicode._UStr(0x10FFFF))
    self.assertRaises(unicode.InputError, unicode._UStr, 0x110000)
    self.assertRaises(unicode.InputError, unicode._UStr, -1)
_UNICODE_TABLE = """# Commented line, should be ignored.
# The next line is blank and should be ignored.
0041;Capital A;Line 1
0061..007A;Lowercase;Line 2
1F00;<Greek, First>;Ignored
1FFE;<Greek, Last>;Line 3
10FFFF;Runemax;Line 4
0000;Zero;Line 5
"""
_BAD_TABLE1 = """
111111;Not a code point;
"""
_BAD_TABLE2 = """
0000;<Zero, First>;Missing <Zero, Last>
"""
_BAD_TABLE3 = """
0010..0001;Bad range;
"""
class AbortError(Exception):
  """Function should not have been called."""
def Abort():
  """Callback that fails the test if the table reader ever invokes it."""
  raise AbortError("Abort")
def StringTable(s, n, f):
  """Feed the literal table text `s` through ReadUnicodeTable with
  `n` expected fields and callback `f`."""
  unicode.ReadUnicodeTable(StringIO.StringIO(s), n, f)
class ReadUnicodeTableTest(googletest.TestCase):
  """Test the ReadUnicodeTable function."""

  def testSimpleTable(self):
    ncall = [0]  # can't assign to ordinary int in DoLine

    def DoLine(codes, fields):
      """Check the ncall[0]-th entry against the expected fixture row."""
      self.assertEquals(3, len(fields))
      ncall[0] += 1
      self.assertEquals("Line %d" % (ncall[0],), fields[2])
      if ncall[0] == 1:
        self.assertEquals([0x0041], codes)
        self.assertEquals("0041", fields[0])
        self.assertEquals("Capital A", fields[1])
      elif ncall[0] == 2:
        self.assertEquals(range(0x0061, 0x007A + 1), codes)
        self.assertEquals("0061..007A", fields[0])
        self.assertEquals("Lowercase", fields[1])
      elif ncall[0] == 3:
        # The <Greek, First>/<Greek, Last> pair is merged into one entry.
        self.assertEquals(range(0x1F00, 0x1FFE + 1), codes)
        self.assertEquals("1F00..1FFE", fields[0])
        self.assertEquals("Greek", fields[1])
      elif ncall[0] == 4:
        self.assertEquals([0x10FFFF], codes)
        self.assertEquals("10FFFF", fields[0])
        self.assertEquals("Runemax", fields[1])
      elif ncall[0] == 5:
        self.assertEquals([0x0000], codes)
        self.assertEquals("0000", fields[0])
        self.assertEquals("Zero", fields[1])

    StringTable(_UNICODE_TABLE, 3, DoLine)
    self.assertEquals(5, ncall[0])

  def testErrorTables(self):
    # Wrong field counts and malformed tables must raise before the
    # callback is ever reached (Abort would fail the test otherwise).
    self.assertRaises(unicode.InputError, StringTable, _UNICODE_TABLE, 4, Abort)
    self.assertRaises(unicode.InputError, StringTable, _UNICODE_TABLE, 2, Abort)
    self.assertRaises(unicode.InputError, StringTable, _BAD_TABLE1, 3, Abort)
    self.assertRaises(unicode.InputError, StringTable, _BAD_TABLE2, 3, Abort)
    self.assertRaises(unicode.InputError, StringTable, _BAD_TABLE3, 3, Abort)
class ParseContinueTest(googletest.TestCase):
  """Test the ParseContinue function."""

  def testParseContinue(self):
    self.assertEquals(("Private Use", "First"),
                      unicode._ParseContinue("<Private Use, First>"))
    self.assertEquals(("Private Use", "Last"),
                      unicode._ParseContinue("<Private Use, Last>"))
    # A field that is not a First/Last marker is passed through verbatim.
    self.assertEquals(("<Private Use, Blah>", None),
                      unicode._ParseContinue("<Private Use, Blah>"))
class CaseGroupsTest(googletest.TestCase):
  """Test the CaseGroups function (and the CaseFoldingReader)."""

  def FindGroup(self, c):
    """Return the fold group containing code point (or character) c."""
    if type(c) == str:
      c = ord(c)
    for g in self.groups:
      if c in g:
        return g
    return None

  def testCaseGroups(self):
    self.groups = unicode.CaseGroups(unicode_dir=_UNICODE_DIR)
    self.assertEquals([ord("A"), ord("a")], self.FindGroup("a"))
    # Digits have no case-fold equivalents.
    self.assertEquals(None, self.FindGroup("0"))
class ScriptsTest(googletest.TestCase):
  """Test the Scripts function (and the ScriptsReader)."""

  def FindScript(self, c):
    """Return the script owning code point (or character) c, else None."""
    if type(c) == str:
      c = ord(c)
    for script, codes in self.scripts.items():
      for code in codes:
        if c == code:
          return script
    return None

  def testScripts(self):
    self.scripts = unicode.Scripts(unicode_dir=_UNICODE_DIR)
    self.assertEquals("Latin", self.FindScript("a"))
    self.assertEquals("Common", self.FindScript("0"))
    # 0xFFFE is a noncharacter: belongs to no script.
    self.assertEquals(None, self.FindScript(0xFFFE))
class CategoriesTest(googletest.TestCase):
  """Test the Categories function (and the UnicodeDataReader)."""

  def FindCategory(self, c):
    """Return the most specific category containing c (prefer the
    two-letter category, e.g. Nd, over the one-letter one, e.g. N)."""
    if type(c) == str:
      c = ord(c)
    short = None
    for category, codes in self.categories.items():
      for code in codes:
        if code == c:
          # prefer category Nd over N
          if len(category) > 1:
            return category
          if short == None:
            short = category
    return short

  def testCategories(self):
    self.categories = unicode.Categories(unicode_dir=_UNICODE_DIR)
    self.assertEquals("Ll", self.FindCategory("a"))
    self.assertEquals("Nd", self.FindCategory("0"))
    self.assertEquals("Lo", self.FindCategory(0xAD00))  # in First, Last range
    self.assertEquals(None, self.FindCategory(0xFFFE))
    self.assertEquals("Lo", self.FindCategory(0x8B5A))
    self.assertEquals("Lo", self.FindCategory(0x6C38))
    self.assertEquals("Lo", self.FindCategory(0x92D2))
    # One-letter supercategories aggregate their two-letter subcategories.
    self.assertTrue(ord("a") in self.categories["L"])
    self.assertTrue(ord("0") in self.categories["N"])
    self.assertTrue(0x8B5A in self.categories["L"])
    self.assertTrue(0x6C38 in self.categories["L"])
    self.assertTrue(0x92D2 in self.categories["L"])
def main():
  googletest.main()


if __name__ == "__main__":
  main()
| Python |
# Copyright 2008 The RE2 Authors. All Rights Reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Parser for Unicode data files (as distributed by unicode.org)."""
import os
import re
import urllib2
# Directory or URL where Unicode tables reside.
_UNICODE_DIR = "http://www.unicode.org/Public/6.0.0/ucd"

# Largest valid Unicode code value.
_RUNE_MAX = 0x10FFFF
class Error(Exception):
  """Unicode error base class."""
class InputError(Error):
  """Unicode input error class. Raised on invalid input."""
def _UInt(s):
  """Converts string to Unicode code point ('263A' => 0x263a).

  Args:
    s: string to convert

  Returns:
    Unicode code point

  Raises:
    InputError: the string is not a valid Unicode value.
  """
  # Valid inputs are 4 to 6 hex digits naming a scalar <= _RUNE_MAX.
  if 4 <= len(s) <= 6:
    try:
      value = int(s, 16)
    except ValueError:
      pass
    else:
      if 0 <= value <= _RUNE_MAX:
        return value
  raise InputError("invalid Unicode value %s" % (s,))
def _URange(s):
  """Converts string to Unicode range.

    '0001..0003' => [1, 2, 3].
    '0001' => [1].

  Args:
    s: string to convert

  Returns:
    Unicode range

  Raises:
    InputError: the string is not a valid Unicode range.
  """
  parts = s.split("..")
  if len(parts) == 1:
    return [_UInt(parts[0])]
  if len(parts) == 2:
    lo, hi = _UInt(parts[0]), _UInt(parts[1])
    # A strictly increasing pair is the only valid two-part range.
    if lo < hi:
      return range(lo, hi + 1)
  raise InputError("invalid Unicode range %s" % (s,))
def _UStr(v):
  """Converts Unicode code point to hex string.

    0x263a => '0x263A'.

  Args:
    v: code point to convert

  Returns:
    Unicode string

  Raises:
    InputError: the argument is not a valid Unicode value.
  """
  if 0 <= v <= _RUNE_MAX:
    return "0x%04X" % (v,)
  raise InputError("invalid Unicode value %s" % (v,))
def _ParseContinue(s):
"""Parses a Unicode continuation field.
These are of the form '<Name, First>' or '<Name, Last>'.
Instead of giving an explicit range in a single table entry,
some Unicode tables use two entries, one for the first
code value in the range and one for the last.
The first entry's description is '<Name, First>' instead of 'Name'
and the second is '<Name, Last>'.
'<Name, First>' => ('Name', 'First')
'<Name, Last>' => ('Name', 'Last')
'Anything else' => ('Anything else', None)
Args:
s: continuation field string
Returns:
pair: name and ('First', 'Last', or None)
"""
match = re.match("<(.*), (First|Last)>", s)
if match is not None:
return match.groups()
return (s, None)
def ReadUnicodeTable(filename, nfields, doline):
  """Generic Unicode table text file reader.

  The reader takes care of stripping out comments and also
  parsing the two different ways that the Unicode tables specify
  code ranges (using the .. notation and splitting the range across
  multiple lines).

  Each non-comment line in the table is expected to have the given
  number of fields. The first field is known to be the Unicode value
  and the second field its description.

  The reader calls doline(codes, fields) for each entry in the table.
  If doline raises an exception, the reader prints it prefixed with
  the file name and line number, and re-raises it.

  Arguments:
    filename: the Unicode data file to read, or a file-like object.
    nfields: the number of expected fields per line in that file.
    doline: the function to call for each table entry.

  Raises:
    InputError: nfields is invalid (must be >= 2).
  """
  if nfields < 2:
    raise InputError("invalid number of fields %d" % (nfields,))

  # Accept a path (local or http URL) or an already-open file object.
  if type(filename) == str:
    if filename.startswith("http://"):
      fil = urllib2.urlopen(filename)
    else:
      fil = open(filename, "r")
  else:
    fil = filename

  first = None        # first code in multiline range
  expect_last = None  # tag expected for "Last" line in multiline range
  lineno = 0          # current line number
  for line in fil:
    lineno += 1
    try:
      # Chop # comments and white space; ignore empty lines.
      sharp = line.find("#")
      if sharp >= 0:
        line = line[:sharp]
      line = line.strip()
      if not line:
        continue

      # Split fields on ";", chop more white space.
      # Must have the expected number of fields.
      fields = [s.strip() for s in line.split(";")]
      if len(fields) != nfields:
        raise InputError("wrong number of fields %d %d - %s" %
                         (len(fields), nfields, line))

      # The Unicode text files have two different ways
      # to list a Unicode range.  Either the first field is
      # itself a range (0000..FFFF), or the range is split
      # across two lines, with the second field noting
      # the continuation.
      codes = _URange(fields[0])
      (name, cont) = _ParseContinue(fields[1])

      if expect_last is not None:
        # If the last line gave the First code in a range,
        # this one had better give the Last one.
        if (len(codes) != 1 or codes[0] <= first or
            cont != "Last" or name != expect_last):
          raise InputError("expected Last line for %s" %
                           (expect_last,))
        # Merge the First/Last pair into a single range entry and
        # rewrite the fields so doline sees the combined form.
        codes = range(first, codes[0] + 1)
        first = None
        expect_last = None
        fields[0] = "%04X..%04X" % (codes[0], codes[-1])
        fields[1] = name
      elif cont == "First":
        # Otherwise, if this is the First code in a range,
        # remember it and go to the next line.
        if len(codes) != 1:
          raise InputError("bad First line: range given")
        expect_last = name
        first = codes[0]
        continue

      doline(codes, fields)

    except Exception, e:
      print "%s:%d: %s" % (filename, lineno, e)
      raise

  if expect_last is not None:
    raise InputError("expected Last line for %s; got EOF" %
                     (expect_last,))
def CaseGroups(unicode_dir=_UNICODE_DIR):
  """Returns Unicode code groups equivalent under case folding.

  Each group is a sorted list of code points, and the list of groups
  is sorted by first code point in the group.

  Args:
    unicode_dir: Unicode data directory

  Returns:
    (togroup, groups): togroup maps a lowercase code point to its
    fold-equivalent group; groups is the sorted list of all groups.
  """
  # Dict mapping lowercase code point to fold-equivalent group.
  togroup = {}

  def DoLine(codes, fields):
    """Process single CaseFolding.txt line, updating togroup."""
    (_, foldtype, lower, _) = fields
    # Only common (C) and simple (S) foldings contribute.
    if foldtype not in ("C", "S"):
      return
    lower = _UInt(lower)
    togroup.setdefault(lower, [lower]).extend(codes)

  ReadUnicodeTable(unicode_dir+"/CaseFolding.txt", 4, DoLine)
  groups = togroup.values()
  # Sort each group in place so the list objects shared with togroup
  # end up sorted as well.
  for group in groups:
    group.sort()
  groups.sort()
  return togroup, groups
def Scripts(unicode_dir=_UNICODE_DIR):
  """Returns dict mapping script names to code lists.

  Args:
    unicode_dir: Unicode data directory

  Returns:
    dict mapping script names to code lists
  """
  script_codes = {}

  def DoLine(codes, fields):
    """Process single Scripts.txt line, updating script_codes."""
    # fields is (range, script-name); nfields=2 guarantees the length.
    script_codes.setdefault(fields[1], []).extend(codes)

  ReadUnicodeTable(unicode_dir+"/Scripts.txt", 2, DoLine)
  return script_codes
def Categories(unicode_dir=_UNICODE_DIR):
  """Returns dict mapping category names to code lists.

  Args:
    unicode_dir: Unicode data directory

  Returns:
    dict mapping category names to code lists
  """
  categories = {}

  def DoLine(codes, fields):
    """Process single UnicodeData.txt line, updating categories."""
    category = fields[2]
    categories.setdefault(category, []).extend(codes)
    # A two-letter category such as Lu also counts toward its
    # one-letter super-category (L).
    if len(category) > 1:
      categories.setdefault(category[0], []).extend(codes)

  ReadUnicodeTable(unicode_dir+"/UnicodeData.txt", 15, DoLine)
  return categories
| Python |
# coding=utf-8
# (The line above is necessary so that I can use 世界 in the
# *comment* below without Python getting all bent out of shape.)
# Copyright 2007-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Mercurial interface to codereview.appspot.com.
To configure, set the following options in
your repository's .hg/hgrc file.
[extensions]
codereview = /path/to/codereview.py
[codereview]
server = codereview.appspot.com
The server should be running Rietveld; see http://code.google.com/p/rietveld/.
In addition to the new commands, this extension introduces
the file pattern syntax @nnnnnn, where nnnnnn is a change list
number, to mean the files included in that change list, which
must be associated with the current client.
For example, if change 123456 contains the files x.go and y.go,
"hg diff @123456" is equivalent to "hg diff x.go y.go".
'''
import sys
# Refuse to run as a standalone script: this file only works as an hg extension.
if __name__ == "__main__":
	print >>sys.stderr, "This is a Mercurial extension and should not be invoked directly."
	sys.exit(2)
# We require Python 2.6 for the json package.
# NOTE(review): string comparison of versions would misorder "2.10" vs "2.6",
# but is adequate for distinguishing 2.x minor versions of that era.
if sys.version < '2.6':
	print >>sys.stderr, "The codereview extension requires Python 2.6 or newer."
	print >>sys.stderr, "You are running Python " + sys.version
	sys.exit(2)
import json
import os
import re
import stat
import subprocess
import threading
import time
from mercurial import commands as hg_commands
from mercurial import util as hg_util
# Module-level state; several of these are assigned by configuration code
# elsewhere in the file (not visible in this chunk).
defaultcc = None  # default CC list applied to new CLs (see CommandLineCL)
codereview_disabled = None  # presumably a reason string when the extension is disabled -- TODO confirm
real_rollback = None  # NOTE(review): looks like a saved original rollback handler -- confirm
releaseBranch = None  # current release branch name, if any -- set elsewhere
server = "codereview.appspot.com"  # default Rietveld server (see module docstring)
server_url_base = None  # URL prefix for CL pages; used to build cl.url in CL.Upload/LoadCL
#######################################################################
# Normally I would split this into multiple files, but it simplifies
# import path headaches to keep it all in one file. Sorry.
# The different parts of the file are separated by banners like this one.
#######################################################################
# Helpers
def RelativePath(path, cwd):
	"""Return path relative to cwd if path lies strictly inside cwd.

	Both arguments are slash-separated path strings; cwd has no trailing
	slash. If path is not under cwd (or equals it), path is returned
	unchanged.
	"""
	n = len(cwd)
	# Use a slice instead of path[n] so that path == cwd (nothing after
	# the prefix) cannot raise IndexError.
	if path.startswith(cwd) and path[n:n+1] == '/':
		return path[n+1:]
	return path
def Sub(l1, l2):
	"""Return the elements of l1 that do not appear in l2, keeping l1's order."""
	kept = []
	for item in l1:
		if item not in l2:
			kept.append(item)
	return kept
def Add(l1, l2):
	"""Return a sorted list combining l1 with the elements of l2 not already in l1."""
	merged = l1 + [item for item in l2 if item not in l1]
	merged.sort()
	return merged
def Intersect(l1, l2):
	"""Return the elements of l1 that also appear in l2, keeping l1's order."""
	common = []
	for item in l1:
		if item in l2:
			common.append(item)
	return common
#######################################################################
# RE: UNICODE STRING HANDLING
#
# Python distinguishes between the str (string of bytes)
# and unicode (string of code points) types. Most operations
# work on either one just fine, but some (like regexp matching)
# require unicode, and others (like write) require str.
#
# As befits the language, Python hides the distinction between
# unicode and str by converting between them silently, but
# *only* if all the bytes/code points involved are 7-bit ASCII.
# This means that if you're not careful, your program works
# fine on "hello, world" and fails on "hello, 世界". And of course,
# the obvious way to be careful - use static types - is unavailable.
# So the only way is trial and error to find where to put explicit
# conversions.
#
# Because more functions do implicit conversion to str (string of bytes)
# than do implicit conversion to unicode (string of code points),
# the convention in this module is to represent all text as str,
# converting to unicode only when calling a unicode-only function
# and then converting back to str as soon as possible.
def typecheck(s, t):
	"""Abort unless s is exactly of type t (subclasses do not count)."""
	if type(s) == t:
		return
	raise hg_util.Abort("type check failed: %s has type %s != %s" % (repr(s), type(s), t))
# If we have to pass unicode instead of str, ustr does that conversion clearly.
def ustr(s):
	# s must already be a str (string of bytes); decode it as UTF-8.
	typecheck(s, str)
	return s.decode("utf-8")
# Even with those, Mercurial still sometimes turns unicode into str
# and then tries to use it as ascii. Change Mercurial's default.
def set_mercurial_encoding_to_utf8():
	# Mercurial consults this module-level setting for its own
	# str/unicode conversions.
	from mercurial import encoding
	encoding.encoding = 'utf-8'
# Applied once at extension load time.
set_mercurial_encoding_to_utf8()
# Even with those we still run into problems.
# I tried to do things by the book but could not convince
# Mercurial to let me check in a change with UTF-8 in the
# CL description or author field, no matter how many conversions
# between str and unicode I inserted and despite changing the
# default encoding. I'm tired of this game, so set the default
# encoding for all of Python to 'utf-8', not 'ascii'.
def default_to_utf8():
	import sys
	# reload(sys) replaces sys.stdout/__stdout__, so save and restore them.
	stdout, __stdout__ = sys.stdout, sys.__stdout__
	reload(sys) # site.py deleted setdefaultencoding; get it back
	sys.stdout, sys.__stdout__ = stdout, __stdout__
	sys.setdefaultencoding('utf-8')
# Applied once at extension load time.
default_to_utf8()
#######################################################################
# Status printer for long-running commands
global_status = None  # most recent status message; displayed by StatusThread
def set_status(s):
	# print >>sys.stderr, "\t", time.asctime(), s
	# Record s as the current long-running-command status (None hides it).
	global global_status
	global_status = s
class StatusThread(threading.Thread):
	"""Daemon thread that periodically prints global_status to stderr."""
	def __init__(self):
		threading.Thread.__init__(self)
	def run(self):
		# pause a reasonable amount of time before
		# starting to display status messages, so that
		# most hg commands won't ever see them.
		time.sleep(30)
		# now show status every 15 seconds
		while True:
			# Sleep until the next 15-second boundary.
			time.sleep(15 - time.time() % 15)
			s = global_status
			if s is None:
				continue
			if s == "":
				s = "(unknown status)"
			print >>sys.stderr, time.asctime(), s
def start_status_thread():
	"""Launch the background status printer as a daemon thread."""
	worker = StatusThread()
	worker.setDaemon(True) # allowed to exit even if worker is still running
	worker.start()
#######################################################################
# Change list parsing.
#
# Change lists are stored in .hg/codereview/cl.nnnnnn
# where nnnnnn is the number assigned by the code review server.
# Most data about a change list is stored on the code review server
# too: the description, reviewer, and cc list are all stored there.
# The only thing in the cl.nnnnnn file is the list of relevant files.
# Also, the existence of the cl.nnnnnn file marks this repository
# as the one where the change list lives.
# Minimal placeholder diff uploaded when a CL is first created, before any
# real file diffs exist (see CL.Upload).
emptydiff = """Index: ~rietveld~placeholder~
===================================================================
diff --git a/~rietveld~placeholder~ b/~rietveld~placeholder~
new file mode 100644
"""
class CL(object):
	"""In-memory representation of one change list (CL).

	Mirrors the on-disk file .hg/codereview/cl.nnnnnn (which holds only the
	file list and a few flags) plus the metadata kept on the code review
	server: description, reviewers, CC list.
	"""
	def __init__(self, name):
		typecheck(name, str)
		self.name = name  # CL number as a string, or "new"
		self.desc = ''  # description text
		self.files = []  # files included in this CL
		self.reviewer = []
		self.cc = []
		self.url = ''
		self.local = False  # does a local cl.nnnnnn file exist?
		self.web = False  # has server-side metadata been loaded/created?
		self.copied_from = None	# None means current user
		self.mailed = False  # has the CL been mailed to reviewers?
		self.private = False
		self.lgtm = []  # list of (who, first-line-of-message) approvals
	def DiskText(self):
		"""Render the CL in the format stored in .hg/codereview/cl.nnnnnn."""
		cl = self
		s = ""
		if cl.copied_from:
			s += "Author: " + cl.copied_from + "\n\n"
		if cl.private:
			s += "Private: " + str(self.private) + "\n"
		s += "Mailed: " + str(self.mailed) + "\n"
		s += "Description:\n"
		s += Indent(cl.desc, "\t")
		s += "Files:\n"
		for f in cl.files:
			s += "\t" + f + "\n"
		typecheck(s, str)
		return s
	def EditorText(self):
		"""Render the CL in the editable format presented by 'hg change'."""
		cl = self
		s = _change_prolog
		s += "\n"
		if cl.copied_from:
			s += "Author: " + cl.copied_from + "\n"
		if cl.url != '':
			s += 'URL: ' + cl.url + '	# cannot edit\n\n'
		if cl.private:
			s += "Private: True\n"
		s += "Reviewer: " + JoinComma(cl.reviewer) + "\n"
		s += "CC: " + JoinComma(cl.cc) + "\n"
		s += "\n"
		s += "Description:\n"
		if cl.desc == '':
			s += "\t<enter description here>\n"
		else:
			s += Indent(cl.desc, "\t")
		s += "\n"
		# Only show the file list if it is meaningful to edit it here.
		if cl.local or cl.name == "new":
			s += "Files:\n"
			for f in cl.files:
				s += "\t" + f + "\n"
			s += "\n"
		typecheck(s, str)
		return s
	def PendingText(self, quick=False):
		"""Render the CL for 'hg pending'; quick skips reviewer/CC detail."""
		cl = self
		s = cl.name + ":" + "\n"
		s += Indent(cl.desc, "\t")
		s += "\n"
		if cl.copied_from:
			s += "\tAuthor: " + cl.copied_from + "\n"
		if not quick:
			s += "\tReviewer: " + JoinComma(cl.reviewer) + "\n"
			for (who, line) in cl.lgtm:
				s += "\t\t" + who + ": " + line + "\n"
			s += "\tCC: " + JoinComma(cl.cc) + "\n"
		s += "\tFiles:\n"
		for f in cl.files:
			s += "\t\t" + f + "\n"
		typecheck(s, str)
		return s
	def Flush(self, ui, repo):
		"""Write the CL to disk (and, if appropriate, push edits to the server)."""
		if self.name == "new":
			# A new CL must be created on the server first to get a number.
			self.Upload(ui, repo, gofmt_just_warn=True, creating=True)
		dir = CodeReviewDir(ui, repo)
		path = dir + '/cl.' + self.name
		# Write to a temporary name and rename for atomic replacement.
		f = open(path+'!', "w")
		f.write(self.DiskText())
		f.close()
		if sys.platform == "win32" and os.path.isfile(path):
			# Windows rename does not replace an existing file.
			os.remove(path)
		os.rename(path+'!', path)
		if self.web and not self.copied_from:
			EditDesc(self.name, desc=self.desc,
				reviewers=JoinComma(self.reviewer), cc=JoinComma(self.cc),
				private=self.private)
	def Delete(self, ui, repo):
		"""Remove the CL's on-disk file."""
		dir = CodeReviewDir(ui, repo)
		os.unlink(dir + "/cl." + self.name)
	def Subject(self):
		"""Return the mail subject: truncated first description line, tagged with the CL number."""
		s = line1(self.desc)
		if len(s) > 60:
			s = s[0:55] + "..."
		if self.name != "new":
			s = "code review %s: %s" % (self.name, s)
		typecheck(s, str)
		return s
	def Upload(self, ui, repo, send_mail=False, gofmt=True, gofmt_just_warn=False, creating=False, quiet=False):
		"""Upload the CL metadata (and diffs, unless creating) to the code review server.

		Updates self.name/self.url from the server response and flushes to disk.
		NOTE(review): rpc, upload_options, MAX_UPLOAD_SIZE, MySend,
		EncodeMultipartFormData, MercurialVCS, UploadSeparatePatches are
		defined elsewhere in this file (not visible in this chunk).
		"""
		if not self.files and not creating:
			ui.warn("no files in change list\n")
		if ui.configbool("codereview", "force_gofmt", True) and gofmt:
			CheckFormat(ui, repo, self.files, just_warn=gofmt_just_warn)
		set_status("uploading CL metadata + diffs")
		os.chdir(repo.root)
		form_fields = [
			("content_upload", "1"),
			("reviewers", JoinComma(self.reviewer)),
			("cc", JoinComma(self.cc)),
			("description", self.desc),
			("base_hashes", ""),
		]
		if self.name != "new":
			form_fields.append(("issue", self.name))
		vcs = None
		# We do not include files when creating the issue,
		# because we want the patch sets to record the repository
		# and base revision they are diffs against. We use the patch
		# set message for that purpose, but there is no message with
		# the first patch set. Instead the message gets used as the
		# new CL's overall subject. So omit the diffs when creating
		# and then we'll run an immediate upload.
		# This has the effect that every CL begins with an empty "Patch set 1".
		if self.files and not creating:
			vcs = MercurialVCS(upload_options, ui, repo)
			data = vcs.GenerateDiff(self.files)
			files = vcs.GetBaseFiles(data)
			if len(data) > MAX_UPLOAD_SIZE:
				# Too big for a single request; patches go up separately below.
				uploaded_diff_file = []
				form_fields.append(("separate_patches", "1"))
			else:
				uploaded_diff_file = [("data", "data.diff", data)]
		else:
			uploaded_diff_file = [("data", "data.diff", emptydiff)]
		if vcs and self.name != "new":
			form_fields.append(("subject", "diff -r " + vcs.base_rev + " " + ui.expandpath("default")))
		else:
			# First upload sets the subject for the CL itself.
			form_fields.append(("subject", self.Subject()))
		ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
		response_body = MySend("/upload", body, content_type=ctype)
		patchset = None
		msg = response_body
		lines = msg.splitlines()
		if len(lines) >= 2:
			msg = lines[0]
			patchset = lines[1].strip()
			patches = [x.split(" ", 1) for x in lines[2:]]
		if response_body.startswith("Issue updated.") and quiet:
			pass
		else:
			ui.status(msg + "\n")
		set_status("uploaded CL metadata + diffs")
		if not response_body.startswith("Issue created.") and not response_body.startswith("Issue updated."):
			raise hg_util.Abort("failed to update issue: " + response_body)
		# The server reply names the issue URL; the CL number is its last segment.
		issue = msg[msg.rfind("/")+1:]
		self.name = issue
		if not self.url:
			self.url = server_url_base + self.name
		if not uploaded_diff_file:
			set_status("uploading patches")
			patches = UploadSeparatePatches(issue, rpc, patchset, data, upload_options)
		if vcs:
			set_status("uploading base files")
			vcs.UploadBaseFiles(issue, rpc, patches, patchset, upload_options, files)
		if send_mail:
			set_status("sending mail")
			MySend("/" + issue + "/mail", payload="")
		self.web = True
		set_status("flushing changes to disk")
		self.Flush(ui, repo)
		return
	def Mail(self, ui, repo):
		"""Post the 'please review' message for this CL and mark it mailed."""
		pmsg = "Hello " + JoinComma(self.reviewer)
		if self.cc:
			pmsg += " (cc: %s)" % (', '.join(self.cc),)
		pmsg += ",\n"
		pmsg += "\n"
		repourl = ui.expandpath("default")
		if not self.mailed:
			pmsg += "I'd like you to review this change to\n" + repourl + "\n"
		else:
			pmsg += "Please take another look.\n"
		typecheck(pmsg, str)
		PostMessage(ui, self.name, pmsg, subject=self.Subject())
		self.mailed = True
		self.Flush(ui, repo)
def GoodCLName(name):
	# Report whether name looks like a CL number (digits only).
	# Returns the regexp match object (truthy) or None.
	typecheck(name, str)
	return re.match("^[0-9]+$", name)
def ParseCL(text, name):
	"""Parse the editor/disk text of a CL into a CL object named name.

	Returns (cl, 0, '') on success or (None, lineno, errmsg) on failure.
	"""
	typecheck(text, str)
	typecheck(name, str)
	sname = None  # section currently being accumulated
	lineno = 0
	sections = {
		'Author': '',
		'Description': '',
		'Files': '',
		'URL': '',
		'Reviewer': '',
		'CC': '',
		'Mailed': '',
		'Private': '',
	}
	for line in text.split('\n'):
		lineno += 1
		line = line.rstrip()
		if line != '' and line[0] == '#':
			continue
		# Indented (or blank) lines continue the current section.
		if line == '' or line[0] == ' ' or line[0] == '\t':
			if sname == None and line != '':
				return None, lineno, 'text outside section'
			if sname != None:
				sections[sname] += line + '\n'
			continue
		# Unindented line: must be a "Name: [value]" section header.
		p = line.find(':')
		if p >= 0:
			s, val = line[:p].strip(), line[p+1:].strip()
			if s in sections:
				sname = s
				if val != '':
					sections[sname] += val + '\n'
				continue
		return None, lineno, 'malformed section header'
	for k in sections:
		sections[k] = StripCommon(sections[k]).rstrip()
	cl = CL(name)
	if sections['Author']:
		cl.copied_from = sections['Author']
	cl.desc = sections['Description']
	for line in sections['Files'].split('\n'):
		# Strip trailing # comments from each file line.
		i = line.find('#')
		if i >= 0:
			line = line[0:i].rstrip()
		line = line.strip()
		if line == '':
			continue
		cl.files.append(line)
	cl.reviewer = SplitCommaSpace(sections['Reviewer'])
	cl.cc = SplitCommaSpace(sections['CC'])
	cl.url = sections['URL']
	if sections['Mailed'] != 'False':
		# Odd default, but avoids spurious mailings when
		# reading old CLs that do not have a Mailed: line.
		# CLs created with this update will always have
		# Mailed: False on disk.
		cl.mailed = True
	if sections['Private'] in ('True', 'true', 'Yes', 'yes'):
		cl.private = True
	if cl.desc == '<enter description here>':
		cl.desc = ''
	return cl, 0, ''
def SplitCommaSpace(s):
	"""Split a comma-separated string into a list; empty input yields []."""
	typecheck(s, str)
	trimmed = s.strip()
	if not trimmed:
		return []
	return re.split(", *", trimmed)
def CutDomain(s):
	"""Strip an @domain suffix from an address, returning the local part."""
	typecheck(s, str)
	at = s.find('@')
	return s[:at] if at >= 0 else s
def JoinComma(l):
	"""Join a list of strings with ', ', type-checking each element."""
	for item in l:
		typecheck(item, str)
	return ", ".join(l)
def ExceptionDetail():
	"""Format the currently-handled exception as 'TypeName: message'."""
	s = str(sys.exc_info()[0])
	# str() of an exception class looks like "<type 'X'>" or "<class 'X'>";
	# strip the wrapper to leave just the name.
	for prefix, suffix in (("<type '", "'>"), ("<class '", "'>")):
		if s.startswith(prefix) and s.endswith(suffix):
			s = s[len(prefix):-len(suffix)]
			break
	arg = str(sys.exc_info()[1])
	if arg:
		s += ": " + arg
	return s
def IsLocalCL(ui, repo, name):
	# True if name is a well-formed CL number and cl.<name> exists on disk.
	return GoodCLName(name) and os.access(CodeReviewDir(ui, repo) + "/cl." + name, 0)
# Load CL from disk and/or the web.
def LoadCL(ui, repo, name, web=True):
	"""Load CL name from the local cl.<name> file and, if web, the server.

	Returns (cl, '') on success or (None, errmsg) on failure.
	"""
	typecheck(name, str)
	set_status("loading CL " + name)
	if not GoodCLName(name):
		return None, "invalid CL name"
	dir = CodeReviewDir(ui, repo)
	# CodeReviewDir returns a path ending in '/', so no separator is needed.
	path = dir + "cl." + name
	if os.access(path, 0):
		ff = open(path)
		text = ff.read()
		ff.close()
		cl, lineno, err = ParseCL(text, name)
		if err != "":
			return None, "malformed CL data: "+err
		cl.local = True
	else:
		cl = CL(name)
	if web:
		set_status("getting issue metadata from web")
		d = JSONGet(ui, "/api/" + name + "?messages=true")
		set_status(None)
		if d is None:
			return None, "cannot load CL %s from server" % (name,)
		if 'owner_email' not in d or 'issue' not in d or str(d['issue']) != name:
			return None, "malformed response loading CL data from code review server"
		cl.dict = d
		cl.reviewer = d.get('reviewers', [])
		cl.cc = d.get('cc', [])
		if cl.local and cl.copied_from and cl.desc:
			# local copy of CL written by someone else
			# and we saved a description. use that one,
			# so that committers can edit the description
			# before doing hg submit.
			pass
		else:
			cl.desc = d.get('description', "")
		cl.url = server_url_base + name
		cl.web = True
		cl.private = d.get('private', False) != False
		cl.lgtm = []
		# Collect LGTM approvals from the review messages.
		for m in d.get('messages', []):
			if m.get('approval', False) == True:
				who = re.sub('@.*', '', m.get('sender', ''))
				text = re.sub("\n(.|\n)*", '', m.get('text', ''))
				cl.lgtm.append((who, text))
	set_status("loaded CL " + name)
	return cl, ''
class LoadCLThread(threading.Thread):
	"""Thread that loads one CL file (named 'cl.nnnnnn') via LoadCL.

	The loaded CL (or None on failure) is left in self.cl.
	"""
	def __init__(self, ui, repo, dir, f, web):
		threading.Thread.__init__(self)
		self.ui = ui
		self.repo = repo
		self.dir = dir
		self.f = f  # file name of form "cl.nnnnnn"
		self.web = web
		self.cl = None
	def run(self):
		# f[3:] strips the "cl." prefix to get the CL number.
		cl, err = LoadCL(self.ui, self.repo, self.f[3:], web=self.web)
		if err != '':
			self.ui.warn("loading "+self.dir+self.f+": " + err + "\n")
			return
		self.cl = cl
# Load all the CLs from this repository.
def LoadAllCL(ui, repo, web=True):
	"""Return a dict mapping CL number to CL for every cl.* file on disk.

	When web is true the server metadata is fetched too, in parallel,
	one thread per CL.
	"""
	dir = CodeReviewDir(ui, repo)
	m = {}
	files = [f for f in os.listdir(dir) if f.startswith('cl.')]
	if not files:
		return m
	active = []
	first = True
	for f in files:
		t = LoadCLThread(ui, repo, dir, f, web)
		t.start()
		if web and first:
			# first request: wait in case it needs to authenticate
			# otherwise we get lots of user/password prompts
			# running in parallel.
			t.join()
			if t.cl:
				m[t.cl.name] = t.cl
			first = False
		else:
			active.append(t)
	for t in active:
		t.join()
		if t.cl:
			m[t.cl.name] = t.cl
	return m
# Find repository root. On error, ui.warn and return None
def RepoDir(ui, repo):
	"""Return the local filesystem path of the repository root, or None."""
	url = repo.url()
	if not url.startswith('file:'):
		ui.warn("repository %s is not in local file system\n" % (url,))
		return None
	url = url[len('file:'):]
	if url.endswith('/'):
		url = url[:-1]
	typecheck(url, str)
	return url
# Find (or make) code review directory. On error, ui.warn and return None
def CodeReviewDir(ui, repo):
	"""Return the .hg/codereview/ path (with trailing slash), creating it if needed.

	Returns None (after ui.warn) if the repository is not local or the
	directory cannot be created.
	"""
	dir = RepoDir(ui, repo)
	if dir is None:
		return None
	dir += '/.hg/codereview/'
	if not os.path.isdir(dir):
		try:
			# 0o700: private to the user; Python 2.6+/3 octal syntax.
			os.mkdir(dir, 0o700)
		except OSError:
			# Narrowed from a bare except: only filesystem failures are expected here.
			ui.warn('cannot mkdir %s: %s\n' % (dir, ExceptionDetail()))
			return None
	typecheck(dir, str)
	return dir
# Turn leading tabs into spaces, so that the common white space
# prefix doesn't get confused when people's editors write out
# some lines with spaces, some with tabs. Only a heuristic
# (some editors don't use 8 spaces either) but a useful one.
def TabsToSpaces(line):
	"""Replace each leading tab in line with eight spaces; interior tabs are untouched."""
	body = line.lstrip('\t')
	ntabs = len(line) - len(body)
	return ' ' * (8 * ntabs) + body
# Strip maximal common leading white space prefix from text
def StripCommon(text):
	"""Remove the longest common leading-whitespace prefix from text's lines.

	Leading tabs are first normalized via TabsToSpaces; blank lines are
	ignored when computing the prefix and collapsed at the result's start
	and end.
	"""
	typecheck(text, str)
	ws = None  # common whitespace prefix seen so far (None until first line)
	for line in text.split('\n'):
		line = line.rstrip()
		if line == '':
			continue
		line = TabsToSpaces(line)
		# Leading whitespace of this line.
		white = line[:len(line)-len(line.lstrip())]
		if ws == None:
			ws = white
		else:
			# Shrink ws to the longest prefix it shares with white.
			common = ''
			for i in range(min(len(white), len(ws))+1):
				if white[0:i] == ws[0:i]:
					common = white[0:i]
			ws = common
			if ws == '':
				break
	if ws == None:
		return text
	t = ''
	for line in text.split('\n'):
		line = line.rstrip()
		line = TabsToSpaces(line)
		if line.startswith(ws):
			line = line[len(ws):]
		# Drop leading blank lines.
		if line == '' and t == '':
			continue
		t += line + '\n'
	# Collapse trailing blank lines to a single newline.
	while len(t) >= 2 and t[-2:] == '\n\n':
		t = t[:-1]
	typecheck(t, str)
	return t
# Indent text with indent.
def Indent(text, indent):
	"""Prefix every line of text with indent; each line ends with a newline."""
	typecheck(text, str)
	typecheck(indent, str)
	out = ''.join(indent + ln + '\n' for ln in text.split('\n'))
	typecheck(out, str)
	return out
# Return the first line of l
def line1(text):
	"""Return text up to (but not including) the first newline."""
	typecheck(text, str)
	return text.partition('\n')[0]
_change_prolog = """# Change list.
# Lines beginning with # are ignored.
# Multi-line values should be indented.
"""
desc_re = '^(.+: |(tag )?(release|weekly)\.|fix build|undo CL)'
desc_msg = '''Your CL description appears not to use the standard form.
The first line of your change description is conventionally a
one-line summary of the change, prefixed by the primary affected package,
and is used as the subject for code review mail; the rest of the description
elaborates.
Examples:
encoding/rot13: new package
math: add IsInf, IsNaN
net: fix cname in LookupHost
unicode: update to Unicode 5.0.2
'''
def promptyesno(ui, msg):
	# promptchoice returns the index of the chosen option; 0 == "&yes".
	return ui.promptchoice(msg, ["&yes", "&no"], 0) == 0
def promptremove(ui, repo, f):
	# Offer to 'hg remove' f; warn (but continue) if the removal fails.
	if promptyesno(ui, "hg remove %s (y/n)?" % (f,)):
		if hg_commands.remove(ui, repo, 'path:'+f) != 0:
			ui.warn("error removing %s" % (f,))
def promptadd(ui, repo, f):
	# Offer to 'hg add' f; warn (but continue) if the add fails.
	if promptyesno(ui, "hg add %s (y/n)?" % (f,)):
		if hg_commands.add(ui, repo, 'path:'+f) != 0:
			ui.warn("error adding %s" % (f,))
def EditCL(ui, repo, cl):
	"""Run the user's editor on cl, validating and applying the result.

	Loops until the edited text parses and passes the description and
	file-list checks (or the user gives up). Returns '' on success or an
	error message string.
	"""
	set_status(None)	# do not show status
	s = cl.EditorText()
	while True:
		s = ui.edit(s, ui.username())
		# We can't trust Mercurial + Python not to die before making the change,
		# so, by popular demand, just scribble the most recent CL edit into
		# $(hg root)/last-change so that if Mercurial does die, people
		# can look there for their work.
		try:
			f = open(repo.root+"/last-change", "w")
			f.write(s)
			f.close()
		except:
			pass
		clx, line, err = ParseCL(s, cl.name)
		if err != '':
			if not promptyesno(ui, "error parsing change list: line %d: %s\nre-edit (y/n)?" % (line, err)):
				return "change list not modified"
			continue
		# Check description.
		if clx.desc == '':
			if promptyesno(ui, "change list should have a description\nre-edit (y/n)?"):
				continue
		elif re.search('<enter reason for undo>', clx.desc):
			if promptyesno(ui, "change list description omits reason for undo\nre-edit (y/n)?"):
				continue
		elif not re.match(desc_re, clx.desc.split('\n')[0]):
			if promptyesno(ui, desc_msg + "re-edit (y/n)?"):
				continue
		# Check file list for files that need to be hg added or hg removed
		# or simply aren't understood.
		pats = ['path:'+f for f in clx.files]
		changed = hg_matchPattern(ui, repo, *pats, modified=True, added=True, removed=True)
		deleted = hg_matchPattern(ui, repo, *pats, deleted=True)
		unknown = hg_matchPattern(ui, repo, *pats, unknown=True)
		ignored = hg_matchPattern(ui, repo, *pats, ignored=True)
		clean = hg_matchPattern(ui, repo, *pats, clean=True)
		files = []
		for f in clx.files:
			if f in changed:
				files.append(f)
				continue
			if f in deleted:
				promptremove(ui, repo, f)
				files.append(f)
				continue
			if f in unknown:
				promptadd(ui, repo, f)
				files.append(f)
				continue
			if f in ignored:
				ui.warn("error: %s is excluded by .hgignore; omitting\n" % (f,))
				continue
			if f in clean:
				ui.warn("warning: %s is listed in the CL but unchanged\n" % (f,))
				files.append(f)
				continue
			p = repo.root + '/' + f
			if os.path.isfile(p):
				ui.warn("warning: %s is a file but not known to hg\n" % (f,))
				files.append(f)
				continue
			if os.path.isdir(p):
				ui.warn("error: %s is a directory, not a file; omitting\n" % (f,))
				continue
			ui.warn("error: %s does not exist; omitting\n" % (f,))
		clx.files = files
		# Copy the validated edits back into cl.
		cl.desc = clx.desc
		cl.reviewer = clx.reviewer
		cl.cc = clx.cc
		cl.files = clx.files
		cl.private = clx.private
		break
	return ""
# For use by submit, etc. (NOT by change)
# Get change list number or list of files from command line.
# If files are given, make a new change list.
def CommandLineCL(ui, repo, pats, opts, defaultcc=None):
	"""Resolve command-line pats/opts into a CL object.

	pats is either a single CL number or a list of file patterns (which
	creates a new CL). Returns (cl, '') on success or (None, errmsg).
	"""
	if len(pats) > 0 and GoodCLName(pats[0]):
		# Existing CL number given on the command line.
		if len(pats) != 1:
			return None, "cannot specify change number and file names"
		if opts.get('message'):
			return None, "cannot use -m with existing CL"
		cl, err = LoadCL(ui, repo, pats[0], web=True)
		if err != "":
			return None, err
	else:
		# File patterns given: build a fresh CL from the changed files.
		cl = CL("new")
		cl.local = True
		cl.files = ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
		if not cl.files:
			return None, "no files changed"
	if opts.get('reviewer'):
		cl.reviewer = Add(cl.reviewer, SplitCommaSpace(opts.get('reviewer')))
	if opts.get('cc'):
		cl.cc = Add(cl.cc, SplitCommaSpace(opts.get('cc')))
	if defaultcc:
		cl.cc = Add(cl.cc, defaultcc)
	if cl.name == "new":
		if opts.get('message'):
			cl.desc = opts.get('message')
		else:
			err = EditCL(ui, repo, cl)
			if err != '':
				return None, err
	return cl, ""
#######################################################################
# Change list file management
# Return list of changed files in repository that match pats.
# The patterns came from the command line, so we warn
# if they have no effect or cannot be understood.
def ChangedFiles(ui, repo, pats, taken=None):
	"""Return the sorted list of changed files matching pats.

	Prompts to hg add/remove unknown or missing files; files already
	claimed by CLs in taken (file -> CL) are warned about and excluded.
	"""
	taken = taken or {}
	# Run each pattern separately so that we can warn about
	# patterns that didn't do anything useful.
	for p in pats:
		for f in hg_matchPattern(ui, repo, p, unknown=True):
			promptadd(ui, repo, f)
		for f in hg_matchPattern(ui, repo, p, removed=True):
			promptremove(ui, repo, f)
		files = hg_matchPattern(ui, repo, p, modified=True, added=True, removed=True)
		for f in files:
			if f in taken:
				ui.warn("warning: %s already in CL %s\n" % (f, taken[f].name))
		if not files:
			ui.warn("warning: %s did not match any modified files\n" % (p,))
	# Again, all at once (eliminates duplicates)
	l = hg_matchPattern(ui, repo, *pats, modified=True, added=True, removed=True)
	l.sort()
	if taken:
		l = Sub(l, taken.keys())
	return l
# Return list of changed files in repository that match pats and still exist.
def ChangedExistingFiles(ui, repo, pats, opts):
	"""Sorted list of modified or added files matching pats (opts unused)."""
	matched = hg_matchPattern(ui, repo, *pats, modified=True, added=True)
	return sorted(matched)
# Return list of files claimed by existing CLs
def Taken(ui, repo):
	"""Map each file claimed by an existing local CL to that CL object."""
	taken = {}
	for cl in LoadAllCL(ui, repo, web=False).values():
		for f in cl.files:
			taken[f] = cl
	return taken
# Return list of changed files that are not claimed by other CLs
def DefaultFiles(ui, repo, pats):
	# Convenience wrapper: changed files matching pats, minus files already in CLs.
	return ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
#######################################################################
# File format checking.
def CheckFormat(ui, repo, files, just_warn=False):
	"""Run all source-format checks (gofmt, tab indentation) over files.

	just_warn: warn about problems instead of aborting.
	"""
	set_status("running gofmt")
	CheckGofmt(ui, repo, files, just_warn)
	CheckTabfmt(ui, repo, files, just_warn)
# Check that gofmt run on the list of files does not change them
def CheckGofmt(ui, repo, files, just_warn):
	"""Run 'gofmt -l' over the Go files in files.

	Warns (just_warn) or aborts if any file needs reformatting or gofmt
	reports errors. Files that don't exist locally are skipped.
	"""
	files = gofmt_required(files)
	if not files:
		return
	cwd = os.getcwd()
	files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
	files = [f for f in files if os.access(f, 0)]
	if not files:
		return
	try:
		cmd = subprocess.Popen(["gofmt", "-l"] + files, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=sys.platform != "win32")
	except Exception:
		raise hg_util.Abort("gofmt: " + ExceptionDetail())
	# communicate() closes stdin and drains stdout/stderr concurrently,
	# avoiding the pipe-buffer deadlock that sequential .read() calls on
	# stdout then stderr can hit; it also waits for the process.
	data, errors = cmd.communicate()
	set_status("done with gofmt")
	if len(errors) > 0:
		ui.warn("gofmt errors:\n" + errors.rstrip() + "\n")
		return
	if len(data) > 0:
		msg = "gofmt needs to format these files (run hg gofmt):\n" + Indent(data, "\t").rstrip()
		if just_warn:
			ui.warn("warning: " + msg + "\n")
		else:
			raise hg_util.Abort(msg)
	return
# Check that *.[chys] files indent using tabs.
def CheckTabfmt(ui, repo, files, just_warn):
	"""Verify that C/asm sources under src/ use tabs, not spaces, to indent.

	Warns (just_warn) or aborts listing the offending files.
	"""
	files = [f for f in files if f.startswith('src/') and re.search(r"\.[chys]$", f) and not re.search(r"\.tab\.[ch]$", f)]
	if not files:
		return
	cwd = os.getcwd()
	files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
	files = [f for f in files if os.access(f, 0)]
	badfiles = []
	for f in files:
		try:
			# 'with' guarantees the file is closed even on early break
			# (the original leaked the handle).
			with open(f, 'r') as fp:
				for line in fp:
					# Four leading spaces is enough to complain about,
					# except that some Plan 9 code uses four spaces as the label indent,
					# so allow that.
					if line.startswith('    ') and not re.match('    [A-Za-z0-9_]+:', line):
						badfiles.append(f)
						break
		except (IOError, OSError):
			# ignore cannot open file, etc.
			pass
	if len(badfiles) > 0:
		msg = "these files use spaces for indentation (use tabs instead):\n\t" + "\n\t".join(badfiles)
		if just_warn:
			ui.warn("warning: " + msg + "\n")
		else:
			raise hg_util.Abort(msg)
	return
#######################################################################
# CONTRIBUTORS file parsing
contributorsCache = None  # memoized result of ReadContributors
contributorsURL = None  # optional URL to fetch CONTRIBUTORS from instead of the repo copy
def ReadContributors(ui, repo):
	"""Parse the CONTRIBUTORS file into a dict keyed by lowercased email.

	Each value is (name, primary_email); alternate emails map to the same
	pair. The result is cached in contributorsCache. Returns None (after a
	warning) if the file cannot be opened.
	"""
	global contributorsCache
	if contributorsCache is not None:
		return contributorsCache
	try:
		if contributorsURL is not None:
			opening = contributorsURL
			import urllib2  # imported lazily; only the URL path needs it
			f = urllib2.urlopen(contributorsURL)
		else:
			opening = repo.root + '/CONTRIBUTORS'
			f = open(opening, 'r')
	except:
		ui.write("warning: cannot open %s: %s\n" % (opening, ExceptionDetail()))
		return
	try:
		contributors = {}
		for line in f:
			# CONTRIBUTORS is a list of lines like:
			#	Person <email>
			#	Person <email> <alt-email>
			# The first email address is the one used in commit logs.
			if line.startswith('#'):
				continue
			m = re.match(r"([^<>]+\S)\s+(<[^<>\s]+>)((\s+<[^<>\s]+>)*)\s*$", line)
			if m:
				name = m.group(1)
				email = m.group(2)[1:-1]
				contributors[email.lower()] = (name, email)
				for extra in m.group(3).split():
					contributors[extra[1:-1].lower()] = (name, email)
	finally:
		# Close the file/URL handle (the original leaked it).
		f.close()
	contributorsCache = contributors
	return contributors
def CheckContributor(ui, repo, user=None):
	"""Return the canonical 'Name <email>' line for user, aborting if absent.

	user defaults to the configured Mercurial username (see FindContributor).
	"""
	set_status("checking CONTRIBUTORS file")
	user, userline = FindContributor(ui, repo, user, warn=False)
	if not userline:
		raise hg_util.Abort("cannot find %s in CONTRIBUTORS" % (user,))
	return userline
def FindContributor(ui, repo, user=None, warn=True):
	"""Look user up in CONTRIBUTORS.

	Returns (email, 'Name <email>') on success, or (user, None) if not
	found (optionally warning). user defaults to [ui] username from hgrc;
	a 'Name <email>' form is reduced to just the email before lookup.
	"""
	if not user:
		user = ui.config("ui", "username")
		if not user:
			raise hg_util.Abort("[ui] username is not configured in .hgrc")
	user = user.lower()
	m = re.match(r".*<(.*)>", user)
	if m:
		user = m.group(1)
	contributors = ReadContributors(ui, repo)
	if user not in contributors:
		if warn:
			ui.warn("warning: cannot find %s in CONTRIBUTORS\n" % (user,))
		return user, None
	user, email = contributors[user]
	return email, "%s <%s>" % (user, email)
#######################################################################
# Mercurial helper functions.
# Read http://mercurial.selenic.com/wiki/MercurialApi before writing any of these.
# We use the ui.pushbuffer/ui.popbuffer + hg_commands.xxx tricks for all interaction
# with Mercurial. It has proved the most stable as they make changes.
hgversion = hg_util.version()

# We require Mercurial 1.9 and suggest Mercurial 2.0.
# The details of the scmutil package changed then,
# so allowing earlier versions would require extra band-aids below.
# Ubuntu 11.10 ships with Mercurial 1.9.1 as the default version.
hg_required = "1.9"
hg_suggested = "2.0"

old_message = """
The code review extension requires Mercurial """+hg_required+""" or newer.
You are using Mercurial """+hgversion+""".
To install a new Mercurial, use
	sudo easy_install mercurial=="""+hg_suggested+"""
or visit http://mercurial.selenic.com/downloads/.
"""

linux_message = """
You may need to clear your current Mercurial installation by running:
	sudo apt-get remove mercurial mercurial-common
	sudo rm -rf /etc/mercurial
"""

def _hg_version_tuple(v):
	# Parse a leading dotted numeric version ("1.9.1", "2.0-rc") into a
	# tuple of ints so comparison is numeric: plain string comparison
	# would wrongly order "1.10" before "1.9".
	nums = []
	for piece in v.split('.'):
		m = re.match(r"\d+", piece)
		if not m:
			break
		nums.append(int(m.group()))
	return tuple(nums)

_hgv = _hg_version_tuple(hgversion)
# Skip the check entirely if the version string is unparsable, matching
# the old behavior for non-numeric version strings.
if _hgv and _hgv < _hg_version_tuple(hg_required):
	msg = old_message
	if os.access("/etc/mercurial", 0):
		msg += linux_message
	raise hg_util.Abort(msg)
from mercurial.hg import clean as hg_clean
from mercurial import cmdutil as hg_cmdutil
from mercurial import error as hg_error
from mercurial import match as hg_match
from mercurial import node as hg_node
class uiwrap(object):
	"""Capture a Mercurial command's output.

	Construction pushes a ui buffer and forces quiet=True/verbose=False;
	output() restores the saved flags and returns everything buffered.
	"""
	def __init__(self, ui):
		self.ui = ui
		ui.pushbuffer()
		self.oldQuiet = ui.quiet
		ui.quiet = True
		self.oldVerbose = ui.verbose
		ui.verbose = False
	def output(self):
		"""Restore ui's flags and return the buffered output text."""
		ui = self.ui
		ui.quiet = self.oldQuiet
		ui.verbose = self.oldVerbose
		return ui.popbuffer()
def to_slash(path):
	"""Normalize Windows backslash separators to forward slashes (no-op elsewhere)."""
	if sys.platform != "win32":
		return path
	return path.replace('\\', '/')
def hg_matchPattern(ui, repo, *pats, **opts):
	# Run 'hg status' with the given patterns/options and return the
	# reported file names as a list of slash-separated paths relative
	# to the repository root.
	w = uiwrap(ui)
	hg_commands.status(ui, repo, *pats, **opts)
	text = w.output()
	ret = []
	prefix = to_slash(os.path.realpath(repo.root))+'/'
	for line in text.split('\n'):
		# Status lines look like "M path"; f[1] is the file name.
		f = line.split()
		if len(f) > 1:
			if len(pats) > 0:
				# Given patterns, Mercurial shows relative to cwd
				p = to_slash(os.path.realpath(f[1]))
				if not p.startswith(prefix):
					print >>sys.stderr, "File %s not in repo root %s.\n" % (p, prefix)
				else:
					ret.append(p[len(prefix):])
			else:
				# Without patterns, Mercurial shows relative to root (what we want)
				ret.append(to_slash(f[1]))
	return ret
def hg_heads(ui, repo):
	"""Return the output of 'hg heads' as a single string."""
	buf = uiwrap(ui)
	hg_commands.heads(ui, repo)
	return buf.output()
# Lines of routine Mercurial chatter that we suppress when echoing
# command output back to the user.
noise = [
	"",
	"resolving manifests",
	"searching for changes",
	"couldn't find merge tool hgmerge",
	"adding changesets",
	"adding manifests",
	"adding file changes",
	"all local heads known remotely",
]

def isNoise(line):
	"""Report whether line is known Mercurial chatter to be hidden."""
	# Membership test replaces the original manual equality loop.
	return str(line) in noise
def hg_incoming(ui, repo):
	"""Return the output of 'hg incoming' (empty when nothing pending)."""
	buf = uiwrap(ui)
	err = hg_commands.incoming(ui, repo, force=False, bundle="")
	# incoming returns 1 to mean "no changes"; anything else is an error.
	if err and err != 1:
		raise hg_util.Abort(err)
	return buf.output()
def hg_log(ui, repo, **opts):
	"""Return the output of 'hg log' run with the given options.

	Mercurial's log command requires the date/keyword/rev/user options
	to be present, so empty defaults are filled in first.
	"""
	for k in ['date', 'keyword', 'rev', 'user']:
		# dict.has_key is deprecated; setdefault fills the default in one step.
		opts.setdefault(k, "")
	w = uiwrap(ui)
	ret = hg_commands.log(ui, repo, **opts)
	if ret:
		raise hg_util.Abort(ret)
	return w.output()
def hg_outgoing(ui, repo, **opts):
	"""Return the output of 'hg outgoing'."""
	buf = uiwrap(ui)
	err = hg_commands.outgoing(ui, repo, **opts)
	# outgoing returns 1 to mean "no changes"; anything else is an error.
	if err and err != 1:
		raise hg_util.Abort(err)
	return buf.output()
def hg_pull(ui, repo, **opts):
	# Run 'hg pull', rewriting its verbose per-file output into a short
	# mv/+/- summary and dropping known noise lines.
	w = uiwrap(ui)
	ui.quiet = False
	ui.verbose = True # for file list
	err = hg_commands.pull(ui, repo, **opts)
	for line in w.output().split('\n'):
		if isNoise(line):
			continue
		if line.startswith('moving '):
			line = 'mv ' + line[len('moving '):]
		# "getting X to Y" is a rename; the plain "getting X" case below
		# no longer matches once this rewrite has fired.
		if line.startswith('getting ') and line.find(' to ') >= 0:
			line = 'mv ' + line[len('getting '):]
		if line.startswith('getting '):
			line = '+ ' + line[len('getting '):]
		if line.startswith('removing '):
			line = '- ' + line[len('removing '):]
		ui.write(line + '\n')
	return err
def hg_push(ui, repo, **opts):
	"""Run 'hg push', echoing its output minus known noise lines."""
	buf = uiwrap(ui)
	# Turn verbosity back on so the push reports what it did.
	ui.quiet = False
	ui.verbose = True
	err = hg_commands.push(ui, repo, **opts)
	for line in buf.output().split('\n'):
		if isNoise(line):
			continue
		ui.write(line + '\n')
	return err
def hg_commit(ui, repo, *pats, **opts):
	# Thin wrapper over 'hg commit'; submit() sets commit_okay around this
	# call so the precommit hook lets the commit through.
	return hg_commands.commit(ui, repo, *pats, **opts)
#######################################################################
# Mercurial precommit hook to disable commit except through this interface.
# Set True only around the commit made by submit(); everything else is rejected.
commit_okay = False

def precommithook(ui, repo, **opts):
	"""Pre-commit hook: reject commits made outside this extension."""
	if not commit_okay:
		ui.write("\ncodereview extension enabled; use mail, upload, or submit instead of commit\n\n")
		return True # True means the commit is rejected.
	return False # False means okay.
#######################################################################
# @clnumber file pattern support
# We replace scmutil.match with the MatchAt wrapper to add the @clnumber pattern.
# State recorded by InstallMatch for MatchAt, which is called by
# Mercurial without direct access to the ui/repo it should consult.
match_repo = None
match_ui = None
match_orig = None
def InstallMatch(ui, repo):
	# Replace scmutil.match with MatchAt so @clnumber file patterns work.
	global match_repo
	global match_ui
	global match_orig
	match_ui = ui
	match_repo = repo
	from mercurial import scmutil
	# Keep the original so MatchAt can delegate to it.
	match_orig = scmutil.match
	scmutil.match = MatchAt
def MatchAt(ctx, pats=None, opts=None, globbed=False, default='relpath'):
	# Replacement for scmutil.match that expands @clnumber patterns into
	# the explicit file list of that CL before delegating to the original.
	taken = []
	files = []
	pats = pats or []
	opts = opts or {}
	for p in pats:
		if p.startswith('@'):
			taken.append(p)
			clname = p[1:]
			if clname == "default":
				# @default means the modified files not assigned to any CL.
				files = DefaultFiles(match_ui, match_repo, [])
			else:
				if not GoodCLName(clname):
					raise hg_util.Abort("invalid CL name " + clname)
				cl, err = LoadCL(match_repo.ui, match_repo, clname, web=False)
				if err != '':
					raise hg_util.Abort("loading CL " + clname + ": " + err)
				if not cl.files:
					raise hg_util.Abort("no files in CL " + clname)
				files = Add(files, cl.files)
	# Replace the consumed @clname patterns with explicit path: patterns.
	pats = Sub(pats, taken) + ['path:'+f for f in files]
	# work-around for http://selenic.com/hg/rev/785bbc8634f8
	if not hasattr(ctx, 'match'):
		ctx = ctx[None]
	return match_orig(ctx, pats=pats, opts=opts, globbed=globbed, default=default)
#######################################################################
# Commands added by code review extension.
# As of Mercurial 2.1 the commands are all required to return integer
# exit codes, whereas earlier versions allowed returning arbitrary strings
# to be printed as errors. We wrap the old functions to make sure we
# always return integer exit codes now. Otherwise Mercurial dies
# with a TypeError traceback (unsupported operand type(s) for &: 'str' and 'int').
# Introduce a Python decorator to convert old functions to the new
# stricter convention.
def hgcommand(f):
	"""Decorator adapting an old-style command, which may return an error
	string, to Mercurial's integer-exit-code convention (>= 2.1)."""
	def wrapped(ui, repo, *pats, **opts):
		result = f(ui, repo, *pats, **opts)
		# Exact type check (not isinstance) on purpose: a bool or other
		# int subclass result must not be passed through as an exit code.
		if type(result) is int:
			return result
		if not result:
			return 0
		# Non-empty string: report it as the command's error.
		raise hg_util.Abort(result)
	wrapped.__doc__ = f.__doc__
	return wrapped
#######################################################################
# hg change
@hgcommand
def change(ui, repo, *pats, **opts):
	"""create, edit or delete a change list
	Create, edit or delete a change list.
	A change list is a group of files to be reviewed and submitted together,
	plus a textual description of the change.
	Change lists are referred to by simple alphanumeric names.
	Changes must be reviewed before they can be submitted.
	In the absence of options, the change command opens the
	change list for editing in the default editor.
	Deleting a change with the -d or -D flag does not affect
	the contents of the files listed in that change. To revert
	the files listed in a change, use
	hg revert @123456
	before running hg change -d 123456.
	"""
	if codereview_disabled:
		return codereview_disabled
	# CLs modified below; flushed back to disk at the end.
	dirty = {}
	if len(pats) > 0 and GoodCLName(pats[0]):
		# First argument names an existing CL: edit that CL.
		name = pats[0]
		if len(pats) != 1:
			return "cannot specify CL name and file patterns"
		pats = pats[1:]
		cl, err = LoadCL(ui, repo, name, web=True)
		if err != '':
			return err
		if not cl.local and (opts["stdin"] or not opts["stdout"]):
			return "cannot change non-local CL " + name
	else:
		# No CL name given: create a new CL from the file patterns.
		name = "new"
		cl = CL("new")
		if repo[None].branch() != "default":
			return "cannot create CL outside default branch; switch with 'hg update default'"
		dirty[cl] = True
		files = ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
	if opts["delete"] or opts["deletelocal"]:
		# Deletion: -d removes the CL on the server too, -D only locally.
		if opts["delete"] and opts["deletelocal"]:
			return "cannot use -d and -D together"
		flag = "-d"
		if opts["deletelocal"]:
			flag = "-D"
		if name == "new":
			return "cannot use "+flag+" with file patterns"
		if opts["stdin"] or opts["stdout"]:
			return "cannot use "+flag+" with -i or -o"
		if not cl.local:
			return "cannot change non-local CL " + name
		if opts["delete"]:
			if cl.copied_from:
				return "original author must delete CL; hg change -D will remove locally"
			# Close the issue on the review server before deleting locally.
			PostMessage(ui, cl.name, "*** Abandoned ***", send_mail=cl.mailed)
			EditDesc(cl.name, closed=True, private=cl.private)
		cl.Delete(ui, repo)
		return
	if opts["stdin"]:
		# Non-interactive edit: read the new CL text from standard input
		# and copy over any fields it supplies.
		s = sys.stdin.read()
		clx, line, err = ParseCL(s, name)
		if err != '':
			return "error parsing change list: line %d: %s" % (line, err)
		if clx.desc is not None:
			cl.desc = clx.desc;
			dirty[cl] = True
		if clx.reviewer is not None:
			cl.reviewer = clx.reviewer
			dirty[cl] = True
		if clx.cc is not None:
			cl.cc = clx.cc
			dirty[cl] = True
		if clx.files is not None:
			cl.files = clx.files
			dirty[cl] = True
		if clx.private != cl.private:
			cl.private = clx.private
			dirty[cl] = True
	if not opts["stdin"] and not opts["stdout"]:
		# Interactive path: open the CL in the editor.
		if name == "new":
			cl.files = files
		err = EditCL(ui, repo, cl)
		if err != "":
			return err
		dirty[cl] = True
	# Write out any CLs modified above; new CLs are also uploaded
	# so they get a server-assigned number.
	for d, _ in dirty.items():
		name = d.name
		d.Flush(ui, repo)
		if name == "new":
			d.Upload(ui, repo, quiet=True)
	if opts["stdout"]:
		ui.write(cl.EditorText())
	elif opts["pending"]:
		ui.write(cl.PendingText())
	elif name == "new":
		if ui.quiet:
			ui.write(cl.name)
		else:
			ui.write("CL created: " + cl.url + "\n")
	return
#######################################################################
# hg code-login (broken?)
@hgcommand
def code_login(ui, repo, **opts):
	"""log in to code review server
	Logs in to the code review server, saving a cookie in
	a file in your home directory.
	"""
	if codereview_disabled:
		return codereview_disabled
	# An authenticated round trip creates/refreshes the cookie file.
	MySend(None)
#######################################################################
# hg clpatch / undo / release-apply / download
# All concerned with applying or unapplying patches to the repository.
@hgcommand
def clpatch(ui, repo, clname, **opts):
	"""import a patch from the code review server
	Imports a patch from the code review server into the local client.
	If the local client has already modified any of the files that the
	patch modifies, this command will refuse to apply the patch.
	Submitting an imported patch will keep the original author's
	name as the Author: line but add your own name to a Committer: line.
	"""
	# clpatch only applies to the default branch; release branches
	# use hg release-apply instead.
	if repo[None].branch() != "default":
		return "cannot run hg clpatch outside default branch"
	return clpatch_or_undo(ui, repo, clname, opts, mode="clpatch")
@hgcommand
def undo(ui, repo, clname, **opts):
	"""undo the effect of a CL
	Creates a new CL that undoes an earlier CL.
	After creating the CL, opens the CL text for editing so that
	you can add the reason for the undo to the description.
	"""
	# Like clpatch, undo is restricted to the default branch.
	if repo[None].branch() != "default":
		return "cannot run hg undo outside default branch"
	return clpatch_or_undo(ui, repo, clname, opts, mode="undo")
@hgcommand
def release_apply(ui, repo, clname, **opts):
	"""apply a CL to the release branch
	Creates a new CL copying a previously committed change
	from the main branch to the release branch.
	The current client must either be clean or already be in
	the release branch.
	The release branch must be created by starting with a
	clean client, disabling the code review plugin, and running:
	hg update weekly.YYYY-MM-DD
	hg branch release-branch.rNN
	hg commit -m 'create release-branch.rNN'
	hg push --new-branch
	Then re-enable the code review plugin.
	People can test the release branch by running
	hg update release-branch.rNN
	in a clean client. To return to the normal tree,
	hg update default
	Move changes since the weekly into the release branch
	using hg release-apply followed by the usual code review
	process and hg submit.
	When it comes time to tag the release, record the
	final long-form tag of the release-branch.rNN
	in the *default* branch's .hgtags file. That is, run
	hg update default
	and then edit .hgtags as you would for a weekly.
	"""
	c = repo[None]
	if not releaseBranch:
		return "no active release branches"
	if c.branch() != releaseBranch:
		# Switch to the release branch, but only from a clean client.
		if c.modified() or c.added() or c.removed():
			raise hg_util.Abort("uncommitted local changes - cannot switch branches")
		err = hg_clean(repo, releaseBranch)
		if err:
			return err
	try:
		err = clpatch_or_undo(ui, repo, clname, opts, mode="backport")
		if err:
			raise hg_util.Abort(err)
	except Exception, e:
		# On any failure, switch back to the default branch before re-raising.
		hg_clean(repo, "default")
		raise e
	return None
def rev2clname(rev):
	"""Extract the CL number from a revision description.

	The last codereview URL line in the description is the real one;
	earlier ones may be part of the user-written text.  Returns ""
	when no CL number can be found.
	"""
	matches = re.findall('(?m)^http://codereview.appspot.com/([0-9]+)$', rev.description())
	if matches:
		return matches[-1]
	return ""
# Templates wrapped around the original CL description when creating an
# undo CL; %s / %s is the CL number and the short revision hash.
undoHeader = """undo CL %s / %s
<enter reason for undo>
««« original CL description
"""
undoFooter = """
»»»
"""
# Templates for a release-branch backport CL: [branch] first-line-of-desc.
backportHeader = """[%s] %s
««« CL %s / %s
"""
backportFooter = """
»»»
"""
# Implementation of clpatch/undo.
def clpatch_or_undo(ui, repo, clname, opts, mode):
	# Shared implementation of hg clpatch, hg undo, and hg release-apply;
	# mode is "clpatch", "undo", or "backport".
	if codereview_disabled:
		return codereview_disabled
	if mode == "undo" or mode == "backport":
		# Find revision in Mercurial repository.
		# Assume CL number is 7+ decimal digits.
		# Otherwise is either change log sequence number (fewer decimal digits),
		# hexadecimal hash, or tag name.
		# Mercurial will fall over long before the change log
		# sequence numbers get to be 7 digits long.
		if re.match('^[0-9]{7,}$', clname):
			found = False
			for r in hg_log(ui, repo, keyword="codereview.appspot.com/"+clname, limit=100, template="{node}\n").split():
				rev = repo[r]
				# Last line with a code review URL is the actual review URL.
				# Earlier ones might be part of the CL description.
				n = rev2clname(rev)
				if n == clname:
					found = True
					break
			if not found:
				return "cannot find CL %s in local repository" % clname
		else:
			rev = repo[clname]
			if not rev:
				return "unknown revision %s" % clname
			clname = rev2clname(rev)
			if clname == "":
				return "cannot find CL name in revision description"
		# Create fresh CL and start with patch that would reverse the change.
		vers = hg_node.short(rev.node())
		cl = CL("new")
		desc = str(rev.description())
		if mode == "undo":
			cl.desc = (undoHeader % (clname, vers)) + desc + undoFooter
		else:
			# NOTE(review): appends undoFooter rather than backportFooter;
			# harmless today because the two template strings are identical.
			cl.desc = (backportHeader % (releaseBranch, line1(desc), clname, vers)) + desc + undoFooter
		v1 = vers
		v0 = hg_node.short(rev.parents()[0].node())
		if mode == "undo":
			# Reverse diff: from the revision back to its parent.
			arg = v1 + ":" + v0
		else:
			# Forward diff: from the parent to the revision.
			vers = v0
			arg = v0 + ":" + v1
		patch = RunShell(["hg", "diff", "--git", "-r", arg])
	else: # clpatch
		cl, vers, patch, err = DownloadCL(ui, repo, clname)
		if err != "":
			return err
		if patch == emptydiff:
			return "codereview issue %s has no diff" % clname
	# find current hg version (hg identify)
	ctx = repo[None]
	parents = ctx.parents()
	id = '+'.join([hg_node.short(p.node()) for p in parents])
	# if version does not match the patch version,
	# try to update the patch line numbers.
	if vers != "" and id != vers:
		# "vers in repo" gives the wrong answer
		# on some versions of Mercurial. Instead, do the actual
		# lookup and catch the exception.
		try:
			repo[vers].description()
		except:
			return "local repository is out of date; sync to get %s" % (vers)
		patch1, err = portPatch(repo, patch, vers, id)
		if err != "":
			if not opts["ignore_hgpatch_failure"]:
				return "codereview issue %s is out of date: %s (%s->%s)" % (clname, err, vers, id)
		else:
			patch = patch1
	argv = ["hgpatch"]
	if opts["no_incoming"] or mode == "backport":
		argv += ["--checksync=false"]
	try:
		# hgpatch applies the patch and prints the changed file names.
		cmd = subprocess.Popen(argv, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, close_fds=sys.platform != "win32")
	except:
		return "hgpatch: " + ExceptionDetail() + "\nInstall hgpatch with:\n$ go get code.google.com/p/go.codereview/cmd/hgpatch\n"
	out, err = cmd.communicate(patch)
	if cmd.returncode != 0 and not opts["ignore_hgpatch_failure"]:
		return "hgpatch failed"
	cl.local = True
	cl.files = out.strip().split()
	if not cl.files and not opts["ignore_hgpatch_failure"]:
		return "codereview issue %s has no changed files" % clname
	files = ChangedFiles(ui, repo, [])
	extra = Sub(cl.files, files)
	if extra:
		ui.warn("warning: these files were listed in the patch but not changed:\n\t" + "\n\t".join(extra) + "\n")
	cl.Flush(ui, repo)
	if mode == "undo":
		# Let the user record the reason for the undo in the description.
		err = EditCL(ui, repo, cl)
		if err != "":
			return "CL created, but error editing: " + err
		cl.Flush(ui, repo)
	else:
		ui.write(cl.PendingText() + "\n")
# portPatch rewrites patch from being a patch against
# oldver to being a patch against newver.
def portPatch(repo, patch, oldver, newver):
	# Returns (newpatch, "") on success or (None-or-"", error) on failure.
	lines = patch.splitlines(True) # True = keep \n
	delta = None
	for i in range(len(lines)):
		line = lines[i]
		if line.startswith('--- a/'):
			# Start of a new file section: recompute its delta table.
			file = line[6:-1]
			delta = fileDeltas(repo, file, oldver, newver)
		if not delta or not line.startswith('@@ '):
			continue
		# @@ -x,y +z,w @@ means the patch chunk replaces
		# the original file's line numbers x up to x+y with the
		# line numbers z up to z+w in the new file.
		# Find the delta from x in the original to the same
		# line in the current version and add that delta to both
		# x and z.
		m = re.match('@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@', line)
		if not m:
			return None, "error parsing patch line numbers"
		n1, len1, n2, len2 = int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
		d, err = lineDelta(delta, n1, len1)
		if err != "":
			return "", err
		n1 += d
		n2 += d
		lines[i] = "@@ -%d,%d +%d,%d @@\n" % (n1, len1, n2, len2)
	newpatch = ''.join(lines)
	return newpatch, ""
# fileDelta returns the line number deltas for the given file's
# changes from oldver to newver.
# The deltas are a list of (n, len, newdelta) triples that say
# lines [n, n+len) were modified, and after that range the
# line numbers are +newdelta from what they were before.
def fileDeltas(repo, file, oldver, newver):
	cmd = ["hg", "diff", "--git", "-r", oldver + ":" + newver, "path:" + file]
	data = RunShell(cmd, silent_ok=True)
	deltas = []
	for line in data.splitlines():
		# Only the @@ hunk headers matter for line-number bookkeeping.
		m = re.match('@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@', line)
		if not m:
			continue
		n1, len1, n2, len2 = int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
		deltas.append((n1, len1, n2+len2-(n1+len1)))
	return deltas
# lineDelta finds the appropriate line number delta to apply to the lines [n, n+len).
# It returns an error if those lines were rewritten by the patch.
def lineDelta(deltas, n, len):
	"""Return (delta, "") for lines [n, n+len), or (0, error) on conflict.

	deltas is the fileDeltas list.  NOTE: the parameter is named 'len',
	shadowing the builtin, to keep the historical signature.
	"""
	shift = 0
	for old, oldlen, newdelta in deltas:
		if old >= n+len:
			# Remaining ranges start after ours; keep the last delta seen.
			break
		if old+len > n:
			return 0, "patch and recent changes conflict"
		shift = newdelta
	return shift, ""
@hgcommand
def download(ui, repo, clname, **opts):
	"""download a change from the code review server
	Download prints a description of the given change list
	followed by its diff, downloaded from the code review server.
	"""
	if codereview_disabled:
		return codereview_disabled
	cl, vers, patch, err = DownloadCL(ui, repo, clname)
	if err != "":
		return err
	# Print the CL text followed by the raw diff.
	ui.write(cl.EditorText() + "\n")
	ui.write(patch + "\n")
	return
#######################################################################
# hg file
@hgcommand
def file(ui, repo, clname, pat, *pats, **opts):
	"""assign files to or remove files from a change list
	Assign files to or (with -d) remove files from a change list.
	The -d option only removes files from the change list.
	It does not edit them or remove them from the repository.
	"""
	if codereview_disabled:
		return codereview_disabled
	pats = tuple([pat] + list(pats))
	if not GoodCLName(clname):
		return "invalid CL name " + clname
	# CLs that lose files below and need flushing at the end.
	dirty = {}
	cl, err = LoadCL(ui, repo, clname, web=False)
	if err != '':
		return err
	if not cl.local:
		return "cannot change non-local CL " + clname
	files = ChangedFiles(ui, repo, pats)
	if opts["delete"]:
		# -d: remove the matched files from this CL.
		oldfiles = Intersect(files, cl.files)
		if oldfiles:
			if not ui.quiet:
				ui.status("# Removing files from CL. To undo:\n")
				ui.status("# cd %s\n" % (repo.root))
			for f in oldfiles:
				ui.status("# hg file %s %s\n" % (cl.name, f))
			cl.files = Sub(cl.files, oldfiles)
			cl.Flush(ui, repo)
		else:
			ui.status("no such files in CL")
		return
	if not files:
		return "no such modified files"
	# Only add files not already in this CL.
	files = Sub(files, cl.files)
	taken = Taken(ui, repo)
	warned = False
	for f in files:
		if f in taken:
			# File currently belongs to another CL; move it here.
			if not warned and not ui.quiet:
				ui.status("# Taking files from other CLs. To undo:\n")
				ui.status("# cd %s\n" % (repo.root))
				warned = True
			ocl = taken[f]
			if not ui.quiet:
				ui.status("# hg file %s %s\n" % (ocl.name, f))
			if ocl not in dirty:
				ocl.files = Sub(ocl.files, files)
				dirty[ocl] = True
	cl.files = Add(cl.files, files)
	dirty[cl] = True
	for d, _ in dirty.items():
		d.Flush(ui, repo)
	return
#######################################################################
# hg gofmt
@hgcommand
def gofmt(ui, repo, *pats, **opts):
	"""apply gofmt to modified files
	Applies gofmt to the modified files in the repository that match
	the given patterns.
	"""
	if codereview_disabled:
		return codereview_disabled
	files = ChangedExistingFiles(ui, repo, pats, opts)
	files = gofmt_required(files)
	if not files:
		return "no modified go files"
	cwd = os.getcwd()
	# gofmt runs from cwd, so hand it paths relative to cwd.
	files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
	try:
		cmd = ["gofmt", "-l"]
		if not opts["list"]:
			# Without -l, rewrite the files in place.
			cmd += ["-w"]
		if os.spawnvp(os.P_WAIT, "gofmt", cmd + files) != 0:
			raise hg_util.Abort("gofmt did not exit cleanly")
	except hg_error.Abort, e:
		raise
	except:
		raise hg_util.Abort("gofmt: " + ExceptionDetail())
	return
def gofmt_required(files):
	"""Filter files down to the .go files that gofmt should process.

	Files under test/ are exempt, except those under test/bench/.
	"""
	required = []
	for f in files:
		if not f.endswith('.go'):
			continue
		if f.startswith('test/') and not f.startswith('test/bench/'):
			continue
		required.append(f)
	return required
#######################################################################
# hg mail
@hgcommand
def mail(ui, repo, *pats, **opts):
	"""mail a change for review
	Uploads a patch to the code review server and then sends mail
	to the reviewer and CC list asking for a review.
	"""
	if codereview_disabled:
		return codereview_disabled
	cl, err = CommandLineCL(ui, repo, pats, opts, defaultcc=defaultcc)
	if err != "":
		return err
	cl.Upload(ui, repo, gofmt_just_warn=True)
	if not cl.reviewer:
		# If no reviewer is listed, assign the review to defaultcc.
		# This makes sure that it appears in the
		# codereview.appspot.com/user/defaultcc
		# page, so that it doesn't get dropped on the floor.
		if not defaultcc:
			return "no reviewers listed in CL"
		cl.cc = Sub(cl.cc, defaultcc)
		cl.reviewer = defaultcc
		cl.Flush(ui, repo)
	if cl.files == []:
		return "no changed files, not sending mail"
	cl.Mail(ui, repo)
#######################################################################
# hg p / hg pq / hg ps / hg pending
@hgcommand
def ps(ui, repo, *pats, **opts):
	"""alias for hg p --short
	"""
	# Force the short-listing option and delegate to pending.
	opts['short'] = True
	return pending(ui, repo, *pats, **opts)
@hgcommand
def pq(ui, repo, *pats, **opts):
	"""alias for hg p --quick
	"""
	# Force the quick option (no server round trip) and delegate to pending.
	opts['quick'] = True
	return pending(ui, repo, *pats, **opts)
@hgcommand
def pending(ui, repo, *pats, **opts):
	"""show pending changes
	Lists pending changes followed by a list of unassigned but modified files.
	"""
	if codereview_disabled:
		return codereview_disabled
	quick = opts.get('quick', False)
	short = opts.get('short', False)
	# Skip the codereview-server round trip when a fast listing was requested.
	m = LoadAllCL(ui, repo, web=not quick and not short)
	names = m.keys()
	names.sort()
	for name in names:
		cl = m[name]
		if short:
			ui.write(name + "\t" + line1(cl.desc) + "\n")
		else:
			ui.write(cl.PendingText(quick=quick) + "\n")
	if short:
		return
	# Finish with the modified files not assigned to any CL.
	files = DefaultFiles(ui, repo, [])
	if len(files) > 0:
		s = "Changed files not in any CL:\n"
		for f in files:
			s += "\t" + f + "\n"
		ui.write(s)
#######################################################################
# hg submit
def need_sync():
	# Abort: the local repository is behind the remote; sync first.
	raise hg_util.Abort("local repository out of date; must sync before submit")
@hgcommand
def submit(ui, repo, *pats, **opts):
	"""submit change to remote repository
	Submits change to remote repository.
	Bails out if the local repository is not in sync with the remote one.
	"""
	if codereview_disabled:
		return codereview_disabled
	# We already called this on startup but sometimes Mercurial forgets.
	set_mercurial_encoding_to_utf8()
	if not opts["no_incoming"] and hg_incoming(ui, repo):
		need_sync()
	cl, err = CommandLineCL(ui, repo, pats, opts, defaultcc=defaultcc)
	if err != "":
		return err
	user = None
	if cl.copied_from:
		user = cl.copied_from
	userline = CheckContributor(ui, repo, user)
	typecheck(userline, str)
	# Assemble the R=/TBR=/CC= trailer block for the commit message.
	about = ""
	if cl.reviewer:
		about += "R=" + JoinComma([CutDomain(s) for s in cl.reviewer]) + "\n"
	if opts.get('tbr'):
		tbr = SplitCommaSpace(opts.get('tbr'))
		cl.reviewer = Add(cl.reviewer, tbr)
		about += "TBR=" + JoinComma([CutDomain(s) for s in tbr]) + "\n"
	if cl.cc:
		about += "CC=" + JoinComma([CutDomain(s) for s in cl.cc]) + "\n"
	if not cl.reviewer:
		return "no reviewers listed in CL"
	if not cl.local:
		return "cannot submit non-local CL"
	# upload, to sync current patch and also get change number if CL is new.
	if not cl.copied_from:
		cl.Upload(ui, repo, gofmt_just_warn=True)
	# check gofmt for real; allowed upload to warn in order to save CL.
	cl.Flush(ui, repo)
	CheckFormat(ui, repo, cl.files)
	about += "%s%s\n" % (server_url_base, cl.name)
	if cl.copied_from:
		about += "\nCommitter: " + CheckContributor(ui, repo, None) + "\n"
	typecheck(about, str)
	if not cl.mailed and not cl.copied_from: # in case this is TBR
		cl.Mail(ui, repo)
	# submit changes locally
	message = cl.desc.rstrip() + "\n\n" + about
	typecheck(message, str)
	set_status("pushing " + cl.name + " to remote server")
	if hg_outgoing(ui, repo):
		raise hg_util.Abort("local repository corrupt or out-of-phase with remote: found outgoing changes")
	old_heads = len(hg_heads(ui, repo).split())
	# Drop the precommit guard just for this one commit.
	global commit_okay
	commit_okay = True
	ret = hg_commit(ui, repo, *['path:'+f for f in cl.files], message=message, user=userline)
	commit_okay = False
	if ret:
		return "nothing changed"
	node = repo["-1"].node()
	# push to remote; if it fails for any reason, roll back
	try:
		new_heads = len(hg_heads(ui, repo).split())
		if old_heads != new_heads and not (old_heads == 0 and new_heads == 1):
			# Created new head, so we weren't up to date.
			need_sync()
		# Push changes to remote. If it works, we're committed. If not, roll back.
		try:
			hg_push(ui, repo)
		except hg_error.Abort, e:
			if e.message.find("push creates new heads") >= 0:
				# Remote repository had changes we missed.
				need_sync()
			raise
	except:
		real_rollback()
		raise
	# We're committed. Upload final patch, close review, add commit message.
	changeURL = hg_node.short(node)
	url = ui.expandpath("default")
	# Rewrite the change URL to a Google Code source-detail link when possible.
	m = re.match("(^https?://([^@/]+@)?([^.]+)\.googlecode\.com/hg/?)" + "|" +
		"(^https?://([^@/]+@)?code\.google\.com/p/([^/.]+)(\.[^./]+)?/?)", url)
	if m:
		if m.group(1): # prj.googlecode.com/hg/ case
			changeURL = "http://code.google.com/p/%s/source/detail?r=%s" % (m.group(3), changeURL)
		elif m.group(4) and m.group(7): # code.google.com/p/prj.subrepo/ case
			changeURL = "http://code.google.com/p/%s/source/detail?r=%s&repo=%s" % (m.group(6), changeURL, m.group(7)[1:])
		elif m.group(4): # code.google.com/p/prj/ case
			changeURL = "http://code.google.com/p/%s/source/detail?r=%s" % (m.group(6), changeURL)
		else:
			print >>sys.stderr, "URL: ", url
	else:
		print >>sys.stderr, "URL: ", url
	pmsg = "*** Submitted as " + changeURL + " ***\n\n" + message
	# When posting, move reviewers to CC line,
	# so that the issue stops showing up in their "My Issues" page.
	PostMessage(ui, cl.name, pmsg, reviewers="", cc=JoinComma(cl.reviewer+cl.cc))
	if not cl.copied_from:
		EditDesc(cl.name, closed=True, private=cl.private)
	cl.Delete(ui, repo)
	c = repo[None]
	# If this emptied out a clean release-branch client, go back to default.
	if c.branch() == releaseBranch and not c.modified() and not c.added() and not c.removed():
		ui.write("switching from %s to default branch.\n" % releaseBranch)
		err = hg_clean(repo, "default")
		if err:
			return err
	return None
#######################################################################
# hg sync
@hgcommand
def sync(ui, repo, **opts):
	"""synchronize with remote repository
	Incorporates recent changes from the remote repository
	into the local repository.
	"""
	if codereview_disabled:
		return codereview_disabled
	if not opts["local"]:
		# update=True also updates the working directory after pulling.
		err = hg_pull(ui, repo, update=True)
		if err:
			return err
	sync_changes(ui, repo)
def sync_changes(ui, repo):
	# Look through recent change log descriptions to find
	# potential references to http://.*/our-CL-number.
	# Double-check them by looking at the Rietveld log.
	for rev in hg_log(ui, repo, limit=100, template="{node}\n").split():
		desc = repo[rev].description().strip()
		for clname in re.findall('(?m)^http://(?:[^\n]+)/([0-9]+)$', desc):
			if IsLocalCL(ui, repo, clname) and IsRietveldSubmitted(ui, clname, repo[rev].hex()):
				# CL was submitted (possibly from another client); close it here.
				ui.warn("CL %s submitted as %s; closing\n" % (clname, repo[rev]))
				cl, err = LoadCL(ui, repo, clname, web=False)
				if err != "":
					ui.warn("loading CL %s: %s\n" % (clname, err))
					continue
				if not cl.copied_from:
					EditDesc(cl.name, closed=True, private=cl.private)
				cl.Delete(ui, repo)
	# Remove files that are not modified from the CLs in which they appear.
	all = LoadAllCL(ui, repo, web=False)
	changed = ChangedFiles(ui, repo, [])
	for cl in all.values():
		extra = Sub(cl.files, changed)
		if extra:
			ui.warn("Removing unmodified files from CL %s:\n" % (cl.name,))
			for f in extra:
				ui.warn("\t%s\n" % (f,))
			cl.files = Sub(cl.files, extra)
			cl.Flush(ui, repo)
		if not cl.files:
			if not cl.copied_from:
				ui.warn("CL %s has no files; delete (abandon) with hg change -d %s\n" % (cl.name, cl.name))
			else:
				ui.warn("CL %s has no files; delete locally with hg change -D %s\n" % (cl.name, cl.name))
	return
#######################################################################
# hg upload
@hgcommand
def upload(ui, repo, name, **opts):
	"""upload diffs to the code review server
	Uploads the current modifications for a given change to the server.
	"""
	if codereview_disabled:
		return codereview_disabled
	repo.ui.quiet = True
	cl, err = LoadCL(ui, repo, name, web=True)
	if err != "":
		return err
	if not cl.local:
		return "cannot upload non-local change"
	cl.Upload(ui, repo)
	# Echo the issue URL for the user.
	print "%s%s\n" % (server_url_base, cl.name)
	return
#######################################################################
# Table of commands, supplied to Mercurial for installation.
# Command-line options shared by the commands that send a CL for review
# (mail and submit).
review_opts = [
	('r', 'reviewer', '', 'add reviewer'),
	('', 'cc', '', 'add cc'),
	('', 'tbr', '', 'add future reviewer'),
	('m', 'message', '', 'change description (for new change)'),
]
# Each entry maps a command name to (function, option list, usage synopsis).
cmdtable = {
	# The ^ means to show this command in the help text that
	# is printed when running hg with no arguments.
	"^change": (
		change,
		[
			('d', 'delete', None, 'delete existing change list'),
			('D', 'deletelocal', None, 'delete locally, but do not change CL on server'),
			('i', 'stdin', None, 'read change list from standard input'),
			('o', 'stdout', None, 'print change list to standard output'),
			('p', 'pending', None, 'print pending summary to standard output'),
		],
		"[-d | -D] [-i] [-o] change# or FILE ..."
	),
	"^clpatch": (
		clpatch,
		[
			('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
			('', 'no_incoming', None, 'disable check for incoming changes'),
		],
		"change#"
	),
	# Would prefer to call this codereview-login, but then
	# hg help codereview prints the help for this command
	# instead of the help for the extension.
	"code-login": (
		code_login,
		[],
		"",
	),
	"^download": (
		download,
		[],
		"change#"
	),
	"^file": (
		file,
		[
			('d', 'delete', None, 'delete files from change list (but not repository)'),
		],
		"[-d] change# FILE ..."
	),
	"^gofmt": (
		gofmt,
		[
			('l', 'list', None, 'list files that would change, but do not edit them'),
		],
		"FILE ..."
	),
	"^pending|p": (
		pending,
		[
			('s', 'short', False, 'show short result form'),
			('', 'quick', False, 'do not consult codereview server'),
		],
		"[FILE ...]"
	),
	"^ps": (
		ps,
		[],
		"[FILE ...]"
	),
	"^pq": (
		pq,
		[],
		"[FILE ...]"
	),
	"^mail": (
		mail,
		review_opts + [
		] + hg_commands.walkopts,
		"[-r reviewer] [--cc cc] [change# | file ...]"
	),
	"^release-apply": (
		release_apply,
		[
			('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
			('', 'no_incoming', None, 'disable check for incoming changes'),
		],
		"change#"
	),
	# TODO: release-start, release-tag, weekly-tag
	"^submit": (
		submit,
		review_opts + [
			('', 'no_incoming', None, 'disable initial incoming check (for testing)'),
		] + hg_commands.walkopts + hg_commands.commitopts + hg_commands.commitopts2,
		"[-r reviewer] [--cc cc] [change# | file ...]"
	),
	"^sync": (
		sync,
		[
			('', 'local', None, 'do not pull changes from remote repository')
		],
		"[--local]",
	),
	"^undo": (
		undo,
		[
			('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
			('', 'no_incoming', None, 'disable check for incoming changes'),
		],
		"change#"
	),
	"^upload": (
		upload,
		[],
		"change#"
	),
}
#######################################################################
# Mercurial extension initialization
def norollback(*pats, **opts):
	"""(disabled when using this extension)"""
	# Installed in place of repo.rollback by reposetup below.
	raise hg_util.Abort("codereview extension enabled; use undo instead of rollback")
# Guard so reposetup's body runs only once per process.
codereview_init = False
def reposetup(ui, repo):
	global codereview_disabled
	global defaultcc
	# reposetup gets called both for the local repository
	# and also for any repository we are pulling or pushing to.
	# Only initialize the first time.
	global codereview_init
	if codereview_init:
		return
	codereview_init = True
	# Read repository-specific options from lib/codereview/codereview.cfg or codereview.cfg.
	root = ''
	try:
		root = repo.root
	except:
		# Yes, repo might not have root; see issue 959.
		codereview_disabled = 'codereview disabled: repository has no root'
		return
	repo_config_path = ''
	p1 = root + '/lib/codereview/codereview.cfg'
	p2 = root + '/codereview.cfg'
	if os.access(p1, os.F_OK):
		repo_config_path = p1
	else:
		repo_config_path = p2
	try:
		# NOTE(review): f is never closed explicitly; relies on GC.
		f = open(repo_config_path)
		for line in f:
			if line.startswith('defaultcc:'):
				defaultcc = SplitCommaSpace(line[len('defaultcc:'):])
			if line.startswith('contributors:'):
				global contributorsURL
				contributorsURL = line[len('contributors:'):].strip()
	except:
		codereview_disabled = 'codereview disabled: cannot open ' + repo_config_path
		return
	remote = ui.config("paths", "default", "")
	if remote.find("://") < 0:
		raise hg_util.Abort("codereview: default path '%s' is not a URL" % (remote,))
	InstallMatch(ui, repo)
	RietveldSetup(ui, repo)
	# Disable the Mercurial commands that might change the repository.
	# Only commands in this extension are supposed to do that.
	ui.setconfig("hooks", "precommit.codereview", precommithook)
	# Rollback removes an existing commit. Don't do that either.
	global real_rollback
	real_rollback = repo.rollback
	repo.rollback = norollback
#######################################################################
# Wrappers around upload.py for interacting with Rietveld
from HTMLParser import HTMLParser
# HTML form parser
class FormParser(HTMLParser):
	"""Collects the fields of an HTML form.

	After feeding a page, self.map holds one entry per <input>
	(name attribute -> value attribute) and one per <textarea>
	(name attribute -> text between the tags).
	"""
	def __init__(self):
		self.map = {}        # field name -> field value
		self.curtag = None   # name of the <textarea> currently open, if any
		self.curdata = None  # text accumulated for that textarea
		HTMLParser.__init__(self)
	def handle_starttag(self, tag, attrs):
		attr = dict(attrs)
		if tag == "input":
			name = attr.get('name')
			if name is not None:
				# Inputs contribute their value attribute ('' when absent).
				self.map[name] = attr.get('value', '')
		elif tag == "textarea":
			name = attr.get('name')
			if name is not None:
				# Start accumulating character data for this textarea.
				self.curtag = name
				self.curdata = ''
	def handle_endtag(self, tag):
		if tag != "textarea" or self.curtag is None:
			return
		self.map[self.curtag] = self.curdata
		self.curtag = None
		self.curdata = None
	def handle_charref(self, name):
		# Numeric character reference: decode to the actual character.
		self.handle_data(unichr(int(name)))
	def handle_entityref(self, name):
		# Named entity: decode if known, otherwise pass it through literally.
		import htmlentitydefs
		if name in htmlentitydefs.entitydefs:
			self.handle_data(htmlentitydefs.entitydefs[name])
		else:
			self.handle_data("&" + name + ";")
	def handle_data(self, data):
		# Only textarea bodies accumulate text; all other data is ignored.
		if self.curdata is not None:
			self.curdata += data
def JSONGet(ui, path):
	"""Fetch path from the Rietveld server and return the decoded JSON
	value, or None (after printing a warning) on any failure."""
	try:
		raw = MySend(path, force_auth=False)
		typecheck(raw, str)
		reply = fix_json(json.loads(raw))
	except:
		# Deliberately broad: any network/parse problem becomes a warning.
		ui.warn("JSONGet %s: %s\n" % (path, ExceptionDetail()))
		return None
	return reply
# Clean up json parser output to match our expectations:
#   * all strings are UTF-8-encoded str, not unicode.
#   * missing fields are missing, not None,
#     so that d.get("foo", defaultvalue) works.
def fix_json(x):
	"""Recursively normalize a json.loads result in place and return it."""
	t = type(x)
	if t in [str, int, float, bool, type(None)]:
		pass
	elif t is unicode:
		x = x.encode("utf-8")
	elif t is list:
		# Rewrite the list in place so the caller's reference stays valid.
		x[:] = [fix_json(elem) for elem in x]
	elif t is dict:
		# Iterate over a snapshot of the keys so deletion is safe.
		for key in list(x):
			if x[key] is None:
				del x[key]
			else:
				x[key] = fix_json(x[key])
	else:
		raise hg_util.Abort("unknown type " + str(type(x)) + " in fix_json")
	if type(x) is str:
		x = x.replace('\r\n', '\n')
	return x
def IsRietveldSubmitted(ui, clname, hex):
	"""Report whether Rietveld's message log for CL clname contains a
	"*** Submitted as <hash> ***" message whose hash prefixes hex.

	The hex parameter keeps its historical (builtin-shadowing) name
	because it is part of the function's interface.
	"""
	# Fetch the issue with its full message history.
	# (Fix: local was named "dict", shadowing the builtin.)
	resp = JSONGet(ui, "/api/" + clname + "?messages=true")
	if resp is None:
		return False
	for msg in resp.get("messages", []):
		text = msg.get("text", "")
		m = re.match(r'\*\*\* Submitted as [^*]*?([0-9a-f]+) \*\*\*', text)
		# Require at least 8 hex digits so a tiny match cannot
		# accidentally prefix-match an unrelated revision.
		if m is not None and len(m.group(1)) >= 8 and hex.startswith(m.group(1)):
			return True
	return False
def IsRietveldMailed(cl):
	"""Report whether the CL's Rietveld message log shows that the
	review-request mail has already been sent."""
	for note in cl.dict.get("messages", []):
		if "I'd like you to review this change" in note.get("text", ""):
			return True
	return False
def DownloadCL(ui, repo, clname):
	"""Download CL metadata and the latest patch set from Rietveld.

	Returns (cl, vers, diffdata, err): the CL object, the Mercurial
	revision the diff was made against ('' if unknown), the raw diff
	text, and an error string ('' on success).
	"""
	set_status("downloading CL " + clname)
	cl, err = LoadCL(ui, repo, clname, web=True)
	if err != "":
		return None, None, None, "error loading CL %s: %s" % (clname, err)
	# Find most recent diff
	diffs = cl.dict.get("patchsets", [])
	if not diffs:
		return None, None, None, "CL has no patch sets"
	patchid = diffs[-1]
	patchset = JSONGet(ui, "/api/" + clname + "/" + str(patchid))
	if patchset is None:
		return None, None, None, "error loading CL patchset %s/%d" % (clname, patchid)
	if patchset.get("patchset", 0) != patchid:
		return None, None, None, "malformed patchset information"
	vers = ""
	# The patchset message looks like "diff -r <rev> ..."; pull out <rev>.
	msg = patchset.get("message", "").split()
	if len(msg) >= 3 and msg[0] == "diff" and msg[1] == "-r":
		vers = msg[2]
	diff = "/download/issue" + clname + "_" + str(patchid) + ".diff"
	diffdata = MySend(diff, force_auth=False)
	# Print warning if email is not in CONTRIBUTORS file.
	email = cl.dict.get("owner_email", "")
	if not email:
		return None, None, None, "cannot find owner for %s" % (clname)
	him = FindContributor(ui, repo, email)
	me = FindContributor(ui, repo, None)
	if him == me:
		cl.mailed = IsRietveldMailed(cl)
	else:
		# Someone else's CL: record the true author.
		cl.copied_from = email
	return cl, vers, diffdata, ""
def MySend(request_path, payload=None,
		content_type="application/octet-stream",
		timeout=None, force_auth=True,
		**kwargs):
	"""Run MySend1 maybe twice, because Rietveld is unreliable."""
	try:
		return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
	except Exception, e:
		# Rietveld occasionally answers 500; one delayed retry usually works.
		if type(e) != urllib2.HTTPError or e.code != 500: # only retry on HTTP 500 error
			raise
		print >>sys.stderr, "Loading "+request_path+": "+ExceptionDetail()+"; trying again in 2 seconds."
		time.sleep(2)
		return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
# Like upload.py Send but only authenticates when the
# redirect is to www.google.com/accounts. This keeps
# unnecessary redirects from happening during testing.
def MySend1(request_path, payload=None,
content_type="application/octet-stream",
timeout=None, force_auth=True,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
global rpc
if rpc == None:
rpc = GetRpcServer(upload_options)
self = rpc
if not self.authenticated and force_auth:
self._Authenticate()
if request_path is None:
return
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "http://%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
# Translate \r\n into \n, because Rietveld doesn't.
response = response.replace('\r\n', '\n')
# who knows what urllib will give us
if type(response) == unicode:
response = response.encode("utf-8")
typecheck(response, str)
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401:
self._Authenticate()
elif e.code == 302:
loc = e.info()["location"]
if not loc.startswith('https://www.google.com/a') or loc.find('/ServiceLogin') < 0:
return ''
self._Authenticate()
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
def GetForm(url):
	"""Fetch url from the server and return its HTML form fields as a
	str -> str dictionary with normalized line endings."""
	parser = FormParser()
	parser.feed(ustr(MySend(url))) # f.feed wants unicode
	parser.close()
	# convert back to utf-8 to restore sanity
	fields = {}
	for name, value in parser.map.items():
		fields[name.encode("utf-8")] = value.replace("\r\n", "\n").encode("utf-8")
	return fields
def EditDesc(issue, subject=None, desc=None, reviewers=None, cc=None, closed=False, private=False):
	"""Update issue metadata on Rietveld via its /edit form.

	Only the arguments passed as non-None (or True) are changed; the rest
	keep the values fetched from the current form. Exits the process with
	status 2 on a server error.
	"""
	set_status("uploading change to description")
	# Start from the server's current form so unchanged fields survive.
	form_fields = GetForm("/" + issue + "/edit")
	if subject is not None:
		form_fields['subject'] = subject
	if desc is not None:
		form_fields['description'] = desc
	if reviewers is not None:
		form_fields['reviewers'] = reviewers
	if cc is not None:
		form_fields['cc'] = cc
	if closed:
		form_fields['closed'] = "checked"
	if private:
		form_fields['private'] = "checked"
	ctype, body = EncodeMultipartFormData(form_fields.items(), [])
	response = MySend("/" + issue + "/edit", body, content_type=ctype)
	# An empty response means success; anything else is an error page.
	if response != "":
		print >>sys.stderr, "Error editing description:\n" + "Sent form: \n", form_fields, "\n", response
		sys.exit(2)
def PostMessage(ui, issue, message, reviewers=None, cc=None, send_mail=True, subject=None):
set_status("uploading message")
form_fields = GetForm("/" + issue + "/publish")
if reviewers is not None:
form_fields['reviewers'] = reviewers
if cc is not None:
form_fields['cc'] = cc
if send_mail:
form_fields['send_mail'] = "checked"
else:
del form_fields['send_mail']
if subject is not None:
form_fields['subject'] = subject
form_fields['message'] = message
form_fields['message_only'] = '1' # Don't include draft comments
if reviewers is not None or cc is not None:
form_fields['message_only'] = '' # Must set '' in order to override cc/reviewer
ctype = "applications/x-www-form-urlencoded"
body = urllib.urlencode(form_fields)
response = MySend("/" + issue + "/publish", body, content_type=ctype)
if response != "":
print response
sys.exit(2)
# Empty attribute bag used to mimic the options object that upload.py's
# optparse-based command line would normally provide (see RietveldSetup).
class opt(object):
	pass
def RietveldSetup(ui, repo):
	"""Initialize the module-level Rietveld state (server address,
	credentials, upload options, release branch) from the Mercurial
	configuration."""
	global force_google_account
	global rpc
	global server
	global server_url_base
	global upload_options
	global verbosity
	if not ui.verbose:
		verbosity = 0
	# Config options.
	x = ui.config("codereview", "server")
	if x is not None:
		server = x
	# TODO(rsc): Take from ui.username?
	email = None
	x = ui.config("codereview", "email")
	if x is not None:
		email = x
	server_url_base = "http://" + server + "/"
	testing = ui.config("codereview", "testing")
	force_google_account = ui.configbool("codereview", "force_google_account", False)
	# Build the options object upload.py's code expects from optparse.
	upload_options = opt()
	upload_options.email = email
	upload_options.host = None
	upload_options.verbose = 0
	upload_options.description = None
	upload_options.description_file = None
	upload_options.reviewers = None
	upload_options.cc = None
	upload_options.message = None
	upload_options.issue = None
	upload_options.download_base = False
	upload_options.revision = None
	upload_options.send_mail = False
	upload_options.vcs = None
	upload_options.server = server
	upload_options.save_cookies = True
	if testing:
		upload_options.save_cookies = False
		upload_options.email = "test@example.com"
	# The RPC server is created lazily on first use (see MySend1).
	rpc = None
	global releaseBranch
	tags = repo.branchtags().keys()
	if 'release-branch.go10' in tags:
		# NOTE(rsc): This tags.sort is going to get the wrong
		# answer when comparing release-branch.go9 with
		# release-branch.go10. It will be a while before we care.
		raise hg_util.Abort('tags.sort needs to be fixed for release-branch.go10')
	tags.sort()
	# After sorting, the last matching tag wins.
	for t in tags:
		if t.startswith('release-branch.go'):
			releaseBranch = t
#######################################################################
# http://codereview.appspot.com/static/upload.py, heavily edited.
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
# (RietveldSetup resets this to 0 unless hg runs with --verbose.)
verbosity = 1
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
# whitelist for non-binary filetypes which do not start with "text/"
# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
TEXT_MIMETYPES = [
	'application/javascript',
	'application/x-javascript',
	'application/x-freemind'
]
def GetEmail(prompt):
	"""Prompts the user for their email address and returns it.
	The last used email address is saved to a file and offered up as a suggestion
	to the user. If the user presses enter without typing in anything the last
	used email address is used. If the user enters a new address, it is saved
	for next time we prompt.
	"""
	last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
	last_email = ""
	if os.path.exists(last_email_file_name):
		try:
			# "with" guarantees the file is closed even if readline fails
			# (the original leaked the handle on error).
			with open(last_email_file_name, "r") as last_email_file:
				last_email = last_email_file.readline().strip("\n")
			prompt += " [%s]" % last_email
		except IOError:
			# Best effort: a missing/unreadable cache just loses the suggestion.
			pass
	email = raw_input(prompt + ": ").strip()
	if email:
		try:
			with open(last_email_file_name, "w") as last_email_file:
				last_email_file.write(email)
		except IOError:
			# Best effort: failing to save the address is not fatal.
			pass
	else:
		email = last_email
	return email
def StatusUpdate(msg):
	"""Print a status message to stdout.
	If 'verbosity' is greater than 0, print the message.
	Args:
		msg: The string to print.
	"""
	# verbosity is module state, adjusted by RietveldSetup.
	if verbosity > 0:
		print msg
def ErrorExit(msg):
	"""Print an error message to stderr and exit."""
	print >>sys.stderr, msg
	sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
	"""Raised to indicate there was an error authenticating with ClientLogin."""
	def __init__(self, url, code, msg, headers, args):
		urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
		# args is the parsed key=value body of the 403 response;
		# "Error" carries ClientLogin's failure code, e.g. "BadAuthentication"
		# (checked by AbstractRpcServer._Authenticate).
		# NOTE(review): assigning self.args overrides BaseException.args —
		# looks intentional here, but confirm nothing relies on the default.
		self.args = args
		self.reason = args["Error"]
class AbstractRpcServer(object):
	"""Provides a common interface for a simple RPC server."""
	def __init__(self, host, auth_function, host_override=None, extra_headers={}, save_cookies=False):
		"""Creates a new HttpRpcServer.
		Args:
			host: The host to send requests to.
			auth_function: A function that takes no arguments and returns an
				(email, password) tuple when called. Will be called if authentication
				is required.
			host_override: The host header to send to the server (defaults to host).
			extra_headers: A dict of extra headers to append to every request.
			save_cookies: If True, save the authentication cookies to local disk.
				If False, use an in-memory cookiejar instead. Subclasses must
				implement this functionality. Defaults to False.
		"""
		# NOTE(review): extra_headers defaults to a shared mutable dict; it is
		# only read here (_CreateRequest) — confirm no caller ever mutates it.
		self.host = host
		self.host_override = host_override
		self.auth_function = auth_function
		self.authenticated = False
		self.extra_headers = extra_headers
		self.save_cookies = save_cookies
		self.opener = self._GetOpener()
		if self.host_override:
			logging.info("Server: %s; Host: %s", self.host, self.host_override)
		else:
			logging.info("Server: %s", self.host)
	def _GetOpener(self):
		"""Returns an OpenerDirector for making HTTP requests.
		Returns:
			A urllib2.OpenerDirector object.
		"""
		raise NotImplementedError()
	def _CreateRequest(self, url, data=None):
		"""Creates a new urllib request."""
		logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
		req = urllib2.Request(url, data=data)
		if self.host_override:
			req.add_header("Host", self.host_override)
		for key, value in self.extra_headers.iteritems():
			req.add_header(key, value)
		return req
	def _GetAuthToken(self, email, password):
		"""Uses ClientLogin to authenticate the user, returning an auth token.
		Args:
			email: The user's email address
			password: The user's password
		Raises:
			ClientLoginError: If there was an error authenticating with ClientLogin.
			HTTPError: If there was some other form of HTTP error.
		Returns:
			The authentication token returned by ClientLogin.
		"""
		account_type = "GOOGLE"
		if self.host.endswith(".google.com") and not force_google_account:
			# Needed for use inside Google.
			account_type = "HOSTED"
		req = self._CreateRequest(
			url="https://www.google.com/accounts/ClientLogin",
			data=urllib.urlencode({
				"Email": email,
				"Passwd": password,
				"service": "ah",
				"source": "rietveld-codereview-upload",
				"accountType": account_type,
			}),
		)
		try:
			response = self.opener.open(req)
			response_body = response.read()
			# The body is newline-separated key=value pairs; "Auth" holds the token.
			response_dict = dict(x.split("=") for x in response_body.split("\n") if x)
			return response_dict["Auth"]
		except urllib2.HTTPError, e:
			if e.code == 403:
				# 403 carries a parseable error body; surface it as ClientLoginError.
				body = e.read()
				response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
				raise ClientLoginError(req.get_full_url(), e.code, e.msg, e.headers, response_dict)
			else:
				raise
	def _GetAuthCookie(self, auth_token):
		"""Fetches authentication cookies for an authentication token.
		Args:
			auth_token: The authentication token returned by ClientLogin.
		Raises:
			HTTPError: If there was an error fetching the authentication cookies.
		"""
		# This is a dummy value to allow us to identify when we're successful.
		continue_location = "http://localhost/"
		args = {"continue": continue_location, "auth": auth_token}
		req = self._CreateRequest("http://%s/_ah/login?%s" % (self.host, urllib.urlencode(args)))
		try:
			response = self.opener.open(req)
		except urllib2.HTTPError, e:
			response = e
		# Success is a 302 redirect back to the dummy continue location.
		if (response.code != 302 or
			response.info()["location"] != continue_location):
			raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg, response.headers, response.fp)
		self.authenticated = True
	def _Authenticate(self):
		"""Authenticates the user.
		The authentication process works as follows:
		1) We get a username and password from the user
		2) We use ClientLogin to obtain an AUTH token for the user
			(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
		3) We pass the auth token to /_ah/login on the server to obtain an
			authentication cookie. If login was successful, it tries to redirect
			us to the URL we provided.
		If we attempt to access the upload API without first obtaining an
		authentication cookie, it returns a 401 response (or a 302) and
		directs us to authenticate ourselves with ClientLogin.
		"""
		# Up to three attempts: a bad password re-prompts (continue), other
		# recognized ClientLogin failures print a message and stop (break).
		for i in range(3):
			credentials = self.auth_function()
			try:
				auth_token = self._GetAuthToken(credentials[0], credentials[1])
			except ClientLoginError, e:
				if e.reason == "BadAuthentication":
					print >>sys.stderr, "Invalid username or password."
					continue
				if e.reason == "CaptchaRequired":
					print >>sys.stderr, (
						"Please go to\n"
						"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
						"and verify you are a human. Then try again.")
					break
				if e.reason == "NotVerified":
					print >>sys.stderr, "Account not verified."
					break
				if e.reason == "TermsNotAgreed":
					print >>sys.stderr, "User has not agreed to TOS."
					break
				if e.reason == "AccountDeleted":
					print >>sys.stderr, "The user account has been deleted."
					break
				if e.reason == "AccountDisabled":
					print >>sys.stderr, "The user account has been disabled."
					break
				if e.reason == "ServiceDisabled":
					print >>sys.stderr, "The user's access to the service has been disabled."
					break
				if e.reason == "ServiceUnavailable":
					print >>sys.stderr, "The service is not available; try again later."
					break
				raise
			# Success: exchange the token for the session cookie and stop.
			self._GetAuthCookie(auth_token)
			return
	def Send(self, request_path, payload=None,
			content_type="application/octet-stream",
			timeout=None,
			**kwargs):
		"""Sends an RPC and returns the response.
		Args:
			request_path: The path to send the request to, eg /api/appversion/create.
			payload: The body of the request, or None to send an empty request.
			content_type: The Content-Type header to use.
			timeout: timeout in seconds; default None i.e. no timeout.
				(Note: for large requests on OS X, the timeout doesn't work right.)
			kwargs: Any keyword arguments are converted into query string parameters.
		Returns:
			The response body, as a string.
		"""
		# TODO: Don't require authentication. Let the server say
		# whether it is necessary.
		if not self.authenticated:
			self._Authenticate()
		old_timeout = socket.getdefaulttimeout()
		socket.setdefaulttimeout(timeout)
		try:
			tries = 0
			while True:
				tries += 1
				args = dict(kwargs)
				url = "http://%s%s" % (self.host, request_path)
				if args:
					url += "?" + urllib.urlencode(args)
				req = self._CreateRequest(url=url, data=payload)
				req.add_header("Content-Type", content_type)
				try:
					f = self.opener.open(req)
					response = f.read()
					f.close()
					return response
				except urllib2.HTTPError, e:
					# Retry (up to 3 times) after re-authenticating on an
					# auth challenge (401) or login redirect (302).
					if tries > 3:
						raise
					elif e.code == 401 or e.code == 302:
						self._Authenticate()
					else:
						raise
		finally:
			socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
	"""Provides a simplified RPC-style interface for HTTP requests."""
	def _Authenticate(self):
		"""Save the cookie jar after authentication."""
		super(HttpRpcServer, self)._Authenticate()
		if self.save_cookies:
			StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
			self.cookie_jar.save()
	def _GetOpener(self):
		"""Returns an OpenerDirector that supports cookies and ignores redirects.
		Returns:
			A urllib2.OpenerDirector object.
		"""
		opener = urllib2.OpenerDirector()
		opener.add_handler(urllib2.ProxyHandler())
		opener.add_handler(urllib2.UnknownHandler())
		opener.add_handler(urllib2.HTTPHandler())
		opener.add_handler(urllib2.HTTPDefaultErrorHandler())
		opener.add_handler(urllib2.HTTPSHandler())
		opener.add_handler(urllib2.HTTPErrorProcessor())
		if self.save_cookies:
			# One cookie file per Rietveld server, in the user's home directory.
			self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies_" + server)
			self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
			if os.path.exists(self.cookie_file):
				try:
					self.cookie_jar.load()
					# A loadable cookie jar means we already have a session.
					self.authenticated = True
					StatusUpdate("Loaded authentication cookies from %s" % self.cookie_file)
				except (cookielib.LoadError, IOError):
					# Failed to load cookies - just ignore them.
					pass
			else:
				# Create an empty cookie file with mode 600
				fd = os.open(self.cookie_file, os.O_CREAT, 0600)
				os.close(fd)
			# Always chmod the cookie file
			os.chmod(self.cookie_file, 0600)
		else:
			# Don't save cookies across runs of update.py.
			self.cookie_jar = cookielib.CookieJar()
		opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
		return opener
def GetRpcServer(options):
	"""Returns an instance of an AbstractRpcServer.
	Returns:
		A new AbstractRpcServer, on which RPC calls can be made.
	"""
	rpc_server_class = HttpRpcServer
	def GetUserCredentials():
		"""Prompts the user for a username and password."""
		# Disable status prints so they don't obscure the password prompt.
		global global_status
		st = global_status
		global_status = None
		email = options.email
		if email is None:
			email = GetEmail("Email (login for uploading to %s)" % options.server)
		password = getpass.getpass("Password for %s: " % email)
		# Put status back.
		global_status = st
		return (email, password)
	# If this is the dev_appserver, use fake authentication.
	host = (options.host or options.server).lower()
	if host == "localhost" or host.startswith("localhost:"):
		email = options.email
		if email is None:
			email = "test@example.com"
			logging.info("Using debug user %s. Override with --email" % email)
		server = rpc_server_class(
			options.server,
			lambda: (email, "password"),
			host_override=options.host,
			extra_headers={"Cookie": 'dev_appserver_login="%s:False"' % email},
			save_cookies=options.save_cookies)
		# Don't try to talk to ClientLogin.
		server.authenticated = True
		return server
	return rpc_server_class(options.server, GetUserCredentials,
		host_override=options.host, save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
	"""Encode form fields for multipart/form-data.
	Args:
		fields: A sequence of (name, value) elements for regular form fields.
		files: A sequence of (name, filename, value) elements for data to be
			uploaded as files.
	Returns:
		(content_type, body) ready for httplib.HTTP instance.
	Source:
		http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
	"""
	BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
	CRLF = '\r\n'
	pieces = []
	# Plain fields: one part each, no Content-Type header.
	for (name, value) in fields:
		typecheck(name, str)
		typecheck(value, str)
		pieces.extend([
			'--' + BOUNDARY,
			'Content-Disposition: form-data; name="%s"' % name,
			'',
			value,
		])
	# File fields: also carry a filename and a guessed Content-Type.
	for (name, filename, value) in files:
		typecheck(name, str)
		typecheck(filename, str)
		typecheck(value, str)
		pieces.extend([
			'--' + BOUNDARY,
			'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
			'Content-Type: %s' % GetContentType(filename),
			'',
			value,
		])
	pieces.append('--' + BOUNDARY + '--')
	pieces.append('')
	body = CRLF.join(pieces)
	content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
	return content_type, body
def GetContentType(filename):
	"""Guess the MIME type from filename, defaulting to octet-stream."""
	guessed, _ = mimetypes.guess_type(filename)
	return guessed or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
		universal_newlines=True, env=os.environ):
	"""Executes a command and returns the output from stdout and the return code.
	Args:
		command: Command to execute.
		print_output: If True, the output is printed to stdout.
			If False, both stdout and stderr are ignored.
		universal_newlines: Use universal_newlines flag (default: True).
	Returns:
		Tuple (output, return code)
	"""
	logging.info("Running %s", command)
	p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
		shell=use_shell, universal_newlines=universal_newlines, env=env)
	if print_output:
		output_array = []
		while True:
			line = p.stdout.readline()
			if not line:
				break
			print line.strip("\n")
			output_array.append(line)
		output = "".join(output_array)
	else:
		output = p.stdout.read()
	p.wait()
	# NOTE(review): stdout is fully drained before stderr is read; a child
	# that fills its stderr pipe first could block here — confirm callers'
	# commands keep stderr small (subprocess.communicate would avoid this).
	errout = p.stderr.read()
	if print_output and errout:
		print >>sys.stderr, errout
	p.stdout.close()
	p.stderr.close()
	return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
		print_output=False, env=os.environ):
	"""Run command and return its stdout; exit the process on a nonzero
	status or (unless silent_ok) on empty output."""
	output, exit_code = RunShellWithReturnCode(command, print_output, universal_newlines, env)
	if exit_code:
		ErrorExit("Got error status from %s:\n%s" % (command, output))
	if not silent_ok and not output:
		ErrorExit("No output from %s" % command)
	return output
class VersionControlSystem(object):
	"""Abstract base class providing an interface to the VCS."""
	def __init__(self, options):
		"""Constructor.
		Args:
			options: Command line options.
		"""
		self.options = options
	def GenerateDiff(self, args):
		"""Return the current diff as a string.
		Args:
			args: Extra arguments to pass to the diff command.
		"""
		raise NotImplementedError(
			"abstract method -- subclass %s must override" % self.__class__)
	def GetUnknownFiles(self):
		"""Return a list of files unknown to the VCS."""
		raise NotImplementedError(
			"abstract method -- subclass %s must override" % self.__class__)
	def CheckForUnknownFiles(self):
		"""Show an "are you sure?" prompt if there are unknown files."""
		unknown_files = self.GetUnknownFiles()
		if unknown_files:
			print "The following files are not added to version control:"
			for line in unknown_files:
				print line
			prompt = "Are you sure to continue?(y/N) "
			answer = raw_input(prompt).strip()
			if answer != "y":
				ErrorExit("User aborted")
	def GetBaseFile(self, filename):
		"""Get the content of the upstream version of a file.
		Returns:
			A tuple (base_content, new_content, is_binary, status)
			base_content: The contents of the base file.
			new_content: For text files, this is empty. For binary files, this is
				the contents of the new file, since the diff output won't contain
				information to reconstruct the current file.
			is_binary: True iff the file is binary.
			status: The status of the file.
		"""
		raise NotImplementedError(
			"abstract method -- subclass %s must override" % self.__class__)
	def GetBaseFiles(self, diff):
		"""Helper that calls GetBase file for each file in the patch.
		Returns:
			A dictionary that maps from filename to GetBaseFile's tuple. Filenames
			are retrieved based on lines that start with "Index:" or
			"Property changes on:".
		"""
		files = {}
		for line in diff.splitlines(True):
			if line.startswith('Index:') or line.startswith('Property changes on:'):
				unused, filename = line.split(':', 1)
				# On Windows if a file has property changes its filename uses '\'
				# instead of '/'.
				filename = to_slash(filename.strip())
				files[filename] = self.GetBaseFile(filename)
		return files
	def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
			files):
		"""Uploads the base files (and if necessary, the current ones as well)."""
		def UploadFile(filename, file_id, content, is_binary, status, is_base):
			"""Uploads a file to the server."""
			set_status("uploading " + filename)
			file_too_large = False
			# NOTE: "type" intentionally shadows the builtin within this helper.
			if is_base:
				type = "base"
			else:
				type = "current"
			if len(content) > MAX_UPLOAD_SIZE:
				print ("Not uploading the %s file for %s because it's too large." %
					(type, filename))
				file_too_large = True
				content = ""
			checksum = md5(content).hexdigest()
			if options.verbose > 0 and not file_too_large:
				print "Uploading %s file for %s" % (type, filename)
			url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
			form_fields = [
				("filename", filename),
				("status", status),
				("checksum", checksum),
				("is_binary", str(is_binary)),
				("is_current", str(not is_base)),
			]
			if file_too_large:
				form_fields.append(("file_too_large", "1"))
			if options.email:
				form_fields.append(("user", options.email))
			ctype, body = EncodeMultipartFormData(form_fields, [("data", filename, content)])
			response_body = rpc_server.Send(url, body, content_type=ctype)
			if not response_body.startswith("OK"):
				StatusUpdate(" --> %s" % response_body)
				sys.exit(1)
		# Don't want to spawn too many threads, nor do we want to
		# hit Rietveld too hard, or it will start serving 500 errors.
		# When 8 works, it's no better than 4, and sometimes 8 is
		# too many for Rietveld to handle.
		MAX_PARALLEL_UPLOADS = 4
		sema = threading.BoundedSemaphore(MAX_PARALLEL_UPLOADS)
		upload_threads = []
		finished_upload_threads = []
		class UploadFileThread(threading.Thread):
			def __init__(self, args):
				threading.Thread.__init__(self)
				self.args = args
			def run(self):
				UploadFile(*self.args)
				# Hand the slot back and let StartUploadFile reap us.
				finished_upload_threads.append(self)
				sema.release()
		def StartUploadFile(*args):
			# Blocks until an upload slot is free, then reaps finished threads.
			sema.acquire()
			while len(finished_upload_threads) > 0:
				t = finished_upload_threads.pop()
				upload_threads.remove(t)
				t.join()
			t = UploadFileThread(args)
			upload_threads.append(t)
			t.start()
		def WaitForUploads():
			for t in upload_threads:
				t.join()
		# Invert patch_list's (id string, filename) pairs into a
		# filename -> id-string map; setdefault keeps the first id seen.
		patches = dict()
		[patches.setdefault(v, k) for k, v in patch_list]
		for filename in patches.keys():
			base_content, new_content, is_binary, status = files[filename]
			file_id_str = patches.get(filename)
			# A "nobase" marker means the server doesn't need the base file.
			if file_id_str.find("nobase") != -1:
				base_content = None
				file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
			file_id = int(file_id_str)
			if base_content != None:
				StartUploadFile(filename, file_id, base_content, is_binary, status, True)
			if new_content != None:
				StartUploadFile(filename, file_id, new_content, is_binary, status, False)
		WaitForUploads()
	def IsImage(self, filename):
		"""Returns true if the filename has an image extension."""
		mimetype = mimetypes.guess_type(filename)[0]
		if not mimetype:
			return False
		return mimetype.startswith("image/")
	def IsBinary(self, filename):
		"""Returns true if the guessed mimetyped isnt't in text group."""
		mimetype = mimetypes.guess_type(filename)[0]
		if not mimetype:
			return False # e.g. README, "real" binaries usually have an extension
		# special case for text files which don't start with text/
		if mimetype in TEXT_MIMETYPES:
			return False
		return not mimetype.startswith("text/")
class FakeMercurialUI(object):
	"""Minimal stand-in for a Mercurial ui object.

	Captures everything written to it in self.output so command output can
	be collected as a string; config queries fall through to global_ui.
	"""
	def __init__(self):
		self.quiet = True
		self.output = ''
	def write(self, *args, **opts):
		# Mercurial hands write() multiple fragments; join with spaces.
		self.output += ' '.join(args)
	def copy(self):
		# Share one accumulator rather than cloning the ui.
		return self
	def status(self, *args, **opts):
		pass
	def formatter(self, topic, opts):
		from mercurial.formatter import plainformatter
		return plainformatter(self, topic, opts)
	def readconfig(self, *args, **opts):
		pass
	def expandpath(self, *args, **opts):
		return global_ui.expandpath(*args, **opts)
	def configitems(self, *args, **opts):
		return global_ui.configitems(*args, **opts)
	def config(self, *args, **opts):
		return global_ui.config(*args, **opts)
use_hg_shell = False # set to True to shell out to hg always; slower
class MercurialVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Mercurial."""
	def __init__(self, options, ui, repo):
		super(MercurialVCS, self).__init__(options)
		self.ui = ui
		self.repo = repo
		# Cached output of "hg status -C"; filled lazily by get_hg_status.
		self.status = None
		# Absolute path to repository (we can be in a subdir)
		self.repo_dir = os.path.normpath(repo.root)
		# Compute the subdir
		cwd = os.path.normpath(os.getcwd())
		assert cwd.startswith(self.repo_dir)
		self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
		if self.options.revision:
			self.base_rev = self.options.revision
		else:
			# With mq in use, diff against qparent; otherwise against the
			# working directory's parent revision.
			mqparent, err = RunShellWithReturnCode(['hg', 'log', '--rev', 'qparent', '--template={node}'])
			if not err and mqparent != "":
				self.base_rev = mqparent
			else:
				out = RunShell(["hg", "parents", "-q"], silent_ok=True).strip()
				if not out:
					# No revisions; use 0 to mean a repository with nothing.
					out = "0:0"
				self.base_rev = out.split(':')[1].strip()
def _GetRelPath(self, filename):
"""Get relative path of a file according to the current directory,
given its logical path in the repo."""
assert filename.startswith(self.subdir), (filename, self.subdir)
return filename[len(self.subdir):].lstrip(r"\/")
def GenerateDiff(self, extra_args):
# If no file specified, restrict to the current subdir
extra_args = extra_args or ["."]
cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
data = RunShell(cmd, silent_ok=True)
svndiff = []
filecount = 0
for line in data.splitlines():
m = re.match("diff --git a/(\S+) b/(\S+)", line)
if m:
# Modify line to make it look like as it comes from svn diff.
# With this modification no changes on the server side are required
# to make upload.py work with Mercurial repos.
# NOTE: for proper handling of moved/copied files, we have to use
# the second filename.
filename = m.group(2)
svndiff.append("Index: %s" % filename)
svndiff.append("=" * 67)
filecount += 1
logging.info(line)
else:
svndiff.append(line)
if not filecount:
ErrorExit("No valid patches found in output from hg diff")
return "\n".join(svndiff) + "\n"
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
args = []
status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
silent_ok=True)
unknown_files = []
for line in status.splitlines():
st, fn = line.split(" ", 1)
if st == "?":
unknown_files.append(fn)
return unknown_files
def get_hg_status(self, rev, path):
# We'd like to use 'hg status -C path', but that is buggy
# (see http://mercurial.selenic.com/bts/issue3023).
# Instead, run 'hg status -C' without a path
# and skim the output for the path we want.
if self.status is None:
if use_hg_shell:
out = RunShell(["hg", "status", "-C", "--rev", rev])
else:
fui = FakeMercurialUI()
ret = hg_commands.status(fui, self.repo, *[], **{'rev': [rev], 'copies': True})
if ret:
raise hg_util.Abort(ret)
out = fui.output
self.status = out.splitlines()
for i in range(len(self.status)):
# line is
# A path
# M path
# etc
line = to_slash(self.status[i])
if line[2:] == path:
if i+1 < len(self.status) and self.status[i+1][:2] == ' ':
return self.status[i:i+2]
return self.status[i:i+1]
raise hg_util.Abort("no status for " + path)
def GetBaseFile(self, filename):
set_status("inspecting " + filename)
# "hg status" and "hg cat" both take a path relative to the current subdir
# rather than to the repo root, but "hg diff" has given us the full path
# to the repo root.
base_content = ""
new_content = None
is_binary = False
oldrelpath = relpath = self._GetRelPath(filename)
out = self.get_hg_status(self.base_rev, relpath)
status, what = out[0].split(' ', 1)
if len(out) > 1 and status == "A" and what == relpath:
oldrelpath = out[1].strip()
status = "M"
if ":" in self.base_rev:
base_rev = self.base_rev.split(":", 1)[0]
else:
base_rev = self.base_rev
if status != "A":
if use_hg_shell:
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath], silent_ok=True)
else:
base_content = str(self.repo[base_rev][oldrelpath].data())
is_binary = "\0" in base_content # Mercurial's heuristic
if status != "R":
new_content = open(relpath, "rb").read()
is_binary = is_binary or "\0" in new_content
if is_binary and base_content and use_hg_shell:
# Fetch again without converting newlines
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True, universal_newlines=False)
if not is_binary or not self.IsImage(relpath):
new_content = None
return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
    """Splits a patch into separate pieces for each file.

    Args:
      data: A string containing the output of svn diff.

    Returns:
      A list of 2-tuple (filename, text) where text is the svn diff output
      pertaining to filename.
    """
    patches = []
    filename = None
    diff = []
    # splitlines(True) keeps line endings so the pieces re-join byte-for-byte.
    for line in data.splitlines(True):
        new_filename = None
        if line.startswith('Index:'):
            # Start of a new file's diff section.
            unused, new_filename = line.split(':', 1)
            new_filename = new_filename.strip()
        elif line.startswith('Property changes on:'):
            unused, temp_filename = line.split(':', 1)
            # When a file is modified, paths use '/' between directories, however
            # when a property is modified '\' is used on Windows. Make them the same
            # otherwise the file shows up twice.
            temp_filename = to_slash(temp_filename.strip())
            if temp_filename != filename:
                # File has property changes but no modifications, create a new diff.
                new_filename = temp_filename
        if new_filename:
            if filename and diff:
                # Flush the previous file's accumulated diff.
                patches.append((filename, ''.join(diff)))
            filename = new_filename
            diff = [line]
            continue
        # diff is never rebound to None, so this guard is always true;
        # kept as-is to stay in sync with the engine.py duplicate.
        if diff is not None:
            diff.append(line)
    if filename and diff:
        # Flush the last file.
        patches.append((filename, ''.join(diff)))
    return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
    """Uploads a separate patch for each file in the diff output.

    Returns a list of [patch_key, filename] for each file.

    Patches larger than MAX_UPLOAD_SIZE are skipped (with a message), not
    treated as fatal.  Exits the process when the server does not reply "OK".
    """
    patches = SplitPatch(data)
    rv = []
    for patch in patches:
        set_status("uploading patch for " + patch[0])
        if len(patch[1]) > MAX_UPLOAD_SIZE:
            print ("Not uploading the patch for " + patch[0] +
                   " because the file is too large.")
            continue
        form_fields = [("filename", patch[0])]
        if not options.download_base:
            # Tell the server the file content will be uploaded too.
            form_fields.append(("content_upload", "1"))
        files = [("data", "data.diff", patch[1])]
        ctype, body = EncodeMultipartFormData(form_fields, files)
        url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
        print "Uploading patch for " + patch[0]
        response_body = rpc_server.Send(url, body, content_type=ctype)
        lines = response_body.splitlines()
        # The server replies "OK" on the first line, then the patch key.
        if not lines or lines[0] != "OK":
            StatusUpdate(" --> %s" % response_body)
            sys.exit(1)
        rv.append([lines[1], patch[0]])
    return rv
| Python |
#!/usr/bin/python
# Imports
import os, pygame
import opencv.adaptors as adaptors
# Basepaths
BASEDIR = 'capture'      # permanently saved face captures
TMPDIR = '/tmp/capture'  # temporary captures taken during the login sequence
DATADIR = 'data'         # bundled resources (cascade files, sounds, images)
def load_image(filename):
"""Loads and returns an image with pygame.
Parameters: string filename - location of the image to be loaded."""
try:
image = pygame.image.load(filename)
except pygame.error:
print "Cannot load image:", filename
return image.convert_alpha()
def convert_image(img):
    """Converts an openCV image to a pygame image.
    Parameters: IplImage img - image"""
    rgb_array = adaptors.Ipl2NumPy(img)
    surface = pygame.surfarray.make_surface(rgb_array)
    # Rotate -90 degrees and mirror horizontally -- presumably to undo
    # surfarray's axis order relative to OpenCV (verify against the camera).
    rotated = pygame.transform.rotate(surface, -90)
    return pygame.transform.flip(rotated, 1, 0)
def load_sound(filename):
"""Loads and returns a sound with pygame.
Parameters: string filename - location of the sound to be loaded."""
class NoneSound:
def play(self):
pass
if not pygame.mixer:
return NoneSound()
filename = os.path.join(DATADIR, filename)
try:
sound = pygame.mixer.Sound(filename)
except pygame.error:
print 'Cannot load sound: ', filename
return sound
def mouse_collision(rect):
    """Returns True if the position of the mouse is located within the given
    rectangle (extended by a 10px margin on the left and top edges),
    False otherwise.
    Parameters: Rect rect - pygame rectangle of (usually) a button."""
    m_width = 10
    m_height = 10
    m_x, m_y = pygame.mouse.get_pos()
    # BUGFIX: return an explicit bool; the original returned None (implicit)
    # when the mouse was outside, which is falsy but surprising for callers.
    return (rect.x - m_width < m_x < rect.x + rect.width and
            rect.y - m_height < m_y < rect.y + rect.height)
| Python |
#!/usr/bin/python
# Imports
import sys, os
import pygame
import opencv
from opencv.cv import *
from opencv.highgui import *
from data import *
import scene
from inputbox import Inputbox
from conf import countdown_seconds, pictures_taken, max_amount_of_pics, amount_of_matches
class FaceDetect(object):
    """Used to detect, capture, save, update and match faces.
    Can also draw a live stream of the (web)cam."""

    def __init__(self, screen, sql, scene = None):
        """Initialization of FaceDetect.
        Creates the necessary global parameters and creates 2 directories when not existing.
        Parameters: display screen - drawing screen
                    SQL sql - instance of SQL()
                    Scene scene - instance of Scene()"""
        print 'Initializing FaceDetect'
        self.storage = cvCreateMemStorage(0)
        self.capture = cvCreateCameraCapture(0)
        face_cascade_name = "haarcascade_frontalface_default.xml"
        face_cascade_name = os.path.join(DATADIR, face_cascade_name)
        self.face_cascade = cvLoadHaarClassifierCascade(face_cascade_name, cvSize(0, 0))
        self.cam_width = 320
        self.cam_height = 240
        cvSetCaptureProperty(self.capture, CV_CAP_PROP_FRAME_WIDTH, self.cam_width)
        cvSetCaptureProperty (self.capture, CV_CAP_PROP_FRAME_HEIGHT, self.cam_height)
        # Countdown bookkeeping: count_down() runs once per frame and
        # self.modulo frames are treated as one second -- assumes the main
        # loop ticks roughly 10 times per second (TODO confirm).
        self.modulo = 10
        self.seconds = countdown_seconds
        self.may_match = False
        self.keepGoing = True
        self.scene = scene
        self.users = []
        self.captures = []  # filenames of temp captures taken during login
        self.user_id = None
        self.img_x_scale = 1.0
        self.img_y_scale = 1.3
        self.screen = screen
        self.sql = sql
        if not os.path.isdir(BASEDIR):
            os.mkdir(BASEDIR)
        if not os.path.isdir(TMPDIR):
            os.mkdir(TMPDIR)

    def equalizeHist(self, img):
        """Shrinks, normalizes the brightness and increases contrast of the image.
        Returns an IplImage.
        Parameters: IplImage img - image of frame of the (web)cam"""
        gray = cvCreateImage(cvSize(img.width,img.height), 8, 1)
        small_img = cvCreateImage(cvSize(cvRound (img.width/self.img_x_scale),
                                         cvRound(img.height/self.img_y_scale)), 8, 1)
        cvCvtColor(img, gray, CV_RGB2GRAY)
        cvResize(gray, small_img)
        cvEqualizeHist(small_img, small_img)
        cvClearMemStorage(self.storage)
        return small_img

    def detect_face(self, img):
        """Tries to detect a face and if detected drawing an Ellipse around it.
        Returns a 2-tuple (rect, img): rect is a cvRect around the first
        detected face or None when nothing was found; img is the (possibly
        annotated) input frame.
        Parameters: IplImage img - image of frame of the (web)cam"""
        small_img = self.equalizeHist(img)
        faces = cvHaarDetectObjects(small_img, self.face_cascade, self.storage,
                                    1.1, 3, 0, cvSize(50, 50))
        if faces:
            for r in faces:
                # Grow the detection rectangle so the whole face fits.
                x, y = r.x - 15, r.y - 15
                width, height = int(r.x + (r.width * self.img_x_scale) + 10), int(r.y + (r.height * self.img_y_scale) + 30)
                pt1, pt2 = cvPoint(x, y), cvPoint(width, height)
                cvEllipse(img, cvPoint(x + ((width - x ) / 2), y + ((height - y) / 2)), cvSize(((width - x ) / 2), ((height - y) / 2)), 0, 0, 360, CV_RGB(0,0,255), 1, 8, 0)
                rect = cvRect(x, y, width - x, height - y)
                # Only the first detected face is used.
                return rect, img
        return None, img

    def get_list_max(self, index_list, count_list, matches_list):
        """Returns a list of users with the most matches.
        Can only be used together with get_best_matches().
        Parameters: list index_list - list of index numbers refering to the same index in matches_list
                    list count_list - list of count numbers how often each user occurs.
                    list matches_list - list of users (each matching user occurs once in this list)
        The indexes of all lists correspond which each other. So index_list[0] = count_list[0] = match_list[0]"""
        index = 0
        if len(matches_list) > 1:
            for cl in count_list:
                if cl == max(count_list):
                    index_list.append(matches_list[index])
                    # NOTE(review): remove(x) deletes the first element equal
                    # to x, which is not necessarily the one at 'index' when
                    # counts repeat -- confirm before relying on ordering.
                    count_list.remove(count_list[index])
                    matches_list.remove(matches_list[index])
                    break
                index += 1
        elif len(matches_list) == 1:
            index_list.append(matches_list[count_list.index(max(count_list))])
            count_list.remove(count_list[index])
            matches_list.remove(matches_list[index])
        return index_list, count_list, matches_list

    def get_best_matches(self, matches):
        """Returns the best matches based on how often the match is found in the list.
        The amount returned can be specified in conf.py.
        Parameters: list matches - list of all matching users, meaning that an user can occur more then once."""
        count = 1  # (unused)
        count_list = []
        matches_list = []
        index_list = []
        # Build a histogram: matches_list holds each distinct user once,
        # count_list the occurrence count at the same index.
        for match in matches:
            if match not in matches_list:
                count_list.append(matches.count(match))
                matches_list.append(match)
        if len(matches) > 0:
            # Repeatedly pull the most frequent user until we have
            # amount_of_matches entries or run out of candidates.
            while len(index_list) < amount_of_matches:
                if len(matches_list) == 0:
                    break
                else:
                    index_list, count_list, matches_list = self.get_list_max(index_list, count_list, matches_list)
        return index_list

    def get_most_recent_faces(self):
        """Returns all face_ids of the three most recent pictures of each user."""
        users = self.sql.get_all_users()
        face_ids = []
        for user in users:
            user_id = user[0]
            tmp_faces = self.sql.get_most_recent_faces(user_id)
            for user_face in tmp_faces:
                face_ids.append(user_face)
        return face_ids

    def matchTemplate(self, capture, src):
        """Compares a template against overlapped image regions.
        Parameters: IplImage capture - capture image saved in '/tmp'.
                    IplImage src - source image which capture is compared to."""
        # Setting image properties
        temp_width = src.width - capture.width + 1
        temp_height = src.height - capture.height + 1
        # Create image for template matching
        temp_diff = cvCreateImage(cvSize(temp_width, temp_height), IPL_DEPTH_32F, 1)
        # Matching template
        cvMatchTemplate(src, capture, temp_diff, CV_TM_CCORR)
        # Getting value out of temp_diff
        temp_diff = temp_diff[0]
        return temp_diff

    def matchShapes(self, capture, src):
        """Compares two shapes.
        Parameters: IplImage capture - capture image saved in '/tmp'.
                    IplImage src - source image which capture is compared to."""
        # Creating images for grayscale
        src_gray = cvCreateImage(cvSize(src.width, src.height), src.depth,1)
        cap_gray = cvCreateImage(cvSize(capture.width, capture.height), capture.depth, 1)
        # Making images gray
        cvCvtColor(src, src_gray, CV_RGB2GRAY)
        cvCvtColor(capture, cap_gray, CV_RGB2GRAY)
        # Matching shapes
        shape_diff = cvMatchShapes(src_gray, cap_gray, 1)
        return shape_diff

    def match_all(self, face_ids):
        """Going either to the 'User List' window (when no matches were found)
        or to the 'Match' window showing a variable amount of the found matches on the screen.
        Matches all capture(s) with the three most recent pictures of each user.
        Parameters: list face_ids - user faces that are going to be matches"""
        allmatches = []
        shape_results = []
        temp_results = []
        print 'Matching captures with most recent pictures of each user.'
        for capture in self.captures:
            shape_diff, temp_diff = self.match(capture)
            print 'Shape_diff:', shape_diff
            print 'Temp_diff:', temp_diff
            if shape_diff != None:
                shape_results = self.sql.get_closest_shape_diffs(shape_diff, face_ids)
                for user_id in shape_results:
                    allmatches.append(self.sql.get_user(user_id = user_id))
            if temp_diff != None:
                temp_results = self.sql.get_closest_temp_diffs(temp_diff, face_ids)
                for user_id in temp_results:
                    allmatches.append(self.sql.get_user(user_id = user_id))
        # NOTE(review): 'list' shadows the builtin; rename when next touched.
        list = self.get_best_matches(allmatches)
        if allmatches == []:
            self.scene.user_list_window(False)
        else:
            self.scene.match_window(list)
        for user in allmatches:
            print 'Matching user:', user

    def match(self, img, src = 'fingerprint.png'):
        """Matches the image with the source image.
        Returns the values that were created by matching.
        Parameters: string img - name of image saved in '/tmp'.
                    string src - name of source image"""
        shape_diff = None
        temp_diff = None
        try:
            print 'Matching %s with %s' % (src, img)
            capture = cvLoadImage(img)
            src = cvLoadImage(os.path.join(DATADIR, src))
            shape_diff = self.matchShapes(capture, src)
            temp_diff = self.matchTemplate(capture, src)
        except:
            # NOTE(review): bare except deliberately turns any failure into
            # "no match" (None, None) -- it also hides real errors.
            pass
        return shape_diff, temp_diff

    def save_tmp_image(self, img):
        """Saves the capture(s), taken during the login sequence, in '/tmp' so they can be used later on.
        Parameters: IplImage img - image of frame of the (web)cam"""
        try:
            face, img = self.detect_face(img)
            if face:
                # Find the first free /tmp/capture/<n>.png filename.
                name = 1
                self.filename = os.path.join(TMPDIR, str(name) + ".png")
                while os.path.exists(self.filename):
                    name += 1
                    self.filename = os.path.join(TMPDIR, str(name) + ".png")
                ROI = cvGetSubRect(img, face)
                # NOTE(review): detect_face returns a 2-tuple, so this
                # condition is always true; it probably meant to test only
                # the rect element -- confirm before changing.
                if self.detect_face(ROI):
                    dst = cvCreateImage(cvSize(95, 135), ROI.depth, ROI.nChannels)
                    cvResize(ROI, dst)
                    width = dst.width / 2
                    height = dst.height / 2
                    print width, height
                    # Draw concentric ellipses as a border around the face.
                    while width < (dst.width / 2 + 25) and height < (dst.height / 2 + 25):
                        cvEllipse(dst, cvPoint(dst.width / 2, dst.height / 2), cvSize(width, height), 0, 0, 360, CV_RGB(0,255,0), 2, 8, 0)
                        width += 1
                        height += 1
                    cvSaveImage(self.filename, dst)
                    self.captures.append(self.filename)
                    print "Saving temp_pic: ", self.filename
                else:
                    print "No face detected"
            else:
                print "No face detected"
        except Exception, info:
            print 'Error - Saving temp_pic:', info

    def save_diffs(self, tmp_filename, pic_name = None):
        """Saves the diff values in users_faces and the picture in 'capture/'
        The maximum rows allowed is specified in conf.py, when reached the oldest will get replaced by a newer.
        Parameters: string tmp_filename - name of image saved in '/tmp'.
                    string pic_name - if None then the pic_name is created else it is used for updating."""
        user_id = self.user_id
        shape_diff, temp_diff = self.match(tmp_filename)
        img = cvLoadImage(tmp_filename)
        if not pic_name:
            pic_name = str(user_id) + "_" + str(self.sql.count_users_faces(user_id) + 1) + ".png"
        filename = os.path.join(BASEDIR, pic_name)
        if self.sql.count_users_faces(user_id) == max_amount_of_pics:
            # Recycle the oldest slot instead of adding a new row.
            print 'Maximum amount (%s) of pictures reached.' % max_amount_of_pics
            oldest_face_id = self.sql.get_oldest_face_id(user_id)
            old_pic_name = self.sql.get_pic_name(oldest_face_id)
            filename = os.path.join(BASEDIR, old_pic_name)
            self.sql.update_user_face(shape_diff, temp_diff, old_pic_name, oldest_face_id)
        else:
            self.sql.create_user_face(shape_diff, temp_diff, pic_name, user_id)
        cvSaveImage(filename, img)
        print "Saved picture path:", filename

    def count_down(self):
        """Counts from the number specified in conf.py till 0.
        The numbers are shown on the screen when used.
        Assumes reset_countdown() has been called first to initialise
        self.count and self.draw_seconds."""
        font = pygame.font.Font(None, 100)
        if not self.draw_seconds == 0:
            self.count += 1
            ren = font.render(str(self.draw_seconds), 1, (255, 255, 255))
            self.screen.blit(ren, (self.screen.get_width() / 2 - ren.get_width() / 2, self.screen.get_height() / 2 - ren.get_height() / 2))
            # Every self.modulo frames one displayed second elapses.
            if (self.count % self.modulo) == 0:
                self.draw_seconds -= 1

    def reset_countdown(self):
        """Resets the countdown to it's original number of seconds."""
        self.count = 1
        self.draw_seconds = self.seconds

    def cancel_countdown(self):
        """Cancels the countdown by setting the amount of seconds left to 0."""
        self.count = None
        self.draw_seconds = 0

    def take_pictures(self, frame):
        """Works together with the countdown and takes a variable amount of pictures after the countdown is complete.
        The amount of pictures taken can be modified in conf.py.
        Parameters: IplImage frame - frame of the (web)cam"""
        if self.count:
            # Picture window: from the end of the countdown until
            # pictures_taken further "seconds" have elapsed.
            if self.count >= (self.modulo * self.seconds) and self.count <= (self.modulo * (self.seconds + pictures_taken)):
                if self.count >= (self.modulo * (self.seconds + 1)):
                    if (self.count % self.modulo) == 0:
                        # One capture per elapsed second.
                        self.save_tmp_image(frame)
                if (self.count % (self.modulo * (self.seconds + pictures_taken))) == 0:
                    # All pictures taken; allow matching on a later frame.
                    self.may_match = True
                self.count += 1
            elif self.may_match:
                self.may_match = False
                face_ids = self.get_most_recent_faces()
                self.match_all(face_ids)

    def set_user_id(self, user_id):
        """Sets the global user_id parameter.
        Parameters: int user_id - user identification code"""
        print 'user_id set to', user_id
        self.user_id = user_id

    def draw_cam(self):
        """Draws a frame of the (web)cam on the screen.
        When used in a loop it looks like a live stream.
        Returns a modified frame of the (web)cam."""
        small_frame = self.get_cam_frame()
        face, img = self.detect_face(small_frame)
        surf = convert_image(img)
        self.screen.blit(surf, ((self.screen.get_width() / 2) - (img.width / 2), (self.screen.get_height() / 3) - (img.height / 2)))
        return small_frame

    def login(self):
        """Returns the frame of the (web)cam while counting down and taking picture(s) afterwards. """
        small_frame = self.get_cam_frame()
        self.count_down()
        self.take_pictures(small_frame)
        return small_frame

    def get_cam_frame(self):
        """Creates and returns the actual (web)cam frame."""
        if not self.face_cascade:
            print "Error - Could not load classifier cascade"
            sys.exit(-1)
        frame = cvQueryFrame(self.capture)
        small_frame = cvCreateImage(cvSize(self.cam_width, self.cam_height), frame.depth, frame.nChannels)
        cvResize(frame, small_frame)
        # Mirror horizontally so the preview behaves like a mirror.
        cvFlip(small_frame, small_frame, 1)
        return small_frame
| Python |
#!/usr/bin/python
# Imports
import os
import pygame
import scene
# Centers the window
os.environ['SDL_VIDEO_CENTERED'] = '1'
# Initialization of pygame
pygame.init()
# Setting a display with title (800x600 window).
size = 800, 600
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Face Recognition")
print 'Starting Facerecognition.'
# Starts the program
scene.main(screen)
| Python |
#!/usr/bin/python
# Imports
import pygame, os, sys
from pygame import string
import MySQLdb, _mysql_exceptions
from conf import db_host, db_user, db_passwd
class SQL(object):
"""Contains functions to create, update, select or check things through the database."""
def __init__(self):
"""Initialization of SQL."""
print 'Initializing SQL'
self.connection = False
def get_closest_shape_diffs(self, shape_diff, face_ids):
"""Returns the user_ids of the users that have the closest shape_diff compared to the incoming shape_diff
and where the face_id(s) are equal to the incoming once.
Parameters: float shape_diff - return value of faceDetect.matchShapes()
list face_ids - list of face_id's of users_faces."""
query = []
query.append("SELECT user_id FROM test_db.users_faces WHERE")
for face_id in face_ids:
query.append(" face_id = %s OR" % face_id)
last_face_id = query[len(query) - 1]
last_face_id = last_face_id[0:-2]
query.remove(query[len(query) - 1])
query.append(last_face_id)
query.append("ORDER BY ABS(%s - shape_diff) LIMIT 5" % shape_diff)
query = string.join(query,"")
print 'Getting user_ids of closest shape_diff where face_ids = face_ids:', query
try:
self.cursor.execute(query)
rows = self.cursor.fetchall()
return rows
except Exception, info:
print "Error - Getting user_ids of closest shape_diff where face_ids = face_ids:", info
def get_closest_temp_diffs(self, temp_diff, face_ids):
"""Returns the user_ids of the users that have the closest temp_diff compared to the incoming temp_diff
and where the face_id(s) are equal to the incoming once.
Parameters: float shape_diff - return value of faceDetect.matchTemplate()
list face_ids - list of face_id's of users_faces."""
query = []
query.append("SELECT user_id FROM test_db.users_faces WHERE")
for face_id in face_ids:
query.append(" face_id = %s OR" % face_id)
last_face_id = query[len(query) - 1]
last_face_id = last_face_id[0:-2]
query.remove(query[len(query) - 1])
query.append(last_face_id)
query.append("ORDER BY ABS(%s - temp_diff) LIMIT 5" % temp_diff)
query = string.join(query,"")
print 'Getting user_ids of closest temp_diff where face_ids = face_ids:', query
try:
self.cursor.execute(query)
rows = self.cursor.fetchall()
return rows
except Exception, info:
print "Error - Getting user_ids of closest temp_diff where face_ids = face_ids:", info
def get_oldest_face_id(self, user_id):
"""Returns the oldest face_id of an user.
Parameters: int user_id - user identification code"""
query = "SELECT face_id FROM test_db.users_faces WHERE user_id = %s AND datetime = (SELECT MIN(datetime) FROM test_db.users_faces)" % user_id
print "Getting the oldest face_id of an user:", query
try:
self.cursor.execute(query)
row = self.cursor.fetchall()
return row[0][0]
except Exception, info:
print 'Error - Getting the oldest face_id of an user:', info
def get_most_recent_faces(self, user_id):
"""Returns the three most recent face_ids of an user.
Parameters: int user_id - user identification code"""
query = "SELECT face_id FROM test_db.users_faces WHERE user_id = %s ORDER BY datetime DESC LIMIT 3" % (user_id)
print 'Getting three most recent faces:', query
try:
self.cursor.execute(query)
rows = self.cursor.fetchall()
return rows
except Exception, info:
print 'Error - Getting three most recent faces:', info
def get_pic_name(self, face_id):
"""Returns the picture name of the given face_id.
Parameters: int face_id - user_face identication code"""
query = "SELECT pic_name FROM test_db.users_faces WHERE face_id = %s" % face_id
print "Getting the pic_name of a face_id:", query
try:
self.cursor.execute(query)
row = self.cursor.fetchone()
return row[0]
except Exception, info:
print "Error - Getting the pic_name of a face_id:", info
def get_user(self, user_id = None, login_name = None):
"""Returns the user's user_id, title, first name and last name. Searches by user_id.
Parameters: int user_id - user identification code
string login_name - login name of an user
Either of one should always be None."""
if login_name:
query = "SELECT user_id, title, first_name, last_name FROM test_db.users WHERE login_name = '%s'" % login_name
elif user_id:
query = "SELECT user_id, title, first_name, last_name FROM test_db.users WHERE user_id = %s" % user_id
print "Getting user:", query
try:
self.cursor.execute(query)
row = self.cursor.fetchone()
return row
except Exception, info:
print "Error - Getting user:", info
def get_user_face(self, pic_name):
"""Returns the face_id of a pic_name.
Parameters: string pic_name - picture name
Only used in conf.py"""
query = "SELECT face_id FROM test_db.users_faces WHERE pic_name = '%s'" % pic_name
print 'Getting user_face:', query
try:
self.cursor.execute(query)
row = self.cursor.fetchone()
return row[0]
except Exception, info:
print 'Error - Getting user_face:', info
def get_all_users(self):
"""Returns the title and name of all users."""
query = "SELECT user_id, title, first_name, last_name FROM test_db.users ORDER BY last_name"
print "Get all users:", query
try:
self.cursor.execute(query)
rows = self.cursor.fetchall()
return rows
except Exception, info:
print "Error - Get all users:", info
def get_user_list(self):
"""Returns all users excluding the test_users.
Only created for prototype to avoid showing all the test users."""
query = "SELECT user_id, title, first_name, last_name FROM test_db.users WHERE first_name != 'User'"
print "Get user list:", query
try:
self.cursor.execute(query)
rows = self.cursor.fetchall()
return rows
except Exception, info:
print "Error - Get user list:", info
def get_last_inserted_user_id(self):
"""Returns the user_id of the last inserted user."""
query = "SELECT user_id FROM test_db.users ORDER BY user_id DESC LIMIT 1"
print 'Getting last inserted user_id:', query
try:
self.cursor.execute(query)
row = self.cursor.fetchone()
return row[0]
except Exception, info:
print 'Error - Getting last inserted user_id:', info
def get_max_user_id(self):
"""Returns the highest user_id."""
query = "SELECT MAX(user_id) FROM test_db.users"
print 'Getting the highest user_id', query
try:
self.cursor.execute(query)
row = self.cursor.fetchone()
return row[0]
except Exception, info:
print 'Error - Getting the highest user_id', info
def user_exists(self, user_id = None, login_name = None):
"""Checks if the given user_id OR login_name exists.
Parameters: int user_id - user identification code
string login_name - login name of an user
Either of one should always be None."""
query = ''
if login_name:
query = "SELECT * FROM test_db.users WHERE login_name = '%s'" % (login_name)
elif user_id:
query = "SELECT * FROM test_db.users WHERE user_id = %s" % (user_id)
print "User exists:", query
try:
self.cursor.execute(query)
row = self.cursor.fetchone()
if row == None:
return False
return True
except Exception, info:
print "Error - User exists:", info
def user_face_exists(self, pic_name):
"""Checks if the given pic_name exists.
Parameters: string pic_name - picture name
Only used in conf.py"""
query = "SELECT * FROM test_db.users_faces WHERE pic_name = '%s'" % pic_name
print 'User_face exists:', query
try:
self.cursor.execute(query)
row = self.cursor.fetchone()
if row == None:
return False
return True
except Expcetion, info:
print "Error - User_face exists:", info
def count_users_faces(self, user_id):
"""Return the amount of users_faces.
Parameters: int user_id - user identification code"""
query = "SELECT count(*) FROM test_db.users_faces WHERE user_id = %s" % user_id
print "Count users_faces:", query
try:
self.cursor.execute(query)
row = self.cursor.fetchone()
return row[0]
except Exception, info:
print "Error - Count users_faces:", info
def update_user_face(self, shape_diff, temp_diff, pic_name, face_id):
"""Updates an user_face record.
Parameters: float shape_diff - return value of faceDetect.matchShapes()
float temp_diff - return value of faceDetect.matchTemplate()
string pic_name - picture name
int face_id - user_face identication code"""
query = '''UPDATE test_db.users_faces SET shape_diff = %s,
temp_diff = %s,
pic_name = '%s',
datetime = NOW() WHERE face_id = %s''' % (shape_diff, temp_diff, pic_name, face_id)
print "Updating user_face: face_id =", face_id, 'VALUES(', shape_diff, ',', temp_diff, ')'
try:
self.cursor.execute(query)
except Exception, info:
print 'Error - Updating user_face', info
def create_user_face(self, shape_diff, temp_diff, pic_name, user_id):
"""Creates an user_face record.
Parameters: float shape_diff - return value of faceDetect.matchShapes()
float temp_diff - return value of faceDetect.matchTemplate()
string pic_name - picture name
int user_Id - user identication code"""
print "PICNAME: ", pic_name
query = '''INSERT INTO test_db.users_faces (user_id, shape_diff, temp_diff, pic_name, datetime)
VALUES(%s, %s, %s, '%s', NOW())''' % (user_id, shape_diff, temp_diff, pic_name)
print "Creating user_face: user_id =", user_id, 'VALUES(', shape_diff, ',', temp_diff, ')'
try:
self.cursor.execute(query)
except Exception, info:
print 'Error - Creating user_face', info
def create_user(self, name):
"""Creates an user.
Parameters: string name - name of the user (format: 'Piet' or 'Piet Hein')"""
query = ''
user_id = 1
if self.get_max_user_id():
user_id = self.get_max_user_id() + 1
if " " in name:
first_name, last_name = name.split(" ")
first_name, last_name = first_name.capitalize(), last_name.capitalize()
full_name = first_name + '_' + last_name
query = "INSERT INTO test_db.users VALUES (%s, '%s', 'Dhr.', '%s', '%s', NOW(), 'BT+ Group', '', '', '', 50)" % (user_id, full_name, first_name, last_name)
else:
query = "INSERT INTO test_db.users VALUES (%s, '%s_Achternaam', 'Dhr.', '%s', 'Achternaam', NOW(), 'BT+ Group', '', '', '', 50)" % (user_id, name, name)
print "Creating an user:", query
try:
self.cursor.execute(query)
except Exception, info:
print "Error - Creating an user:", info
def connect_db(self):
"""Creates a connection with the database.
For the database configuration see conf.py."""
print 'Made connection with %s' % db_host
self.connection = True
self.conn = MySQLdb.connect(host = db_host, user = db_user, passwd = db_passwd)
self.cursor = self.conn.cursor()
def close_db(self):
"""Closes the connection with the database."""
print 'Closing database connection'
self.connection = False
self.conn.close()
| Python |
#!/usr/bin/python
# Imports
import os, sys
import pygame
from data import *
from faceDetect import FaceDetect
from inputbox import Inputbox
from sql import SQL
from dialog import Dialog
# main window parameters
main_buttons = ["Login", "Take Pictures", "Create User", "Quit"]  # labels on the main screen
main_window = 'Main'                                              # window name used by the event loop
main_title = 'Choose your option'
class Scene(object):
"""Base of the program. Each window gets set and drawn here.
Backgrounds, buttons and text get drawn here.
Each window atleast has a display screen and a window name."""
def __init__(self, screen, window, window_buttons = None, users = None, title = ''):
    """Initialization of Scene where the current window is set.
    Creates a SQL, Inputbox and FaceDetect instance.
    Eventually starts the program by calling the main_loop()
    Parameters: display screen - drawing screen
                string window - window name (choices: Main, Login, Create User, Pictures, Match, Users List)
                list window_buttons - list of button names
                list users - list of users where each user contains an user_id, title, first and last name in this order
                string title - title of the window"""
    print 'Initializing Scene'
    self.buttons = []
    self.keepGoing = True
    self.set_scene(screen, window, window_buttons, users, title)
    self.sql = SQL()
    self.sql.connect_db()
    self.box = Inputbox(self.screen, self.sql, self)
    self.FD = FaceDetect(self.screen, self.sql, self)
    # NOTE: constructing a Scene blocks here until the program exits.
    self.main_loop()
def main_loop(self):
"""Starting the loop of the program
Quits the program when you're in the main window and hit escape, else it will just return to it.
This loop checks which window should be drawn by looking at self.window.
Each window has it's own buttons and track_buttons. Track_buttons checks which button is pressed."""
while self.keepGoing:
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
self.sql.close_db()
sys.exit()
print 'Quitting Facerecognition'
elif event.type == pygame.KEYUP:
if event.key == pygame.K_ESCAPE:
if self.window == 'Main':
print 'Quitting Facerecognition'
self.sql.close_db()
sys.exit()
else:
self.main_window()
elif event.type == pygame.MOUSEBUTTONUP:
if self.window == 'Main':
self.track_main_buttons()
elif self.window == 'Login':
self.track_login_buttons()
elif self.window == 'Create User':
if self.track_create_buttons(self):
exception = False
log_name = ''
if " " in self.inputbox.capitalize():
first_name, last_name = self.inputbox.split(" ")
log_name = '%s_%s' % (first_name.capitalize(), last_name.capitalize())
else:
log_name = '%s_Achternaam' % (self.inputbox.capitalize())
if self.sql.user_exists(login_name = log_name):
exception = True
else:
self.sql.create_user(self.inputbox.capitalize())
if not exception:
user = self.sql.get_user(user_id = self.sql.get_last_inserted_user_id())
dialog = Dialog(self.screen, 'Create User', \
['User named %s %s created.' % (user[2], user[3]), 'User Identification Code: %s' % (user[0])], \
'Shoot Picture', self)
dialog.set_user_id(user[0])
dialog.main()
else:
user = self.sql.get_user(login_name = log_name)
dialog = Dialog(self.screen, 'Error: Create User', \
['User named %s %s already exists.' % (user[1], user[2]), 'User Identification Code: %s' % (user[0])], \
'Shoot Picture', self)
dialog.set_user_id(user[0])
dialog.main()
elif self.window == 'Pictures':
self.track_picture_buttons(self.inputbox, self.img, self.box)
elif self.window == 'Match':
self.track_match_buttons()
elif self.window == 'Users List':
self.track_user_list_buttons()
self.screen.fill((0, 0, 0))
if self.window == 'Main':
self.draw_background("main_background.png")
self.draw_window_buttons(vertical = True)
elif self.window == 'Login':
self.draw_background("background.png")
self.draw_window_buttons(vertical = False)
self.img = self.FD.login()
elif self.window == 'Create User':
self.draw_background("background.png")
self.draw_window_buttons(vertical = False)
self.draw_text('Only one First and Last name is possible. (Example: Jan Jansen)',
28, (255, 255, 255), 'CENTER', 325)
self.box.set_question('Name')
self.inputbox = self.box.ask(events)
elif self.window == 'Pictures':
self.draw_background("background.png")
self.draw_window_buttons(vertical = False)
self.img = self.FD.draw_cam()
self.draw_text('Enter your user identification code.',
28, (255, 255, 255), 'CENTER', 360)
self.box.set_question('User ID')
self.inputbox = self.box.ask(events, 1.5)
elif self.window == 'Match':
self.draw_background("main_background.png")
self.draw_matches()
self.draw_window_buttons(vertical = False)
elif self.window == 'Users List':
self.draw_background("main_background.png")
self.draw_matches()
self.draw_buttons()
self.draw_text(self.title, 40, (255, 255, 255), 'CENTER', 55)
pygame.display.flip()
def draw_background(self, background):
"""Draws the background picture on the screen.
Parameters: string background - name of the background image"""
background = load_image(os.path.join(DATADIR, background))
self.screen.blit(background, (0, 0))
def track_main_buttons(self):
"""Tracks all buttons in the main window, performing an action depending on the button pressed."""
for b in self.window_buttons:
if mouse_collision(b[1]):
print '[%s] button was pressed with %s' % (b[0], b[1])
if b[0] == 'Login':
self.login_window()
if b[0] == 'Take Pictures':
self.take_picture_window()
if b[0] == 'Create User':
self.create_user_window()
if b[0] == 'Quit':
sys.exit()
def track_login_buttons(self):
"""Tracks all buttons in the login window, performing an action depending on the button pressed.
Try Again - Resets the countdown, starting over again.
Back - Returns to the main window"""
for b in self.window_buttons:
if mouse_collision(b[1]):
print '[%s] button was pressed with %s' % (b[0], b[1])
if b[0] == 'Try Again':
self.FD.reset_countdown()
if b[0] == 'Back':
self.main_window()
def track_create_buttons(self, box):
"""Tracks all buttons in the create user window, performing an action depending on the button pressed.
Create - Returns True which simply determines if the button is pressed.
Back - Cleans the inputbox and returns to the main window
Parameters: Inputbox box - instance of Inputbox()"""
for b in self.window_buttons:
if mouse_collision(b[1]):
print '[%s] button was pressed with %s' % (b[0], b[1])
if b[0] == 'Create':
return True
if b[0] == 'Back':
self.box.reset()
self.main_window()
def track_picture_buttons(self, user_id, img, box):
"""Tracks all buttons in the take picture window, performing an action depending on the button pressed.
Shoot - Takes a picture, saves it in 'capture/' and in the database, linking it to the user_id entered in the inputbox.
Back - Cleans the inputbox and returns to the main window
Parameters: int user_id - user identification code
IplImage img - image of frame of the (web)cam
Inputbox box - instance of Inputbox()"""
for b in self.window_buttons:
if mouse_collision(b[1]):
print '[%s] button was pressed with %s' % (b[0], b[1])
if b[0] == 'Shoot':
if self.sql.user_exists(user_id = user_id):
self.FD.set_user_id(int(user_id))
self.FD.save_tmp_image(img)
for filename in self.FD.captures:
self.FD.save_diffs(filename)
self.FD.captures = []
else:
box.existing_user = False
if b[0] == 'Back':
self.box.reset()
self.main_window()
def track_match_buttons(self):
"""Tracks all buttons in the match window, performing an action depending on the button pressed.
Try Again - Reloads the login window.
Main menu - Return to the main window.
Users List - Shows the users list window and sets self.match_found = True, determing the window title that will be shown.
Reload - Refines the matches.
The matches (names) shown are buttons aswell which saves and links the captured picture(s) to the name when pressed. """
for b in self.window_buttons:
if mouse_collision(b[1]):
print '[%s] button was pressed with %s' % (b[0], b[1])
if b[0] == 'Try Again':
self.login_window()
if b[0] == 'Main menu':
self.main_window()
if b[0] == 'Users List':
self.user_list_window(True)
for t in self.users:
if mouse_collision(t[1]):
print '[%s] button was pressed with %s' % (t[0], t[1])
self.learn_system(t)
def track_user_list_buttons(self):
"""Tracks all buttons in the user list window, performing an action depending on the button pressed.
Back - Return to the main window.
The names shown are buttons aswell which saves and links the captured picture(s) to the name when pressed. """
for b in self.buttons:
if mouse_collision(b[1]):
print '[%s] button was pressed with %s' % (b[0], b[1])
if b[0] == 'Back':
self.main_window()
for t in self.users:
if mouse_collision(t[1]):
print '[%s] button was pressed with %s' % (t[0], t[1])
self.learn_system(t)
def learn_system(self, user):
"""Saves and links the captured picture(s) to the name when pressed.
Saving the picture in 'capture/' and linking it to the user through user_id by creating an user_face in the database.
Parameters: list user: list of user parameters which should contain user_id, title, first and last name in this order."""
user_id, title, name = user[0][0], user[0][1], '%s %s ' % (user[0][2], user[0][3])
print 'Selected user:', 'ID = %s, Title = %s, Name = %s' % (user_id, title, name)
self.FD.set_user_id(user_id)
for capture in self.FD.captures:
self.FD.save_diffs(capture)
self.main_window()
def set_scene(self, screen, window, window_buttons = None, users = None, title = ''):
"""Sets the current scene.
Parameters: display screen - drawing screen
string window - window name (choices: Main, Login, Create User, Pictures, Match, Users List)
list window_buttons - list of button names
list text - list users (where each user contains an user_id, title, first and last name in this order)
string title - title of the window"""
self.screen = screen
self.window = window
self.title = title
self.window_buttons = []
self.users = []
self.buttons = []
b = load_image(os.path.join(DATADIR, "button_blue.png"))
b_ro = load_image(os.path.join(DATADIR, "button_black.png"))
if window_buttons:
for button in window_buttons:
self.window_buttons.append([button, pygame.Rect(0, 0, 0, 0), b, b_ro])
if users:
for line in users:
self.users.append([line, pygame.Rect(0, 0, 0, 0)])
def create_button(self, name, pos, b, b_ro):
"""Adds a button to the buttons list. This function can be used for different buttons on a specific locations.
Parameters: string name - name of the button
topleft pos - position of the button (x, y)
string b - name of the button image
string b_ro - name of the button image when selected"""
b = load_image(os.path.join(DATADIR, b))
b_ro = load_image(os.path.join(DATADIR, b_ro))
rect = pygame.Rect(b.get_rect())
rect.x, rect.y = pos
self.buttons.append([name, rect, b, b_ro])
def draw_text(self, text, fontsize, color, x, y):
"""Draws a text line
Parameters: string text - Text string
int fontsize - The size of the font
Color color - color of the text
string x - x position of the text (LEFT, CENTER or RIGHT)
int y - y position of the text"""
font = pygame.font.Font(None, fontsize)
ren = font.render(text, 1, color)
if x == 'LEFT':
x = 25
elif x == 'CENTER':
x = (self.screen.get_width() / 2) - (ren.get_width() / 2)
elif x == 'RIGHT':
x = self.screen.get_width() - ren.get_width() - 25
self.screen.blit(ren, (x, y))
def draw_matches(self):
"""Draws the match (user) buttons showing their Title, First and Last name."""
font = pygame.font.Font(None, 36)
width = 0
height = 0
i = 1
j = 0
widest = 0
color = (255, 255, 255)
if not self.match_found:
# Replaces title
self.title = ''
self.draw_text('No face detected. Please choose your name or try again.',
28, (255, 255, 255), 'CENTER', 55)
for t in self.users:
if mouse_collision(t[1]):
color = (0, 255, 0)
else:
color = (255, 255, 255)
title_ren = font.render(t[0][1], 1, color)
name_ren = font.render(t[0][2] + ' ' + t[0][3], 1, color)
if name_ren.get_width() > widest:
widest = name_ren.get_width()
if title_ren.get_width() >= name_ren.get_width():
width = title_ren.get_width()
else:
width = name_ren.get_width()
height = (title_ren.get_height() + name_ren.get_height())
x = 25
y = (title_ren.get_height() + name_ren.get_height())
if (i % 9 == 0):
j += 1
i = 1
x_point = x + (j * (widest + 10))
y_point = (i * y) + 43 + (i * 10)
t[1] = pygame.Rect(x_point, y_point, width, height)
self.screen.blit(title_ren, (x_point, y_point))
self.screen.blit(name_ren, (x_point, y_point + title_ren.get_height()))
i += 1
def draw_buttons(self):
"""Draws all buttons that were created by create_button()."""
for b in self.buttons:
self.screen.blit(b[2], b[1])
if mouse_collision(b[1]):
self.screen.blit(b[3], b[1])
def draw_window_buttons(self, vertical = True):
"""Draws all window buttons specified for each window.
Parameters: bool vertical - if True then the buttons are drawn vertically, if False then the buttons are drawn horizontally."""
font = pygame.font.Font(None, 40)
h = 1
v = 1
width = 0
height = 0
for b in self.window_buttons:
total_width = (len(self.window_buttons) * (b[2].get_width())) + ((len(self.window_buttons) - 1) * 20)
if vertical:
width = ((self.screen.get_width()/ 2) - (b[2].get_width() / 2))
height = (b[2].get_height() + 40)
v = 1
else:
width = (self.screen.get_width() / 2) - (total_width / 2)
height = (self.screen.get_height() / 1.25) - (b[2].get_height() / 2)
h = 1
b[1] = b[2].get_rect()
b[1].x, b[1].y = v*(width+10) + ((v-1)*b[1].width - (v-1)*width), h*height
self.screen.blit(b[2], b[1])
if mouse_collision(b[1]):
self.screen.blit(b[3], b[1])
ren = font.render(b[0], 1, (255, 255, 255))
self.screen.blit(ren, ((b[1].width / 2) - (ren.get_width() / 2) + (v * (width + 10) + ((v-1)*b[1].width - (v-1)*width)), (h * height) + ren.get_height()))
h += 1
v += 1
def main_window(self):
"""Sets the scene to main window"""
print 'Opening main window'
self.set_scene(self.screen, main_window, main_buttons, title = main_title)
def login_window(self):
"""Sets the scene to login window"""
print 'Opening login window'
self.match_found = True
self.FD.captures = []
self.FD.reset_countdown()
self.set_scene(self.screen, 'Login', ['Try Again','Back'])
def take_picture_window(self):
"""Sets the scene to take picture window"""
print 'Opening picture window'
self.set_scene(self.screen, 'Pictures', ['Shoot','Back'])
def create_user_window(self):
"""Sets the scene to create user window"""
print 'Opening create user window'
self.set_scene(self.screen, 'Create User', ['Create', 'Back'])
def match_window(self, users):
"""Sets the scene to match window
Parameters: list users - list of users where each user contains an user_id, title, first and last name in this order"""
print 'Opening match window'
self.set_scene(self.screen, 'Match', ['Try Again', 'Main menu', 'Users List'], users, "Found Matches")
def user_list_window(self, match_found):
"""Sets the scene to user list window
Parameters: bool match_found - Determines the title of this window."""
print 'Opening user list window'
self.match_found = match_found
self.set_scene(self.screen, 'Users List', None, self.sql.get_user_list(), 'User List')
self.create_button('Back', (610, 10), 'back.png', 'back_ro.png')
def main(screen):
    """Entry point: build a Scene that opens on the main menu window."""
    Scene(screen, main_window, main_buttons, title=main_title)
| Python |
#!/usr/bin/python
# Imports
import sys
import pygame
import scene
class Inputbox(object):
    """Used when you want to know certain information which should be entered through key events."""
    def __init__(self, screen, sql, scene):
        """Initialization of Inputbox.
        Creates the necessary global parameters.
        Parameters: display screen - drawing screen
                    SQL sql - instance of SQL()
                    Scene scene - instance of Scene()"""
        print('Initializing Inputbox')
        self.screen = screen
        self.question = ''
        self.sql = sql
        self.scene = scene
        self.keepGoing = True
        self.existing_user = True
        self.current_string = []
        self.img = None
        self.font = pygame.font.Font(None, 25)
    def set_question(self, question):
        """Sets the question shown in the inputbox.
        Parameters: string question - what you want to know"""
        self.question = question
    def display_box(self, screen, question, division):
        """Draws the inputbox.
        Parameters: display screen - drawing screen
                    string question - message shown in the inputbox (which is what you want to know)
                    double division - y-coord is divided by this number"""
        x = screen.get_width() / 2
        y = screen.get_height() / division
        rect = pygame.Rect((0, 0, 500, 30))
        pygame.draw.rect(screen, (255,255,255), ((x - rect.width / 2), (y - rect.height / 2), rect.width, rect.height), 0)
        if len(question) != 0:
            screen.blit(self.font.render(question, 1, (0,0,0)), (x - rect.width / 2, y - rect.height / 3.5))
    def ask(self, events, division = 2):
        """Checks all the key events while the inputbox is being drawn.
        If a letter is pressed this letter is added to a string list.
        If backspace is pressed the last letter is removed from the string list.
        Returns the string list as a string.
        Parameters: event events - pygame.event.get()
                    double division - y-coord is divided by this number"""
        if not self.existing_user:
            self.scene.draw_text("No face was detected or the entered ID code does not exist.", 28, (255, 0, 0), 'CENTER', 335)
        # Bugfix: use str.join -- pygame.string is not a pygame API; it only
        # worked because old pygame accidentally re-exported the stdlib
        # `string` module.
        self.display_box(self.screen, self.question + ": " + "".join(self.current_string), division)
        for event in events:
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_BACKSPACE:
                    self.current_string = self.current_string[0:-1]
                elif event.key == pygame.K_MINUS:
                    self.current_string.append("_")
                elif event.key <= 127:
                    # Plain ASCII keycodes map directly to characters.
                    self.current_string.append(chr(event.key))
        return "".join(self.current_string)
    def reset(self):
        """Resets the inputbox cleaning the string list."""
        self.current_string = []
        self.existing_user = True
| Python |
#!/usr/bin/python
# Imports
import os
import opencv
from opencv.cv import *
from opencv.highgui import *
########## Variables ##########
# Database configuration
db_host = "localhost"
db_user = "root"
db_passwd = ""
# Amount of seconds until the login sequence starts
countdown_seconds = 5
# Amount of pictures taken when logging in
pictures_taken = 1
# Maximum amount of pictures to be saved in the database
max_amount_of_pics = 20
# Color of the ellipses around the faces in the pictures and fingerprint.
ellipse_color = 0, 255, 0 # Green
# Amount of possible matches showed after the login sequence.
amount_of_matches = 5
########## Methods ##########
def create_or_update_user_faces(FD, sql, base = 'capture/'):
"""If the fingerprint.png has been changed this method is used to update or create users_faces's the diffs in the database to the new and correct values.
Only works with the taken pictures, because the pic_names have to correspond to the pic_names in the database.
Parameters: FaceDetect FD - instance of FaceDetect()
SQL sql - instance of SQL()
string base - basepath"""
faces = os.listdir(base)
faces.sort()
for pic in faces:
filename = os.path.join(base, pic)
shape_diff, temp_diff = FD.match(filename)
user_id, rest = pic.split('_')
if sql.user_face_exists(pic):
face_id = sql.get_user_face(pic)
sql.update_user_face(shape_diff, temp_diff, pic, face_id)
print 'Updated user_face for', pic
else:
sql.create_user_face(shape_diff, temp_diff, pic, user_id)
print 'Created user_face for', pic
def set_background_colors(base, pictures):
    """Repaint the background of one or more pictures with ellipses.

    The ellipse color is configured at the top of this module.
    Parameters: string base - basepath
                string/list pictures - one image name, or a list of image names"""
    # Bugfix: the old `len(pictures[0]) == 1` heuristic misdetected a list
    # whose first filename was a single character and crashed on empty input;
    # check the type explicitly instead.
    if isinstance(pictures, basestring):
        pictures = [pictures]
    for picture in pictures:
        filename = os.path.join(base, picture)
        img = cvLoadImage(filename)
        fill_background(filename, img)
def fill_background(filename, img):
"""Fills the background of a picture with ellipses.
Parameters: string filename - location of the image
IplImage img - image"""
width = img.width / 2
height = img.height / 2
while width < (img.width / 2 + 25) and height < (img.height / 2 + 25):
cvEllipse(img, cvPoint(img.width / 2, img.height / 2), cvSize(width, height), 0, 0, 360, ellipse_color, 2, 8, 0)
width += 1
height += 1
cvSaveImage(filename, img)
print 'Resaved:', filename
def create_test_users(sql):
    """Insert up to 50 numbered test users, skipping any that already exist.
    Parameters: SQL sql - instance of SQL()"""
    total = 50
    for number in range(1, total + 1):
        # Login names use an underscore, display names a space.
        if not sql.user_exists(login_name = 'User_%s' % number):
            sql.create_user('User %s' % number)
| Python |
#!/usr/bin/python
import pygame
from data import *
class Dialog(object):
    """A Dialog has an 'OK'-button as a standard and can have one extra.
    Depending on the name of the extra button the function behind it should be written in the event loop in main()."""
    def __init__(self, screen, title, message, extra_button_text = None, scene = None):
        """Initialization of Dialog.
        Creates the necessary global parameters, including several rectangles which will be used to draw the dialog.
        Parameters: display screen
                    string title - title of the dialog
                    list message - list of strings (lines)
                    string extra_button_text - title of the possible extra button
                    Scene scene - instance of Scene()"""
        print('Initializing Dialog')
        self.screen = screen
        self.font = pygame.font.Font(None, 32)
        self.title = self.font.render(title, 1, (255, 255, 255))
        self.message = message
        self.extra_button_text = extra_button_text
        self.scene = scene
        self.keepGoing = True
        self.size = self.width, self.height = 500, 275
        self.user_id = ''
        # Dialog surface centered on screen, with a 1px contour around it.
        self.surface = pygame.Rect(((self.screen.get_width() / 2 - self.width / 2), (self.screen.get_height() / 2 - self.height / 2)), (self.size))
        self.surface_contour = pygame.Rect(self.surface.x - 1, self.surface.y - 1, self.surface.width + 2, self.surface.height + 2)
        self.bar = pygame.Rect(self.surface.x, self.surface.y, self.width, 35)
        self.bar_contour = pygame.Rect(self.surface.x, self.surface.y, self.width, self.bar.height)
        self.button_ren = self.font.render('OK', 1, (255, 255, 255))
        self.button = pygame.Rect(0, 0, self.button_ren.get_width() + 20, self.button_ren.get_height() + 20)
        x, y = self.surface.bottomright
        self.button.bottomright = x - 10, y - 10
        self.button_contour = pygame.Rect(self.button.x - 1, self.button.y - 1, self.button.width + 2, self.button.height + 2)
    def draw_extra_button(self):
        """Draws the extra button in the bottom-left corner of the dialog."""
        self.extra_button_ren = self.font.render(self.extra_button_text, 1, (255, 255, 255))
        self.extra_button = pygame.Rect(0, 0, self.extra_button_ren.get_width() + 20, self.extra_button_ren.get_height() + 20)
        x, y = self.surface.bottomleft
        self.extra_button.bottomleft = x + 10, y - 10
        self.extra_button_contour = pygame.Rect(self.extra_button.x -1, self.extra_button.y - 1, self.extra_button.width + 2, self.extra_button.height + 2)
        if mouse_collision(self.extra_button):
            pygame.draw.rect(self.screen, (45, 45, 45), self.extra_button)
        else:
            pygame.draw.rect(self.screen, (40, 77, 171), self.extra_button)
        pygame.draw.rect(self.screen, (150, 150, 150), self.extra_button_contour, 1)
        self.screen.blit(self.extra_button_ren, \
                         (self.extra_button.x + (self.extra_button.width / 2 - self.extra_button_ren.get_width() / 2), \
                          (self.extra_button.y + (self.extra_button.height / 2 - self.extra_button_ren.get_height() / 2))))
    def set_user_id(self, user_id):
        """Sets the global user_id parameter.
        Parameters: int user_id - user identification code"""
        self.user_id = user_id
    def main(self):
        """Main loop of the dialog, this will overrule the main loop of scene untill this one is ended.
        Draws all the necessary rectangles, strings and images for the dialog.
        Checks of key events.
        Quit and escape exit the dialog. Mousebuttonup is also tracked for both the standard button and the extra button.
        If a new extra button is added this should also be added in the event loop. """
        while self.keepGoing:
            pygame.draw.rect(self.screen, (255, 255, 255), self.surface)
            pygame.draw.rect(self.screen, (150, 150, 150), self.surface_contour, 2)
            pygame.draw.rect(self.screen, (40, 77, 171), self.bar)
            pygame.draw.rect(self.screen, (0, 0, 0), self.bar_contour, 1)
            self.screen.blit(self.title, (self.bar.x + 5, self.bar_contour.y + (self.bar.height / 2 - self.title.get_height() / 2)))
            y = 1
            for line in self.message:
                ren = self.font.render(line, 1, (45, 45, 45))
                y_point = (y * ren.get_height()) + (y * 20)
                self.screen.blit(ren, (self.surface.x + 15, (y_point + 175)))
                y += 1
            if mouse_collision(self.button):
                pygame.draw.rect(self.screen, (45, 45, 45), self.button)
            else:
                pygame.draw.rect(self.screen, (40, 77, 171), self.button)
            pygame.draw.rect(self.screen, (150, 150, 150), self.button_contour, 1)
            self.screen.blit(self.button_ren, \
                             (self.button.x + (self.button.width / 2 - self.button_ren.get_width() / 2), \
                              (self.button.y + (self.button.height / 2 - self.button_ren.get_height() / 2))))
            if self.extra_button_text:
                self.draw_extra_button()
            pygame.display.flip()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.keepGoing = False
                elif event.type == pygame.KEYUP:
                    if event.key == pygame.K_ESCAPE:
                        self.keepGoing = False
                elif event.type == pygame.MOUSEBUTTONUP:
                    if mouse_collision(self.button):
                        self.keepGoing = False
                        try:
                            self.scene.box.current_string = []
                        except AttributeError:
                            # No Scene attached to this dialog (scene is None).
                            pass
                    # Bugfix: self.extra_button only exists when
                    # extra_button_text was given -- without this guard any
                    # click outside the OK button raised AttributeError.
                    elif self.extra_button_text and mouse_collision(self.extra_button):
                        if self.extra_button_text == 'Shoot Picture':
                            self.keepGoing = False
                            self.scene.box.current_string = ['%s' % self.user_id]
                            self.scene.take_picture_window()
| Python |
# -*- coding: utf-8 -*-
"""
@author: Bruno Cezar Rocha
@titter: @rochacbruno
@company: blouweb.com
@depends: http://www.wbotelhos.com/gridy/ - Jquery Gridy Plugin
@include: http://nyromodal.nyrodev.com/ - nyroModal
@include: http://css3buttons.michaelhenriksen.dk/ - CSS3 Buttons
@depends: http://www.web2py.com - web2py Faster, Better and more easily web development!
@license for Gridy library and PowerGrid Plugin
The MIT License
Copyright (c) 2010 Washington Botelho dos Santos (jquery.gridy)
Copyright (c) 2011 Bruno Cezar Rocha (PowerGrid Plugin for web2py)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@DONATE! PayPal - rochacbruno@gmail.com
Go VEGAN!
"""
from gluon import *
from gluon.dal import Table ,Query, Set, Rows, Row
class CallBack(dict):
    """Builds the JSON-style response dict consumed by the Gridy grid plugin.

    Populates itself (it IS a dict) with 'entityList' (the page of records),
    'total' (the filtered record count) and 'headers' (column descriptors),
    reading paging/search/sort parameters from `params` (defaults to
    current.request.vars) and querying a web2py DAL Table or Query.
    """
    # NOTE(review): virtualfields=[] is a mutable default argument shared
    # across calls -- appending to it below accumulates state; verify intent.
    def __init__(self, datasource, params=None, virtualfields=[], quiz=None, groupfilter=None, nivfilter=None):
        # 'ALL'/'all' are the UI's "no filter" sentinels.
        if groupfilter == 'ALL':
            groupfilter=None
        if nivfilter == 'all':
            nivfilter = None
        if not params:
            params = current.request.vars
        assert params
        # GET PARAMS
        page = int(params.get('page', 1))
        rows = int(params.get('rows', 5))
        key = params.get('search', None)
        find = params.get('find', None)
        sortName = params.get('sortName', None)
        sortOrder = params.get('sortOrder', 'asc')
        searchBy = params.get('searchBy','like') #like, equal, notequal, startswith, endswith, gt, lt, ge, le
        # DEFINE PAGINATION
        start = (rows * page) - rows
        end = start + rows
        #limiter
        limiter = (start, end)
        assert isinstance(datasource, (Table, Query))
        # IF TABLE
        if isinstance(datasource, Table):
            for v in virtualfields:
                datasource.virtualfields.append(v)
            # Without both a search key and a field to search in, select all.
            if not (key and find):
                query = datasource
            else:
                try:
                    if searchBy == 'like':
                        query = datasource[find].like("%"+key+"%")
                    elif searchBy == 'equal':
                        query = datasource[find] == key
                    elif searchBy == 'notequal':
                        query = datasource[find] != key
                    elif searchBy == 'startswith':
                        query = datasource[find].like(key+"%")
                    elif searchBy == 'endswith':
                        query = datasource[find].like("%"+key)
                except:
                    # Unknown field name: fall back to searching on 'id'.
                    query = datasource['id'].like("%"+key+"%")
            #counter
            # NOTE(review): these quiz filters are applied again below before
            # fetching, so the condition ends up ANDed into the query twice.
            if groupfilter and quiz:
                query=query&datasource.db[quiz]['group_id'].like(groupfilter)
            if nivfilter and quiz:
                query=query&datasource.db[quiz]['difficulty'].like(nivfilter)
            recordscount = datasource._db(query).count()
            #SORTING
            sorter = None
            try:
                if (sortName and (sortOrder == 'asc')):
                    sorter = datasource[sortName]
                elif (sortName and (sortOrder == 'desc')):
                    sorter = ~datasource[sortName]
            except:
                pass
            # FETCHING
            if groupfilter and quiz:
                query=query&datasource.db[quiz]['group_id'].like(groupfilter)
            if nivfilter and quiz:
                query=query&datasource.db[quiz]['difficulty'].like(nivfilter)
            recordset = datasource._db(query)
        # IF QUERY
        elif isinstance(datasource, Query):
            for v in virtualfields:
                datasource.first.table.virtualfields.append(v)
            if not (key and find):
                query = datasource
            else:
                # Unqualified field name: search on the query's first table.
                if len(find.split('.')) == 1:
                    try:
                        if searchBy == 'like':
                            query = datasource.first.table[find].like("%"+key+"%")
                        elif searchBy == 'equal':
                            query = datasource.first.table[find] == key
                        elif searchBy == 'notequal':
                            query = datasource.first.table[find] != key
                        elif searchBy == 'startswith':
                            query = datasource.first.table[find].like(key+"%")
                        elif searchBy == 'endswith':
                            query = datasource.first.table[find].like("%"+key)
                    except:
                        query = datasource.first.table['id'].like("%"+key+"%")
                else:
                    # "table.field" form: AND the search into the base query.
                    try:
                        tablename = find.split('.')[0]
                        field = find.split('.')[1]
                        #ds.db(ds&ds.db.auth_user.first_name.like('%jon%'))
                        if searchBy == 'like':
                            query = datasource&datasource.db[tablename][field].like("%"+key+"%")
                        elif searchBy == 'equal':
                            query = datasource&datasource.db[tablename][field] == key
                        elif searchBy == 'notequal':
                            query = datasource&datasource.db[tablename][field] != key
                        elif searchBy == 'startswith':
                            query = datasource&datasource.db[tablename][field].like(key+"%")
                        elif searchBy == 'endswith':
                            query = datasource&datasource.db[tablename][field].like("%"+key)
                    except:
                        query = datasource&datasource.db[tablename]['id'].like("%"+key+"%")
            #counter
            # NOTE(review): same double application of the quiz filters as in
            # the Table branch above.
            if groupfilter and quiz:
                query=query&datasource.db[quiz]['group_id'].like(groupfilter)
            if nivfilter and quiz:
                query=query&datasource.db[quiz]['difficulty'].like(nivfilter)
            recordscount = datasource.db(query).count()
            #SORTING
            sorter = None
            try:
                if (sortName and (sortOrder == 'asc')):
                    if len(sortName.split('.')) == 1:
                        sorter = datasource.first.table[sortName]
                    else:
                        tablename = sortName.split('.')[0]
                        field = sortName.split('.')[1]
                        sorter = datasource.db[tablename][field]
                elif (sortName and (sortOrder == 'desc')):
                    if len(sortName.split('.')) == 1:
                        sorter = ~datasource.first.table[sortName]
                    else:
                        tablename = sortName.split('.')[0]
                        field = sortName.split('.')[1]
                        sorter = ~datasource.db[tablename][field]
            except:
                pass
            # FETCHING
            if groupfilter and quiz:
                query=query&datasource.db[quiz]['group_id'].like(groupfilter)
            if nivfilter and quiz:
                query=query&datasource.db[quiz]['difficulty'].like(nivfilter)
            recordset = datasource.db(query)
        # Only the requested page is pulled from the database.
        records = recordset.select(limitby=limiter,
                                   orderby=sorter)
        # RETURN OBJECT
        self['entityList'] = []
        self['total'] = recordscount
        self['headers'] = []
        # Building the headers
        if isinstance(datasource, Query):
            for field in datasource.first.table.fields:
                self['headers'].append([field,field])
        if isinstance(datasource, Table):
            fields = datasource.fields
        #for r in records:
        #    if callable(r):
        #        self['entityList'].append(r.as_dict())
        for record in records:
            #TODO: REPRESENT OF FIELDS
            # records_dict = {}
            # if callable(record):
            #     for r in record:
            #         if isinstance(datasource, Query):
            #             if callable(datasource.first.table[r].represent):
            #                 print '___'#,datasource.first.table[r].represent
            #         else:
            #             print '###',r,record(r)
            if callable(record):
                #if isinstance(record, Row):
                # No 'id' key: presumably a joined Row grouped per table --
                # flatten it to "tablename_field" keys. TODO confirm.
                if not record.has_key('id'):
                    newrecord = {}
                    for r in record:
                        prefix = str(r)
                        newr = {}
                        for x,v in record[r].items():
                            if not callable(v):
                                newr["_".join([prefix,x])] = v
                        newrecord.update(newr)
                    self['entityList'].append(newrecord)
                else:
                    self['entityList'].append(record.as_dict())
# -*- coding: utf-8 -*-
"""
PowerGrid
@Version Beta 0.1 - 22/07/2011
@author: Bruno Cezar Rocha
@titter: @rochacbruno
@company: blouweb.com
@depends: http://www.wbotelhos.com/gridy/ - Jquery Gridy Plugin
@include: http://nyromodal.nyrodev.com/ - nyroModal
@include: http://css3buttons.michaelhenriksen.dk/ - CSS3 Buttons
@depends: http://www.web2py.com - web2py Faster, Better and more easily web development!
@license for Gridy library and PowerGrid Plugin
The MIT License
Copyright (c) 2011 Washington Botelho dos Santos (jquery.gridy)
Copyright (c) 2011 Bruno Cezar Rocha (PowerGrid Plugin for web2py)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@DONATE! PayPal - rochacbruno@gmail.com
Go VEGAN!
#TODO:
- EXPORT TO EXCEL / PDF
- SELECT FIELDS IN CALLBACK
- POST TEMPLATE
- SCROLLING
- SEARCH ALL
- USE LABELS
- USE RECORD REPRESENTATION
"""
from gluon import *
__all__ = ['PowerGrid','PowerScript']
class PowerScript(DIV):
    """A DIV serialized as a <script> tag whose children are emitted
    verbatim (no XML escaping), so generated javascript survives
    helper serialization intact."""
    tag = 'script'

    def xml(self):
        # Let the DIV machinery serialize the tag attributes, but rebuild
        # the inner text ourselves so subcomponents are NOT escaped.
        (attrs, _ignored) = self._xml()
        body = '\n'.join(str(part) for part in self.components)
        if not body:
            return DIV.xml(self)
        return '<%s%s>%s</%s>' % (self.tag, attrs, body, self.tag)
class PowerGrid(DIV):
    """
    web2py helper that renders a jQuery "Gridy" data grid.

    The grid is built client side: this DIV emits an empty placeholder,
    a <script> initialising $('#target').gridy(options), a jQuery row
    template and some inline CSS.  Row data is pulled via AJAX from the
    JSON URL given in the mandatory 'callback' attribute.
    """
    tag = 'div'

    def __init__(self,
                 *components,
                 **attributes):
        """
        Accepts normal DIV components plus grid attributes:
        callback (required data URL), target, template, searchBy,
        as_html, headers, buttons, options, minW/minH and the
        hide*button flags.  Missing attributes get defaults below.
        """
        T = current.T
        #define the id if not passed
        if not '_id' in attributes:
            attributes['_id'] = "powergrid-wrapper"
        if not 'template' in attributes:
            attributes['template'] = 'grid'
        if not 'searchBy' in attributes:
            attributes['searchBy'] = 'like'
        if not 'target' in attributes:
            attributes['target'] = 'powergrid'
        if not 'as_html' in attributes:
            attributes['as_html'] = True
        if not 'callback' in attributes:
            raise Exception("Callback is not defined")
        else:
            # no headers given: discover them by querying the callback once
            if not 'headers' in attributes:
                attributes['headers'] = self.get_headers(attributes['callback'])
        self.components = list(components)
        #self.components.append(DIV(_id="find-target"))
        # the empty DIV the gridy plugin will populate client side
        self.components.append(DIV(_id=attributes['target']))
        #self.components.append("aaa")
        DIV.__init__(self,
                     *self.components,
                     **attributes)
        from gluon.storage import Storage
        self.attributes = Storage(attributes)
        if not 'options' in self.attributes:
            self.attributes.options = {}
        #TODO: AUTO-SCROLL
        #if len(self.attributes.get('headers',[])) >= 5:
        #    self.attributes.options['scroll'] = True
        #    self.attributes.options['height'] = 300
        #    self.attributes.options['width'] = 900
        self.append_js()

    def get_headers(self, url):
        """
        Fetch one row from the JSON callback and return its 'headers'
        list; falls back to a dummy header when anything goes wrong.

        NOTE(review): the response body is eval()'d, which executes
        arbitrary Python if the callback URL is not trusted -- consider
        json parsing instead.  The bare except also hides real errors.
        """
        from urllib2 import urlopen
        if 'http' in url:
            u = urlopen(url).read()
        else:
            # relative URL: resolve against the current host
            u = urlopen('http://'+current.request.env.http_host+url+"?page=1&rows=1").read()
        null = None  # make JSON's 'null' resolve during eval()
        d = eval(u)
        try:
            return d['headers']
        except:
            return [['noheaders','noheaders']]

    def append_js(self):
        """
        Register the gridy/nyroModal/css3buttons static assets and append
        the generated <script>, row template and inline <style> blocks
        that drive the grid.
        """
        current.response.files.append(URL('static','plugin_PowerGrid',args=['css','jquery.gridy.css']))
        current.response.files.append(URL('static','plugin_PowerGrid',args=['buttons','stylesheets','css3buttons.css']))
        current.response.files.append(URL('static','plugin_PowerGrid',args=['modal','styles','nyroModal.css']))
        current.response.files.append(URL('static','plugin_PowerGrid',args=['js','jquery.tmpl.min.js']))
        current.response.files.append(URL('static','plugin_PowerGrid',args=['js','jquery.gridy.min.js']))
        current.response.files.append(URL('static','plugin_PowerGrid',args=['modal','js','jquery.nyroModal.custom.min.js']))
        current.response.files.append(URL('static','plugin_PowerGrid',args=['modal','js','initmodal.js']))
        if 'ie6' in self.attributes:
            current.response.files.append(URL('static','plugin_PowerGrid',args=['modal','js','jquery.nyroModal-ie6.min.js']))
        template = self.power_template(template=self.attributes['template'])
        T = current.T
        params = self.attributes.options.get('params','')
        params += '&searchBy=%s' % self.attributes.get('searchBy','like')
        lenheaders = len(self.attributes.get('headers',[]))
        lenbuttons = len(self.attributes.get('buttons',[]))
        # equal column width; buttons column assumed ~100px each
        # NOTE(review): integer division in py2; raises ZeroDivisionError
        # when there are neither headers nor buttons -- TODO confirm callers
        colswidth = [(self.attributes.options.get('width', 900)+(lenbuttons*100)) / (lenheaders + lenbuttons) for head in self.attributes.get('headers',[])]
        # CONTROL BUTTONS
        if not 'hidecontrolbuttons' in self.attributes:
            self.attributes['hidecontrolbuttons'] = False
        if not self.attributes['hidecontrolbuttons']:
            if not 'hideaddbutton' in self.attributes:
                self.attributes['hideaddbutton'] = False
            if not self.attributes['hideaddbutton']:
                if not 'addurl' in self.attributes:
                    self.attributes['addurl'] = '#'
                if not 'addLabel' in self.attributes:
                    # default label is Dutch for "Add"
                    self.attributes['addLabel'] = 'Toevoegen'
                if not 'addTitle' in self.attributes:
                    self.attributes['addTitle'] = self.attributes['addLabel']
                self.attributes.addbutton = """<a id=%(target)s_addbutton target=_blank class=button href=%(addurl)s>
%(addLabel)s</a>""" % self.attributes
            else:
                self.attributes.addbutton = ''
                self.attributes['addTitle'] = ''
            if not 'hiderefreshbutton' in self.attributes:
                self.attributes['hiderefreshbutton'] = False
            if not self.attributes['hiderefreshbutton']:
                if not 'refreshLabel' in self.attributes:
                    # default label is Dutch for "Refresh"
                    self.attributes['refreshLabel'] = 'Ververs'
                self.attributes.refreshbutton = """<a id=%(target)s_refreshbutton class=button href=# >
%(refreshLabel)s</a>""" % self.attributes
            else:
                self.attributes.refreshbutton = ''
            self.attributes['controlbuttons'] = '''<span id=%(target)s_controlbuttons>
%(addbutton)s
%(refreshbutton)s
</span>
''' % self.attributes
            # js that injects the control buttons and wires the refresh click
            self.attributes.addcontrolbuttons = """
$('#%(target)s_controlbuttons').remove();
$('#%(target)s').prepend('%(controlbuttons)s');
$('#%(target)s_controlbuttons').css('float','left').css('margin-right','40px');
$('#%(target)s_addbutton').addClass('addmodal positive');
$('#%(target)s_addbutton').attr('title','%(addTitle)s');
$('#%(target)s_addbutton').prepend('<span class=icon></span>');
$('#%(target)s_addbutton span').addClass('plus');
$('#%(target)s_refreshbutton').prepend('<span class=icon></span>');
$('#%(target)s_refreshbutton span').addClass('loop');
$('#%(target)s_refreshbutton').click(function(e){
e.preventDefault();
$.fn.gridy.reload('#%(target)s',{
page:$('#%(target)s input#current-page').val(),
rows:$('#%(target)s .gridy-row-option select').val(),
find:$('#%(target)s .gridy-search select').val(),
sortName:$('#%(target)s input#current-sort-name').val(),
sortOrder:$('#%(target)s input#current-sort-order').val(),
search:$('#%(target)s .gridy-search input#search').val()
}
);
});
""" % self.attributes
        else:
            self.attributes.addcontrolbuttons = ''
        if not 'crudstyle' in self.attributes:
            # stylesheet injected into the modal iframes
            self.attributes.crudstyle = """
<link type=text/css rel=stylesheet href=%s>
""" % URL('static','plugin_PowerGrid',args='css/crud.css' )
        if not 'minW' in self.attributes:
            self.attributes.minW = 600
        if not 'minH' in self.attributes:
            self.attributes.minH = 400
        # full gridy option dict; every entry honours a user override first
        self.attributes.options = {
            'arrowDown': self.attributes.options.get('arrowDown', 'gridy-arrow-down'),
            'arrowNone': self.attributes.options.get('arrowNone', 'gridy-arrow-none'),
            'arrowUp': self.attributes.options.get('arrowUp', 'gridy-arrow-up'),
            'before': self.attributes.options.get('before', None),
            'buttonBackTitle':self.attributes.options.get('buttonBackTitle', str(T('Back'))),
            'buttonNextTitle':self.attributes.options.get('buttonNextTitle', str(T('Next'))),
            'buttonMax': self.attributes.options.get('buttonMax', 5),
            'buttonOption': self.attributes.options.get('buttonOption', True),
            'buttonTitle': self.attributes.options.get('buttonTitle', str(T('page'))),
            'buttonsWidth': self.attributes.options.get('buttonsWidth', 'auto'),
            'cache': self.attributes.options.get('cache', False),
            'clickFx': self.attributes.options.get('clickFx', True),
            'colsWidth': self.attributes.options.get('colsWidth', colswidth ),
            # 'complete' runs after every grid load: moves the find select,
            # (re)adds the control buttons and wires the nyroModal popups
            'complete': self.attributes.options.get('complete', """function(){
$('.gridy-find-option select').prependTo('.gridy-search');
%(addcontrolbuttons)s
$('.confirmationmodal').click(function(e){
e.preventDefault();
if (confirm($(this).attr('title'))){
$.ajax({
type:'POST',
url:$(this).attr('href'),
success: function(){
$.fn.gridy.reload('#%(target)s',{
page:$('#%(target)s input#current-page').val(),
rows:$('#%(target)s .gridy-row-option select').val(),
find:$('#%(target)s .gridy-search select').val(),
sortName:$('#%(target)s input#current-sort-name').val(),
sortOrder:$('#%(target)s input#current-sort-order').val(),
search:$('#%(target)s .gridy-search input#search').val(),
}
);
}
});
}
}
);
$('.refreshmodal').nm({
callbacks:{
initElts:function(nm){
var minW = parseInt(nm.opener.attr('minW'));
var minH = parseInt(nm.opener.attr('minH'));
nm.sizes.minW = ((nm.opener.attr('minW') == undefined) ? %(minW)s : minW);
nm.sizes.minH = ((nm.opener.attr('minH') == undefined) ? %(minH)s : minH);
},
size: function(nm){$('iframe').css('width','100%%').css('height', nm.sizes.minH - 5 +'px' );},
filledContent: function(nm){
$('.nyroModalIframe iframe').load( function(){
$('body', $('iframe').contents()).prepend('%(crudstyle)s');
});
},
close: function(nm){
$.fn.gridy.reload('#%(target)s',{
page:$('#%(target)s input#current-page').val(),
rows:$('#%(target)s .gridy-row-option select').val(),
find:$('#%(target)s .gridy-search select').val(),
sortName:$('#%(target)s input#current-sort-name').val(),
sortOrder:$('#%(target)s input#current-sort-order').val(),
search:$('#%(target)s .gridy-search input#search').val(),
}
);
}
}
});
$('.addmodal').nm({
callbacks:{
initElts:function(nm){
var minW = parseInt(nm.opener.attr('minW'));
var minH = parseInt(nm.opener.attr('minH'));
nm.sizes.minW = ((nm.opener.attr('minW') == undefined) ? %(minW)s : minW);
nm.sizes.minH = ((nm.opener.attr('minH') == undefined) ? %(minH)s : minH);
},
size: function(nm){$('iframe').css('width','100%%').css('height', nm.sizes.minH - 30 +'px' );},
filledContent: function(nm){
$('.nyroModalIframe iframe').load( function(){
$('body', $('iframe').contents()).prepend('%(crudstyle)s');
});
},
close: function(nm){
$.fn.gridy.reload('#%(target)s',{
page:'1',
rows:$('#%(target)s .gridy-row-option select').val(),
find:$('#%(target)s .gridy-search select').val(),
sortName:'id',
sortOrder:'desc',
search:$('#%(target)s .gridy-search input#search').val(),
}
);
}
}
});
$('.modal').nm({
callbacks:{
initElts:function(nm){
var minW = parseInt(nm.opener.attr('minW'));
var minH = parseInt(nm.opener.attr('minH'));
nm.sizes.minW = ((nm.opener.attr('minW') == undefined) ? %(minW)s : minW);
nm.sizes.minH = ((nm.opener.attr('minH') == undefined) ? %(minH)s : minH);
},
size: function(nm){$('iframe').css('width','100%%').css('height', nm.sizes.minH - 5 +'px' );},
filledContent: function(nm){
$('.nyroModalIframe iframe').load( function(){
$('body', $('iframe').contents()).prepend('%(crudstyle)s');
});
},
}})
;}""" % self.attributes
            ),
            'contentType': self.attributes.options.get('contentType', 'application/x-www-form-urlencoded; charset=utf-8'),
            'dataType': self.attributes.options.get('dataType', 'json'),
            'debug': self.attributes.options.get('debug', False),
            'error': self.attributes.options.get('error', None),
            'find': self.attributes.options.get('find', ''),
            'findsName': self.attributes.options.get('findsName', self.attributes.get('headers')),
            'findTarget': self.attributes.options.get('findTarget', 'gridy-search'),
            'height': self.attributes.options.get('height', 'auto'),
            'headersName': self.attributes.options.get('headersName', self.attributes.get('headers')),
            'headersWidth': self.attributes.options.get('headersWidth', []),
            'hoverFx': self.attributes.options.get('hoverFx', True),
            'jsonp': self.attributes.options.get('jsonp', False),
            'jsonpCallback': self.attributes.options.get('jsonpCallback', 'callback'),
            'loadingIcon': self.attributes.options.get('loadingIcon', 'gridy-loading'),
            'loadingOption': self.attributes.options.get('loadingOption', True),
            'loadingText': self.attributes.options.get('loadingText', str(T('Loading...'))),
            'messageOption': self.attributes.options.get('messageOption', True),
            'messageTimer': self.attributes.options.get('messageTimer', 4000),
            'noResultOption': self.attributes.options.get('noResultOption', True),
            'noResultText': self.attributes.options.get('noResultText', str(T('No items found!'))),
            'page': self.attributes.options.get('page', 1),
            'params': params,
            'resultOption': self.attributes.options.get('resultOption', True),
            'resultText': self.attributes.options.get('resultText', str(T('Displaying {from} - {to} of {total} items'))),
            'rows': self.attributes.options.get('rows', 5),
            'rowsNumber': self.attributes.options.get('rowsNumber', [3, 5, 10, 25, 50, 100]),
            'rowsTarget': self.attributes.options.get('rowsTarget', 'gridy-content'),
            'search': self.attributes.options.get('search', ''),
            'searchFocus': self.attributes.options.get('searchFocus', False),
            'searchOption': self.attributes.options.get('searchOption', True),
            'searchButtonLabel':self.attributes.options.get('searchButtonLabel', str(T('search'))),
            'searchButtonTitle':self.attributes.options.get('searchButtonTitle', str(T('Start the search'))),
            'searchText': self.attributes.options.get('searchText', str(T('type your search here...'))),
            'scroll': self.attributes.options.get( 'scroll', False),
            'sortersName': self.attributes.options.get('sortersName', []),
            'sortName': self.attributes.options.get('sortName', ''),
            'sortOrder': self.attributes.options.get('sortOrder', 'asc'),
            'sorterWidth': self.attributes.options.get('sorterWidth', 'auto'),
            'success': self.attributes.options.get('success', None),
            'template': self.attributes.options.get('template', 'template'),
            'templateStyle': self.attributes.options.get('templateStyle', 'gridy-default'),
            'type': self.attributes.options.get('type', 'post'),
            'url': self.attributes.get('callback'),
            'width': self.attributes.options.get('width', 900),
            'resize': self.attributes.options.get('resize', True)
            }
        script = """
$(function() {
$('#%(target)s').gridy(
%(options)s
);
})""" % self.attributes
        # crude python-repr -> javascript translation of the options dict
        # (True/False/None literals, unquoting the embedded functions, ...)
        self.append(PowerScript(script.replace('True','true').\
                                replace('False','false').\
                                replace('None','null').\
                                replace('"function','function').\
                                replace(';}"',';}').\
                                replace(']js"',' ').\
                                replace('"js[',' ').\
                                replace('\\n','').\
                                replace('arrownull','arrowNone'),
                                _type="text/javascript"))
        # the jQuery row template used by gridy to render each record
        self.append(PowerScript(template, _id='template',_type='text/x-jquery-tmpl'))
        # inline per-instance CSS (row borders, button column, widths)
        self.append(STYLE("""
div.gridy-default div.gridy-row {
border: 1px solid #CCC;
float: left;
margin-bottom: -1px;
width:%spx;
}""" % self.attributes.options.get('width', 900)
                          ,
                          """
.%s {
position:relative;
float:right !important;
}
""" % ('%s_buttons' % self.attributes['target'])
                          ,
                          """
#%(target)s {
width:%(width)spx;
margin:auto;
}
""" % dict(target=self.attributes['target'],
           width=self.attributes.options.get('width', 900)
           )
                          ,
                          """
.gridy-footer {
line-height:0 !important;
}
.gridy-find-option select, .gridy-row-option select, .gridy-search select{
width:auto !important;
}
"""
                          )
                    )

    def power_template(self, template='grid'):
        """
        Build (or pass through) the jQuery row template.  When
        template=='grid' a DIV per header field is generated; as_html
        decides between '{{html x}}' (raw html) and '${x}' (escaped).
        Row action buttons, when configured, are appended as links.
        """
        if template == 'grid':
            # auto generated jquery template
            template = DIV(_id="${id}")
            [template.append(DIV('{{html %s}}' % head[0])) \
                if self.attributes['as_html'] \
                else template.append(DIV('${%s}' % head[0])) \
                for head in self.attributes.get('headers',[])
            ]
        # each button is a tuple: (label, href, target, title, class,
        # [icon-class], [(minW, minH)]) -- trailing entries optional
        if self.attributes.get('buttons', None):
            div = DIV(_class='%s_buttons' % self.attributes['target'])
            for button in self.attributes['buttons']:
                try:
                    icon = SPAN(_class='%s icon' % button[5])
                except:
                    icon = ''
                button_attributes = {}
                button_attributes['_href'] = button[1]
                button_attributes['_target'] = button[2]
                button_attributes['_title'] = button[3]
                button_attributes['_class'] = button[4]
                try:
                    button_attributes['_minW'] = button[6][0]
                except:
                    'No width for button'
                try:
                    button_attributes['_minH'] = button[6][1]
                except:
                    'no height for button'
                div.append(A(icon, button[0],**button_attributes))
            template.append(div)
        else:
            template = template
        return template

    def callback(self, datasource):
        """Server-side helper: wrap `datasource` in the CallBack JSON responder."""
        from CallBack import CallBack
        #if current.request.extension == 'json':
        return CallBack(datasource)
| Python |
## Test script for testing remote RPC CALLS WHICH NEED
## admin authentication
##
## Usage: python testAdminXMLRPC.py [local]
# Python 2 script (print statements, xmlrpclib): lists pending packages.
import os,sys
import xmlrpclib
import pprint
#VMWARE TEST NODE
# node credentials -- look MAC-address derived, TODO confirm
user="000c29764c2e"
pw="00505630601a"
# any single extra CLI argument selects the local dev server
if len(sys.argv)==2:
    print 'Assuming local mode'
    server_url = 'http://%s:%s@localhost:8000/provisioner/xmlrpc/call/xmlrpc' % (user,pw)
else:
    server_url = 'http://%s:%s@update.braintrainerplus.com/xmlrpc/call/xmlrpc' % (user,pw)
server = xmlrpclib.Server(server_url)
print "TIME"
print server.giveMeTime()
# fetch the package list for this node and print each entry
updateInfo=server.getCurrentPackageInfo()
for update in updateInfo['packages']:
    print update
## Test script for testing remote RPC CALLS WHICH NEED
## admin authentication
##
## Usage: python removePackage.py [local]
# Python 2 script: interactively flags one package as removed on the server.
import os,sys
import xmlrpclib
#VMWARE TEST NODE
# node credentials -- look MAC-address derived, TODO confirm
user="000c29764c2e"
pw="00505630601a"
# any single extra CLI argument selects the local dev server
if len(sys.argv)==2:
    print 'Assuming local mode'
    server_url = 'http://%s:%s@localhost:8000/provisioner/xmlrpc/call/xmlrpc' % (user,pw)
else:
    server_url = 'http://%s:%s@update.braintrainerplus.com/xmlrpc/call/xmlrpc' % (user,pw)
server = xmlrpclib.Server(server_url)
print "TIME"
print server.giveMeTime()
# ask which package to flag, then notify the server
package_id=raw_input("Gimme package id:")
print server.packageRemoved(package_id)
## Test script for testing remote RPC CALLS WHICH NEED
## admin authentication
##
## Usage: python testAdminXMLRPC.py [local]
# Python 2 script: walks the pending package list and reports a
# simulated successful install for each package.
import os,sys
import xmlrpclib
#VMWARE TEST NODE
# node credentials -- look MAC-address derived, TODO confirm
user="000c29764c2e"
pw="00505630601a"
# any single extra CLI argument selects the local dev server
if len(sys.argv)==2:
    print 'Assuming local mode'
    server_url = 'http://%s:%s@localhost:8000/provisioner/xmlrpc/call/xmlrpc' % (user,pw)
else:
    server_url = 'http://%s:%s@update.braintrainerplus.com/xmlrpc/call/xmlrpc' % (user,pw)
server = xmlrpclib.Server(server_url)
print "TIME"
print server.giveMeTime()
updateInfo=server.getCurrentPackageInfo()
# pretend each pending package installed fine and log that server side
for update in updateInfo['packages']:
    print "Going to install:"
    print update
    print server.saveInstallLog(update['id'],'Simulated install went ok', 'ok')
## Test script for testing remote RPC CALLS WHICH NEED
## admin authentication
##
## Usage: python testAdminXMLRPC.py [local]
import os,sys
import xmlrpclib
#NODE Q3100083
user="000c29764c2e"
pw="00505630601a"
if len(sys.argv)==2:
#assume local mode
server_url = 'http://%s:%s@localhost:8000/provisioner/xmlrpc/call/xmlrpc' % (user,pw)
else:
server_url = 'http://%s:%s@update.braintrainerplus.com/xmlrpc/call/xmlrpc' % (user,pw)
server = xmlrpclib.Server(server_url)
print "TIME"
print server.giveMeTime()
print server.getCurrentPackageInfo()
| Python |
## Test script for testing remote RPC CALLS WHICH NEED
## admin authentication
##
## Usage: python registerNode.py adminuser adminpass [local]
# Python 2 script: interactively registers and tags a new kiosk node.
import os,sys
import xmlrpclib
# admin credentials come from the command line
user=sys.argv[1]
pw=sys.argv[2]
# a third CLI argument selects the local dev server
if len(sys.argv)==4:
    #assume local mode
    server_url = 'http://%s:%s@localhost:8000/provisioner/xmlrpc/call/xmlrpc' % (user,pw)
else:
    server_url = 'http://%s:%s@update.braintrainerplus.com/xmlrpc/call/xmlrpc' % (user,pw)
server = xmlrpclib.Server(server_url)
print server.giveMeTime()
mac1=raw_input("Mac 1:")
mac2=raw_input("Mac 2:")
name=raw_input("Qiosq node name:")
tags=raw_input("Tags:")
# NOTE(review): 'name' is passed twice here -- presumably the last argument
# is a description/display name; verify against the registerNode signature
print server.registerNode(name,mac1,mac2,name)
print server.tagNode(mac1,mac2,tags)
## Test script for testing remote RPC CALLS WHICH NEED
## admin authentication
##
## Usage: python testAdminXMLRPC.py user pass [local]
# Python 2 script: announces freshly uploaded packages to the server.
import os,sys
import xmlrpclib
# admin credentials come from the command line
user=sys.argv[1]
pw=sys.argv[2]
# a third CLI argument selects the local dev server
if len(sys.argv)==4:
    #assume local mode
    server_url = 'http://%s:%s@localhost:8000/provisioner/xmlrpc/call/xmlrpc' % (user,pw)
else:
    server_url = 'http://%s:%s@update.braintrainerplus.com/xmlrpc/call/xmlrpc' % (user,pw)
server = xmlrpclib.Server(server_url)
# arguments appear to be (filename, md5sum, package_name, package_type,
# apt dependencies) -- TODO confirm against the server-side signature
print server.newPackageUploaded('BingoUnity_0.7.tar.gz.gpg','6ce6ffb104cb6c17c6e58574a3eddad3','BingoUnity_0.7','unity','wine')
print server.newPackageUploaded('btpkivyquiz_1.0.tar.gz.gpg','6916148019d7dffbf6d84c7ade0d30ec','btpkivyquiz_1.0','kivy','python-kivy, gstreamer0.10-plugins-good')
print server.newPackageUploaded('KivyMemory_1.0.tar.gz.gpg','0beca8962e2d9a8c19340eed5a8d10c9','KivyMemory_1.0','kivy','python-kivy, gstreamer0.10-plugins-good')
print server.newPackageUploaded('btpkivy_1.0.tar.gz.gpg','e9acd06eba881677efa638bcb1b80a77','btpkivy_1.0','kivy','python-kivy, gstreamer0.10-plugins-good')
## Test script for testing remote RPC CALLS WHICH NEED
## admin authentication
##
## Usage: python testAdminXMLRPC.py [local]
# Python 2 script: exercises the node-side info calls for node Q3100083.
import os,sys
import xmlrpclib
#NODE Q3100083
# node credentials -- look MAC-address derived, TODO confirm
user="6c626d102fe8"
pw="6c626d4ffcce"
# any single extra CLI argument selects the local dev server
if len(sys.argv)==2:
    #assume local mode
    server_url = 'http://%s:%s@localhost:8000/provisioner/xmlrpc/call/xmlrpc' % (user,pw)
else:
    server_url = 'http://%s:%s@update.braintrainerplus.com/xmlrpc/call/xmlrpc' % (user,pw)
server = xmlrpclib.Server(server_url)
print "TIME"
print server.giveMeTime()
#Do a fake install of package 1 on node 6c626d10d7ba
print "Package download 9"
#print server.packageDownloadStarted(9) #package ID 6!
#No clouding of DB
#print "Package install 9"
print server.saveInstallLog(9,'logje van een kivy pakket','ok')
#NEW temp test for intelligent package info
print "NODE TAGS:"
print server.getCurrentNodeTags()
print "NODE PACKAGES:"
print server.getCurrentNodePackages()
print server.getCurrentPackageInfo()
# Generic tagging: a named tag plus a polymorphic link table that can
# attach any tag to any record via (table_name, record_id).
db.define_table('tag',
    Field('name',requires=IS_NOT_IN_DB(db,'tag.name')),   # unique tag name
    Field('links','integer',default=0,writable=False),    # denormalized usage counter
    format='%(name)s')

db.define_table('tag_link',
    Field('tag',db.tag),                  # the tag being applied
    Field('table_name', 'string'),        # name of the tagged table
    Field('record_id','integer'))         # id of the tagged record
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations

#########################################################################
## This scaffolding model makes your app work on Google App Engine too
#########################################################################

if request.env.web2py_runtime_gae:            # if running on Google App Engine
    db = DAL('google:datastore')              # connect to Google BigTable
                                              # optional DAL('gae://namespace')
    session.connect(request, response, db = db) # and store sessions and tickets there
    ### or use the following lines to store sessions in Memcache
    # from gluon.contrib.memdb import MEMDB
    # from google.appengine.api.memcache import Client
    # session.connect(request, response, db = MEMDB(Client()))
else:                                         # else use a normal relational database
    db = DAL('sqlite://storage.sqlite')       # if not, use SQLite or other DB

# by default give a view/generic.extension to all actions from localhost
# none otherwise. a pattern can be 'controller/function.extension'
response.generic_patterns = ['*'] if request.is_local else []

#########################################################################
## Here is sample code if you need for
## - email capabilities
## - authentication (registration, login, logout, ... )
## - authorization (role based authorization)
## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)
## - crud actions
## (more options discussed in gluon/tools.py)
#########################################################################

from gluon.tools import Mail, Auth, Crud, Service, PluginManager, prettydate
mail = Mail()                                  # mailer
auth = Auth(db)                                # authentication/authorization
crud = Crud(db)                                # for CRUD helpers using auth
service = Service()                            # for json, xml, jsonrpc, xmlrpc, amfrpc
plugins = PluginManager()                      # for configuring plugins

# NOTE(review): SMTP credentials and the hmac key are committed in plain
# text here -- move them to a private config file outside version control.
mail.settings.server = 'smtp.gmail.com:587'    # your SMTP server 'logging' or
mail.settings.sender = 'formaticsmailer@gmail.com'  # your email
mail.settings.login = 'formaticsmailer@gmail.com:1234hoedjevan'  # your credentials or None

auth.settings.hmac_key = 'sha512:8d1781dc-ecc9-4fae-8f27-20ee8f9dacec'  # before define_tables()
auth.define_tables(username=True)
# creates all needed tables
auth.settings.mailer = mail
# for user email verification
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.messages.verify_email = 'Click on the link http://'+request.env.http_host+URL('default','user',args=['verify_email'])+'/%(key)s to verify your email'
auth.settings.reset_password_requires_verification = True
auth.messages.reset_password = 'Click on the link http://'+request.env.http_host+URL('default','user',args=['reset_password'])+'/%(key)s to reset your password'

#allow basic login
#e.g.: wget --http-user=[username] --http-passwd=[password] http://.../[app]/[controller]/[function]/[args]
auth.settings.allow_basic_login = True
#auth.settings.actions_disabled.append('retrieve_username')
#auth.settings.actions_disabled.append('profile')

#########################################################################
## If you need to use OpenID, Facebook, MySpace, Twitter, Linkedin, etc.
## register with janrain.com, uncomment and customize following
# from gluon.contrib.login_methods.rpx_account import RPXAccount
# auth.settings.actions_disabled = \
#     ['register','change_password','request_reset_password']
# auth.settings.login_form = RPXAccount(request, api_key='...',domain='...',
#     url = "http://localhost:8000/%s/default/user/login" % request.application)
## other login methods are in gluon/contrib/login_methods
#########################################################################

crud.settings.auth = None                      # =auth to enforce authorization on crud
crud.settings.formstyle = 'table3cols'

#########################################################################
## Define your tables below (or better in another model file) for example
##
## >>> db.define_table('mytable',Field('myfield','string'))
##
## Fields can be 'string','text','password','integer','double','boolean'
##       'date','time','datetime','blob','upload', 'reference TABLENAME'
## There is an implicit 'id integer autoincrement' field
## Consult manual for more options, validators, etc.
##
## More API examples for controllers:
##
## >>> db.mytable.insert(myfield='value')
## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)
## >>> for row in rows: print row.id, row.myfield
#########################################################################

#Authorization groups -- bootstrap the two roles on first run
if (db(db.auth_group.role == 'admins').count() == 0):
    auth.add_group('admins', 'admin group')
if (db(db.auth_group.role == 'operators').count() == 0):
    auth.add_group('operators', 'operator group')
#TODO: default "f" pass is too short!!!
# Seed a default admin and operator on an empty user table.
if (db(db.auth_user.id>0).count() == 0):
    #Note:: the pass returned from validate is a tuple: only first part is needed!!!
    db.auth_user.insert(first_name='Mr',last_name='Admin',email='support@formatics.nl',username='admin',password=db.auth_user.password.validate('secret')[0])
    db.auth_user.insert(first_name='Mr',last_name='Operator',email='support@formatics.nl',username='operator',password=db.auth_user.password.validate('secret')[0])
    # add_membership(group_id, user_id)
    auth.add_membership(1, 1) #admin belongs to Admins group
    auth.add_membership(2, 1) #admin belongs also to Operators group
    auth.add_membership(2, 2) #operator belongs to Operators group

#Authorization groups
#if (db(db.auth_group.role == 'admins').count() == 0):
#    auth.add_group('admins', 'admin group')
#
#if (db(db.auth_group.role == 'nodes').count() == 0):
#    auth.add_group('nodes', 'node group')

# Outgoing mail queue: rows are picked up and sent by a background task.
db.define_table('queue',
    Field('status', 'string', requires=IS_IN_SET(('pending','sent', 'failed')), default='pending', writable=False, readable=False),
    Field('email', 'string', requires=IS_EMAIL()),
    Field('subject', 'string'),
    Field('message'))

# Simple per-user todo list.
db.define_table('todo',
    Field('title',label=T("Title"),
          requires=(IS_NOT_EMPTY())),
    Field('isCompleted','boolean',default=None),
    auth.signature)
import os

# CMS page tree plus per-page content items.  Each page can hold an
# ordered list of content blocks (text, image, file, link, video, form,
# faq, slider) referenced polymorphically through page_item.

#The parent reference could also be done with:
# Field('parent', 'reference page'),
db.define_table('page',
    Field('language',
          requires=IS_IN_SET(('nl','en')),writable=False,readable=False),
    Field('parent', type='integer', writable=False,readable=False,
          label=T('Parent page')),
    Field('title',default='',label=T("Title"),
          requires=(IS_NOT_EMPTY())),
    Field('short_title',default='',label=T("Short title, will appear in menu's"),
          requires=(IS_NOT_EMPTY())),
    Field('url',default='',label=T("Unique name of the URL"),
          requires=(IS_SLUG(),IS_NOT_IN_DB(db,'page.url'))),     # slug, unique
    Field('isActive','boolean',default=True),
    Field('isMenuitem','boolean',default=True,label=T("When enabled the page will appear in menu or submenu")),
    Field('showChildren','boolean',default=True,label=T("When enabled the subdocuments of this page will be visible")),
    Field('childrenTitle','string',label=T("Name the sub pages")),
    Field('image', type='upload', label=T("Page icon")),
    # auto-generated thumbnail of 'image'; stored under static/images/thumbs
    Field('imageThumb', type='upload', writable=False, readable=False, uploadfolder=os.path.join(request.folder,'static','images','thumbs')),
    Field('showSiblings','boolean',default=False,label=T("When enabled the siblingsMenu will be visible")),
    Field('order_nr', type='integer', default=0, writable=False,readable=False),
    Field('role',db.auth_group,   # optional: restrict page to a group
          requires=IS_EMPTY_OR(IS_IN_DB(db,'auth_group.id','%(role)s'))),
    Field('changelog',default=''),
    auth.signature,
    format = '%(title)s', migrate=True)

# Version history of 'page'.
# BUGFIX: format was '%(slug) %(modified_on)s' -- db.page has no 'slug'
# field (its slug-like field is 'url') and '%(slug) ' lacked the 's'
# conversion character, so rendering the format would raise.
db.define_table('page_archive',
    Field('current_record',db.page),
    db.page,
    format = '%(url)s %(modified_on)s', migrate=True)

# Ordered, polymorphic link from a page to one of its content records.
# NOTE(review): the set lists 'page_image_slider' but the table defined
# below is 'page_slider', and 'page_picasa' exists but is not listed --
# verify intended names before changing behavior.
db.define_table('page_item',
    Field('page', db.page, writable=False,readable=False,
          label=T('Page')),
    # duplicate 'page_form' entry removed from the original set (harmless
    # for validation, but misleading)
    Field('tablename', type='string', requires=IS_IN_SET(('page_text','page_image','page_file','page_link','page_youtube','page_form','page_image_slider','page_faq')), writable=False,readable=False,
          label=T('Content type')),
    Field('record_id', type='integer', writable=False,readable=False),
    Field('order_nr', type='integer', default=0, writable=True,readable=True))
db.page_item.page.requires = IS_IN_DB(db, db.page.id, 'Language: %(language)s %(title)s')

# Rich-text block.
db.define_table('page_text',
    Field('title', type='string',
          label=T('Define if you use subtitles')),
    Field('type', type='string', writable=False,readable=False,
          requires=IS_IN_SET(('html','markmin','textarea')), label=T('Parent page')),
    Field('body', type='text',requires=(IS_NOT_EMPTY())))

# Version history of 'page_text'.
# BUGFIX: format was '%(title) %(modified_on)s' -- missing 's' conversion.
db.define_table('page_text_archive',
    Field('current_record',db.page_text),
    db.page_text,
    format = '%(title)s %(modified_on)s', migrate=True)

# Single image block, optionally shown in a lightbox.
db.define_table('page_image',
    Field('title', type='string',
          label=T('Define the image title')),
    Field('image', type='upload',requires=(IS_NOT_EMPTY())),
    Field('hasLightbox', type='boolean', label=T('Has lightbox?'), default=True))

# Downloadable file block.
db.define_table('page_file',
    Field('title', type='string',
          label=T('Define the file title'),requires=(IS_NOT_EMPTY())),
    Field('file', type='upload',requires=(IS_NOT_EMPTY())))

# Embedded Google Picasa album.
db.define_table('page_picasa',
    Field('userid', type='string',
          label=T('Google Picasa User ID'), requires=(IS_NOT_EMPTY())),
    Field('albumid', type='string',
          label=T("Google Picasa Album ID"), requires=(IS_NOT_EMPTY())))

# Embedded YouTube video.
db.define_table('page_youtube',
    Field('title', type='string',
          label=T('Define a custom title')),
    Field('youtube', type='string',
          label=T("Youtube code"), requires=(IS_NOT_EMPTY())))

#TODO: Needs extra field for internal link or external link
db.define_table('page_link',
    Field('title', type='string',
          label=T('Name of the link'),requires=(IS_NOT_EMPTY())),
    Field('link', type='string',requires=(IS_URL())))

# Frequently-asked-question entry.
db.define_table('page_faq',
    Field('question', type='string',
          label=T('Question'), requires=(IS_NOT_EMPTY())),
    Field('answer', type='text', label=T("Answer"), requires=(IS_NOT_EMPTY())))

# Image slider: up to ten slides, only the first is mandatory.
db.define_table('page_slider',
    Field('width', type='string', label=T('width in pixels (max 680)'), requires=(IS_NOT_EMPTY())),
    Field('height', type='string', label=T('height in pixels'), requires=(IS_NOT_EMPTY())),
    Field('image1', type='upload', uploadfolder=os.path.join(request.folder,'static/temp'), requires=(IS_NOT_EMPTY())),
    Field('image2', type='upload', uploadfolder=os.path.join(request.folder,'static/temp')),
    Field('image3', type='upload', uploadfolder=os.path.join(request.folder,'static/temp')),
    Field('image4', type='upload', uploadfolder=os.path.join(request.folder,'static/temp')),
    Field('image5', type='upload', uploadfolder=os.path.join(request.folder,'static/temp')),
    Field('image6', type='upload', uploadfolder=os.path.join(request.folder,'static/temp')),
    Field('image7', type='upload', uploadfolder=os.path.join(request.folder,'static/temp')),
    Field('image8', type='upload', uploadfolder=os.path.join(request.folder,'static/temp')),
    Field('image9', type='upload', uploadfolder=os.path.join(request.folder,'static/temp')),
    Field('image10', type='upload', uploadfolder=os.path.join(request.folder,'static/temp')))

# Pre-built contact/support form selector.
db.define_table('page_form',
    Field('form_type', type='string', label=T('Choose a form to include'), requires=IS_IN_SET(('Support','Question','Demo','Offers','Contactpage'))))
# This file was developed by Massimo Di Pierro
# It is released under BSD, MIT and GPL2 licenses
###################################################
# required parameters set by default if not set
###################################################
DEFAULT = {
'editor' : auth.user and auth.has_membership(role='editor') or auth.user_id==1, # if current user a editor?
'mode' : 'markmin', # 'markmin' or 'html' for wysiwyg editor
'level' : 3, # 1 - wiki only, 2 - widgets enables, 3 - remplate render enabled
'migrate': True, # set to False in production
'theme' : 'redmond', # the jquery-ui theme, mapped into plugin_wiki/ui/%(theme)s/jquery-ui-1.8.1.custom.css
'widgets' : 'all', # list of widgets to be made available
'authorize_attachments' : False # shoudl attachment be restricted to the page?
}
def _():
    """
    Copy each DEFAULT setting into the module namespace as a
    ``plugin_wiki_<key>`` global (unless the application already defined
    one) and register the same values with PluginManager, so settings are
    reachable both as ``PluginManager.wiki.xxx`` and as ``plugin_wiki_xxx``.
    Exposing plain globals minimizes Storage lookups.
    """
    # the plugin cannot operate without the scaffolding 'db' and 'auth' objects
    if not 'db' in globals() or not 'auth' in globals():
        raise HTTP(500,"plugin_wiki requires 'db' and 'auth'")
    from gluon.tools import PluginManager
    prefix='plugin_wiki_'
    # only adopt settings the application has not explicitly overridden
    keys=dict(item for item in DEFAULT.items() if not prefix+item[0] in globals())
    plugins = PluginManager('wiki',**keys)
    globals().update(dict((prefix+key,keys[key]) for key in keys))
_()
###################################################
# js and css modules required by the plugin
###################################################
# static assets (jquery-ui, jqGrid, slideshow, multiselect, rating) queued so
# the layout emits them in the page head; names stay underscore-prefixed to
# keep them out of the template environment
_assets = ('plugin_wiki/ui/css/%s/jquery-ui-1.8.5.custom.css' % plugin_wiki_theme,
           'plugin_wiki/ui/js/jquery-ui-1.8.5.custom.min.js',
           'plugin_wiki/jqgrid/ui.jqgrid.css',
           'plugin_wiki/jqgrid/i18n/grid.locale-en.js',
           'plugin_wiki/jqgrid/jquery.jqGrid.min.js',
           'plugin_wiki/slideshow/jquery.cycle.min.js',
           'plugin_wiki/multiselect/jquery.multiselect.css',
           'plugin_wiki/multiselect/jquery.multiselect.js',
           'plugin_wiki/rating/jquery.rating.css',
           'plugin_wiki/rating/jquery.rating.js')
for _f in _assets:
    response.files.append(URL('static', _f))
###################################################
# required tables
###################################################
# wiki pages: 'slug' is the unique URL-safe key, 'role' optionally restricts
# visibility to one auth_group, auth.signature adds created/modified audit fields
db.define_table('plugin_wiki_page',
    Field('slug',writable=False,
          requires=(IS_SLUG(),IS_NOT_IN_DB(db,'plugin_wiki_page.slug'))),
    Field('title',default='',
          requires=(IS_NOT_EMPTY(),IS_NOT_IN_DB(db,'plugin_wiki_page.title'))),
    Field('is_public','boolean',default=True),
    Field('body','text',default=''),
    Field('role',db.auth_group,
          requires=IS_EMPTY_OR(IS_IN_DB(db,'auth_group.id','%(role)s'))),
    Field('changelog',default=''),
    auth.signature,
    format = '%(slug)s', migrate=plugin_wiki_migrate)
# page version history: a full copy of plugin_wiki_page's fields plus a
# pointer to the live record; used by the page_history view
db.define_table('plugin_wiki_page_archive',
    Field('current_record',db.plugin_wiki_page),
    db.plugin_wiki_page,
    # was '%(slug) %(modified_on)s': '%(slug)' lacks the trailing 's'
    # conversion, which raises ValueError when the format is applied
    format = '%(slug)s %(modified_on)s', migrate=plugin_wiki_migrate)
# files attached generically to any record via the (tablename, record_id)
# pair; autodelete removes the stored file when the row is deleted
db.define_table('plugin_wiki_attachment',
    Field('tablename',writable=False,readable=False),
    Field('record_id','integer',writable=False,readable=False),
    Field('name',requires=IS_NOT_EMPTY()),
    Field('filename','upload',requires=IS_NOT_EMPTY(),autodelete=True),
    auth.signature,
    format='%(name)s', migrate=plugin_wiki_migrate)
# user comments, generically linked to (tablename, record_id)
db.define_table('plugin_wiki_comment',
    Field('tablename',
          writable=False,readable=False),
    Field('record_id','integer',
          writable=False,readable=False),
    Field('body',requires=IS_NOT_EMPTY(),label='Your comment'),
    auth.signature,
    migrate=plugin_wiki_migrate)
# tag vocabulary; 'links' caches how many records currently carry the tag
db.define_table('plugin_wiki_tag',
    Field('name',requires=IS_NOT_IN_DB(db,'plugin_wiki_tag.name')),
    Field('links','integer',default=0,writable=False),
    auth.signature,
    format='%(name)s', migrate=plugin_wiki_migrate)
# many-to-many join between tags and arbitrary records
db.define_table('plugin_wiki_link',
    Field('tag',db.plugin_wiki_tag),
    Field('table_name'),
    Field('record_id','integer'), migrate=plugin_wiki_migrate)
# aggregate star ratings per (tablename, record_id):
# 'rating' holds the running average, 'counter' the number of votes
db.define_table('plugin_wiki_rating',
    Field('tablename'),
    Field('record_id','integer'),
    Field('rating','double'),
    Field('counter','integer'),
    # consistency fix: every other plugin table honors plugin_wiki_migrate;
    # without it these two tables would still run migrations in production
    migrate=plugin_wiki_migrate)
# one row per individual user vote, so a repeat vote can be updated
db.define_table('plugin_wiki_rating_aux',
    Field('master',db.plugin_wiki_rating),
    Field('rating','double'),
    Field('created_by',db.auth_user),
    migrate=plugin_wiki_migrate)
###################################################
# widgets embeddable in wiki pages
###################################################
class PluginWikiWidgets(object):
    """
    Widgets embeddable in wiki pages via ``name: <widget>`` blocks.

    The ``##`` docstrings of the public static methods are rendered as the
    on-line help of the widget browser, so their wording is user-facing.

    todo:
        toc
        in-place-wiki-edit
        permission management
        voting plugin
    """
    ###############################################
    # basic crud widgets (no ajax)
    ###############################################
    @staticmethod
    def read(table,record_id=None):
        """
        ## read and display a record
        - ``table`` is the name of a table
        - ``record_id`` is a record number
        """
        if not record_id: record_id=request.args(-1)
        # record_id may be an int when called programmatically; str() avoids
        # AttributeError on int.isdigit
        if not str(record_id).isdigit(): return XML('no data')
        return crud.read(db[table],record_id)
    @staticmethod
    def _set_field_attributes(table,readonly_fields='',hidden_fields='',default_fields=''):
        """Apply per-request readonly/hidden/default attributes to db[table] fields."""
        if readonly_fields:
            for f in readonly_fields.split(','):
                db[table][f.strip()].writable=False
        if hidden_fields:
            # hidden means neither writable nor readable
            for f in hidden_fields.split(','):
                db[table][f.strip()].writable=False
                db[table][f.strip()].readable=False
        if default_fields:
            for f in default_fields.split(','):
                (key,value) = f.split('=')
                db[table][key.strip()].default=value.strip()
    @staticmethod
    def create(table,message='',next='',readonly_fields='',
               hidden_fields='',default_fields=''):
        """
        ## display a record create form
        - ``table`` is the name of a table
        - ``message`` is the message to be displayed after record is created
        - ``next`` is where to redirect, example "page/index/[id]"
        - ``readonly_fields`` is a list of comma separated fields
        - ``hidden_fields`` is a list of comma separated fields
        - ``default_fields`` is a list of comma separated "fieldname=value"
        """
        PluginWikiWidgets._set_field_attributes(table, readonly_fields,hidden_fields,default_fields)
        return crud.create(db[table],message=message,next=next)
    @staticmethod
    def update(table,record_id='',message='',next='',
               readonly_fields='',hidden_fields='',default_fields=''):
        """
        ## display a record update form
        - ``table`` is the name of a table
        - ``record_id`` is the record to be updated or {{=request.args(-1)}}
        - ``message`` is the message to be displayed after record is created
        - ``next`` is where to redirect, example "page/index/[id]"
        - ``readonly_fields`` is a list of comma separated fields
        - ``hidden_fields`` is a list of comma separated fields
        - ``default_fields`` is a list of comma separated "fieldname=value"
        """
        PluginWikiWidgets._set_field_attributes(table, readonly_fields,hidden_fields,default_fields)
        if not record_id: record_id=request.args(-1)
        # tolerate int record_ids; non-numeric values fall back to None
        if not str(record_id).isdigit(): record_id=None
        return crud.update(db[table],record_id,message=message,next=next)
    @staticmethod
    def select(table,query_field='',query_value='',fields=''):
        """
        ## Lists all records in the table
        - ``table`` is the name of a table
        - ``query_field`` and ``query_value`` if present will filter records by query_field==query_value
        - ``fields`` is a list of comma separate fields to be displayed
        """
        query=None
        if query_field:
            query = db[table][query_field]==query_value
        if fields:
            # crud.select expects fully qualified "table.field" names
            fields=['%s.%s' % (table,f.strip()) for f in fields.split(',')]
        else:
            fields=None
        return crud.select(db[table],query=query,fields=fields,headers='fieldname:capitalize')
    @staticmethod
    def search(table,fields=''):
        """
        ## A Widget for selecting records
        - ``table`` is the name of a table
        - ``fields`` is a list of comma separated fields to be displayed
        """
        if fields:
            fields=['%s.%s' % (table,f.strip()) for f in fields.split(',')]
        else:
            fields=None
        search, results = crud.search(db[table])
        if not results: results=T('no results')
        else: results=SQLTABLE(results,fields=fields,headers='fieldname:capitalize')
        return DIV(search,results)
    ###############################################
    # advanced crud (jqgrid with ajax search)
    ###############################################
    @staticmethod
    def jqgrid(table,fieldname=None,fieldvalue=None,col_widths='',
               colnames=None,_id=None,fields='',
               col_width=80,width=700,height=300,id=None):
        """
        ## Embed a jqGrid plugin
        - ``table`` is the table name
        - ``fieldname``, ``fieldvalue`` are an optional filter (fieldname==fieldvalue)
        - ``_id`` is the "id" of the DIV that contains the jqGrid
        - ``fields`` is a list of columns names to be displayed
        - ``colnames`` is a list of column headers
        - ``col_width`` is the width of each column (default)
        - ``height`` is the height of the jqGrid
        - ``width`` is the width of the jqGrid
        """
        from gluon.serializers import json
        # honor an explicitly passed _id (the documented parameter used to
        # be silently ignored in favor of the generated default)
        _id = id or _id or 'jqgrid_%s' % table
        if not fields:
            fields = [x.strip() for x in db[table].fields if db[table][x.strip()].readable]
        elif isinstance(fields,str):
            fields = [x.strip() for x in fields.split(',')]
        else:
            fields = fields
        if col_widths:
            if isinstance(col_widths,(list,tuple)):
                col_widths = [str(x) for x in col_widths]
            else:
                col_widths = [x.strip() for x in col_widths.split(',')]
            # 'auto' grid width: sum of the explicit column widths
            if width=='auto':
                width=sum([int(x) for x in col_widths])
        elif not col_widths:
            col_widths = [col_width for x in fields]
        if isinstance(colnames,str):
            colnames = [x.strip() for x in colnames.split(',')]
        else:
            colnames = [(db[table][x].label or x) for x in fields]
        colmodel = [{'name':x,'index':x, 'width':col_widths[i], 'sortable':True}\
                        for i,x in enumerate(fields)]
        # the callback URL is signed (hmac) so the ajax endpoint can verify it
        callback = URL('plugin_wiki','jqgrid',
                       vars=dict(tablename=table,
                                 columns=','.join(fields),
                                 fieldname=fieldname or '',
                                 fieldvalue=fieldvalue,
                                 ),
                       hmac_key=auth.settings.hmac_key,
                       salt=auth.user_id
                       )
        script="""
jQuery(document).ready(function(){jQuery("#%(id)s").jqGrid({ url:'%(callback)s', datatype: "json", colNames: %(colnames)s,colModel:%(colmodel)s, rowNum:10, rowList:[20,50,100], pager: '#%(id)s_pager', viewrecords: true,height:%(height)s});jQuery("#%(id)s").jqGrid('navGrid','#%(id)s_pager',{search:true,add:false,edit:false,del:false});jQuery("#%(id)s").setGridWidth(%(width)s,false);jQuery('select.ui-pg-selbox,input.ui-pg-input').css('width','50px');});
""" % dict(callback=callback,colnames=json(colnames),
           colmodel=json(colmodel),id=_id,height=height,width=width)
        return TAG[''](TABLE(_id=_id),
                       DIV(_id=_id+"_pager"),
                       SCRIPT(script))
    ###############################################
    # scientific widgets (latex, charting)
    ###############################################
    @staticmethod
    def latex(expression):
        """
        ## Uses Google charting API to embed LaTeX
        """
        # escape double quotes so the expression cannot break out of the
        # src="..." attribute (the previous '\\"' replacement was a no-op,
        # since '\\"' and '"' are the same one-character string)
        return XML('<img src="http://chart.apis.google.com/chart?cht=tx&chl=%s" align="center"/>' % expression.replace('"','&quot;'))
    @staticmethod
    def pie_chart(data='1,2,3',names='a,b,c',width=300,height=150,align='center'):
        """
        ## Uses Google charting API to embed a pie chart
        - ``data`` is a list of comma separated values
        - ``names`` is a list of comma separated labels (one for data item)
        - ``width`` is the width of the image
        - ``height`` is the height of the image
        - ``align`` determines the alignment of the image
        """
        if isinstance(data,str):
            data = data.replace(' ','')
        elif isinstance(data,dict):
            data = '|'.join(','.join(str(y) for y in s) for s in data.values())
        elif isinstance(data,list):
            data = ','.join(str(y) for y in data)
        if isinstance(names,str):
            names = '|'.join(name.strip() for name in names.split(','))
        else:
            names = '|'.join(name for name in names)
        return XML('<img src="http://chart.apis.google.com/chart?cht=p3&chd=s:cEj9U&chs=%(width)sx%(height)s&chl=%(names)s&chd=t:%(data)s" align="%(align)s">' % dict(data=data,width=width,height=height,names=names,align=align))
    @staticmethod
    def bar_chart(data='1,2,3',names='a,b,c',width=300,height=150,align='center'):
        """
        ## Uses Google charting API to embed a bar chart
        - ``data`` is a list of comma separated values
        - ``names`` is a list of comma separated labels (one for data item)
        - ``width`` is the width of the image
        - ``height`` is the height of the image
        - ``align`` determines the alignment of the image
        """
        if isinstance(data,str):
            data = data.replace(' ','')
        elif isinstance(data,dict):
            data = '|'.join(','.join(str(y) for y in s) for s in data.values())
        elif isinstance(data,list):
            data = ','.join(str(y) for y in data)
        if isinstance(names,str):
            names = '|'.join(name.strip() for name in names.split(','))
        else:
            names = '|'.join(name for name in names)
        # NOTE(review): height is recomputed from width (2:1 aspect ratio) and
        # the caller-supplied height is ignored — presumably intentional; confirm
        height=int(width)/2
        return XML('<img src="http://chart.apis.google.com/chart?chxt=x,y&cht=bvs&chd=s:cEj9U&chls=2.0&chs=%(width)sx%(height)s&chxl=0:|%(names)s&chd=t:%(data)s" align="%(align)s">' % dict(data=data,width=width,height=height,names=names,align=align))
    ###############################################
    # media widgets
    ###############################################
    @staticmethod
    def slideshow(links=None,table=None,field='image',transition='fade',width=200,height=200):
        """
        ## Embeds a slideshow
        It gets the images from a table
        - ``table`` is the table name
        - ``field`` is the upload field in the table that contains images
        - ``transition`` determines the type of transition, e.g. fade, etc.
        - ``width`` is the width of the image
        - ``height`` is the height of the image
        """
        import random
        # random DOM id so several slideshows can coexist on one page
        id=str(random.random())[2:]
        if table:
            rows = db(db[table].id>0).select()
            if db[table][field].type=='upload':
                images = [IMG(_src=URL('default','download',args=row[field])) for row in rows]
            else:
                images = [IMG(_src=row[field]) for row in rows]
        elif links:
            images = [IMG(_src=link) for link in links.split(',')]
        else:
            images = []
        return DIV(SCRIPT("jQuery(document).ready(function() {jQuery('#slideshow%s').cycle({fx: '%s'});});" % (id,transition)),DIV(_id='slideshow'+id,*images))
    @staticmethod
    def youtube(code,width=400,height=250):
        """
        ## Embeds a youtube video (by code)
        - ``code`` is the code of the video
        - ``width`` is the width of the image
        - ``height`` is the height of the image
        """
        return XML("""<object width="%(width)s" height="%(height)s"><param name="movie" value="http://www.youtube.com/v/%(code)s&hl=en_US&fs=1&"></param><param name="allowFullScreen" value="true"></param><param name="allowscriptaccess" value="always"></param><embed src="http://www.youtube.com/v/%(code)s&hl=en_US&fs=1&" type="application/x-shockwave-flash" allowscriptaccess="always" allowfullscreen="true" width="%(width)s" height="%(height)s"></embed></object>""" % dict(code=code, width=width, height=height))
    @staticmethod
    def vimeo(code,width=400,height=250):
        """
        ## Embeds a vimeo video (by code)
        - ``code`` is the code of the video
        - ``width`` is the width of the image
        - ``height`` is the height of the image
        """
        return XML("""<object width="%(width)s" height="%(height)s"><param name="allowfullscreen" value="true" /><param name="allowscriptaccess" value="always" /><param name="movie" value="http://vimeo.com/moogaloop.swf?clip_id=%(code)s&server=vimeo.com&show_title=1&show_byline=1&show_portrait=0&color=&fullscreen=1" /><embed src="http://vimeo.com/moogaloop.swf?clip_id=%(code)s&server=vimeo.com&show_title=1&show_byline=1&show_portrait=0&color=&fullscreen=1" type="application/x-shockwave-flash" allowfullscreen="true" allowscriptaccess="always" width="%(width)s" height="%(height)s"></embed></object>""" % dict(code=code, width=width, height=height))
    @staticmethod
    def mediaplayer(src,width=400,height=250):
        """
        ## Embeds a media file (such as flash video or an mp3 file)
        - ``src`` is the src of the video
        - ``width`` is the width of the image
        - ``height`` is the height of the image
        """
        return XML('<embed allowfullscreen="true" allowscriptaccess="always" flashvars="height=%(height)s&width=%(width)s&file=%(src)s" height="%(height)spx" src="%(url)s" width="%(width)spx"></embed>'%dict(url=URL('static','plugin_wiki/mediaplayer.swf'),src=src,width=width,height=height))
    ###############################################
    # social widgets (comments and tags)
    ###############################################
    @staticmethod
    def comments(table='None',record_id=None):
        """
        ## Embeds comments in the page
        Comments can be linked to a table and/or a record
        - ``table`` is the table name
        - ``record_id`` is the id of the record
        """
        return LOAD('plugin_wiki','comment',
                    args=(table,record_id or 0),ajax=True)
    @staticmethod
    def tags(table='None',record_id=None):
        """
        ## Embeds tags in the page
        tags can be linked to a table and/or a record
        - ``table`` is the table name
        - ``record_id`` is the id of the record
        """
        return LOAD('plugin_wiki','tags',
                    args=(table,record_id or 0),ajax=True)
    @staticmethod
    def tag_cloud():
        """
        ## Embeds a tag cloud
        """
        return LOAD('plugin_wiki','cloud')
    @staticmethod
    def aggregator(feed, max_entries=5):
        """
        ## Embeds a feed aggregator
        - ``feed`` is a list of comma separated feed urls (http://rss.cbc.ca/lineup/topstories.xml)
        - ``max_entries`` is the max number of displayed entries (default=5)
        """
        import gluon.contrib.feedparser as feedparser
        # the previous implementation referenced undefined names ('feeds',
        # 'max_entried', 'd' before assignment, 'entry.descstiption',
        # a bare 'web2py_aggregator_link') and, for a string argument,
        # iterated its characters; rebuilt to do what the docstring says
        if isinstance(feed, (list, tuple)):
            urls = feed
        else:
            urls = feed.split(",")
        entries_list = UL()
        for url in urls:
            d = feedparser.parse(url)
            for entry in d.entries[:max_entries]:
                entries_list.append(LI(A(entry.title, ' ', SPAN(entry.updated),
                                         _href=entry.link,
                                         _class='web2py_aggregator_link')))
        return DIV(entries_list, _id='web2py_aggregator')
    @staticmethod
    def map(key='ABQIAAAAT5em2PdsvF3z5onQpCqv0RTpH3CbXHjuCVmaTc5MkkU4wO1RRhQHEAKj2S9L72lEMpvNxzLVfJt6cg',
            table='auth_user', width=400, height=200):
        """
        ## Embeds a Google map
        Gets points on the map from a table
        - ``key`` is the google map api key (default works for 127.0.0.1)
        - ``table`` is the table name
        - ``width`` is the map width
        - ``height`` is the map height
        The table must have columns: latitude, longitude and map_popup.
        When clicking on a dot, the map_popup message will appear.
        """
        import os
        import gluon.template
        # read the template with an explicit close (the old code leaked the
        # file handle)
        fh = open(os.path.join(request.folder,'views','plugin_wiki',
                               'map.html'),'rb')
        try:
            content = fh.read()
        finally:
            fh.close()
        context = dict(googlemapkey=key, rows=db(db[table].id>0).select(),
                       width='%spx'%width,height='%spx'%height)
        return XML(gluon.template.render(content=content,context=context))
    @staticmethod
    def star_rating(table, record_id, splitstars=False):
        """
        ## Create a star rating widget for a given record in given table.
        if given table has a field called 'rating' average rating will also be added to it.
        it will save ratings from logged-in users only.
        - ``table`` is the table name
        - ``record_id`` is the id of record for which to show a rating widget.
        - ``splitstars`` if set to True will display split stars.
        """
        import uuid
        id = uuid.uuid4()
        row=db(db.plugin_wiki_rating.tablename==table)(db.plugin_wiki_rating.record_id==record_id).select().first()
        rating = row.rating if row else 0
        callback = URL('plugin_wiki', 'star_rate', args = [table,record_id])
        # half-star increments when splitstars is requested
        incr = 0.5 if splitstars else 1
        return TAG[''](DIV(_id='star'+str(id),_class='rating'),
                       SCRIPT("jQuery(document).ready(function(){jQuery('%(uid)s').rating('%(callback)s',{increment:%(incr)s, maxvalue:5, curvalue:%(rating)s});});" % dict(uid='#star'+str(id), callback=callback,incr=incr, rating=rating)))
    @staticmethod
    def iframe(src, width=400, height=300):
        """
        ## embed a page in an <iframe></iframe>
        """
        return TAG.iframe(_src=src, _width=width, _height=height)
    @staticmethod
    def load_url(src):
        """
        ## loads the content of the url via ajax
        and traps forms
        """
        return LOAD(url=src)
    @staticmethod
    def load_action(action, controller='', ajax=True):
        """
        ## loads the content of URL(request.application, controller, action) via ajax
        and traps forms
        """
        return LOAD(controller, action, ajax=ajax)
###################################################
# main class to intantiate the widget
###################################################
class PluginWiki(object):
def __init__(self):
import re
regex = re.compile('(?P<s> *)(?P<t>.+) +(?P<k>\S+)')
if T.accepted_language=="nl": menu_page = db(db.plugin_wiki_page.slug=='meta-menu-nl').select().first()
else: menu_page = db(db.plugin_wiki_page.slug=='meta-menu-en').select().first()
code_page = db(db.plugin_wiki_page.slug=='meta-code').select().first()
if code_page and request.controller=='plugin_wiki' and not request.function in ('page_edit', 'page_history'):
try:
exec(re.sub('\r\n|\n\r|\r','\n',code_page.body.strip()),
globals())
except Exception, e:
import traceback
if plugin_wiki_editor:
response.flash = DIV(H4('Execution error in page _proc'),
PRE(traceback.format_exc()))
else:
response.flash = 'Internal error, please contact the administrator'
# if menu_page:
# response.menu=[]
# response.menu.append((T("Home"), False, URL('default','index'), []))
# parents = [(-1,response.menu)]
# for line in menu_page.body.split('\n'):
# match = regex.match(line)
# if not match: continue
# indent=len(match.group('s'))
# title=match.group('t')
# url=match.group('k')
# if url.lower()=='none':
# url=URL('plugin_wiki','page',args=request.args)
# elif url.startswith('page:'):
# url=URL('plugin_wiki','page',args=url[5:])
# while indent<=parents[-1][0]:
# parents.pop()
# newtree=[]
# parents[-1][1].append((title,False,url,newtree))
# parents.append((indent,newtree))
# self.extra = self.extra_blocks()
# response.menu.append((T("Contact"), False, URL('default','contact'), []))
#
# if plugin_wiki_editor:
# response.menu.append(('Admin',False,URL('plugin_wiki','index')))
# this embeds page attachments
class attachments(object):
def __init__(self,tablename,record_id=0,
caption='Attachments',close="Close",
id=None,width=70,height=70,
source=None):
import uuid
self.tablename=tablename
self.record_id=record_id
self.caption=caption
self.close=close
self.id=id or str(uuid.uuid4())
self.width=width
self.height=height
if not source:
source=URL('plugin_wiki','attachments',args=(tablename,record_id))
self.source = source
def xml(self):
return '<div id="%(id)s" style="display:none"><div style="position:fixed;top:0%%;left:0%%;width:100%%;height:100%%;background-color:black;z-index:1001;-moz-opacity:0.8;opacity:.80;opacity:0.8;"></div><div style="position:fixed;top:%(top)s%%;left:%(left)s%%;width:%(width)s%%;height:%(height)s%%;padding:16px;border:2px solid black;background-color:white;opacity:1.0;z-index:1002;overflow:auto;-moz-border-radius: 10px; -webkit-border-radius: 10px;"><span style="font-weight:bold">%(title)s</span><span style="float:right">[<a href="#" onclick="jQuery(\'#%(id)s\').hide();return false;">%(close)s</a>]</span><hr/><div style="width:100%%;height:90%%;" id="c%(id)s"><iframe id="attachments_modal_content" style="width:100%%;height:100%%;border:0">%(loading)s</iframe></div></div></div><a href="#" onclick="jQuery(\'#attachments_modal_content\').attr(\'src\',\'%(source)s\');jQuery(\'#%(id)s\').fadeIn(); return false" id="plugin_wiki_open_attachments"">%(title)s</a>' % dict(title=self.caption,source=self.source,close=self.close,id=self.id,left=(100-self.width)/2,top=(100-self.height)/2,width=self.width,height=self.height,loading=T('loading...'))
class widget_builder(object):
def __init__(self,
caption='Widget Builder',close="Close",
id=None,width=70,height=70):
import uuid
self.caption=caption
self.close=close
self.id=id or str(uuid.uuid4())
self.width=width
self.height=height
self.source=URL('plugin_wiki','widget_builder')
def xml(self):
return '<div id="%(id)s" style="display:none"><div style="position:fixed;top:0%%;left:0%%;width:100%%;height:100%%;background-color:black;z-index:1001;-moz-opacity:0.8;opacity:.80;opacity:0.8;"></div><div style="position:fixed;top:%(top)s%%;left:%(left)s%%;width:%(width)s%%;height:%(height)s%%;padding:16px;border:2px solid black;background-color:white;opacity:1.0;z-index:1002;overflow:auto;-moz-border-radius: 10px; -webkit-border-radius: 10px;"><span style="font-weight:bold">%(title)s</span><span style="float:right">[<a href="#" onclick="jQuery(\'#%(id)s\').hide();return false;">%(close)s</a>]</span><hr/><div style="width:100%%;height:90%%;" id="c%(id)s"><iframe id="widget_builder_modal_content" style="width:100%%;height:100%%;border:0">%(loading)s</iframe></div></div></div><a href="#" onclick="jQuery(\'#widget_builder_modal_content\').attr(\'src\',\'%(source)s\');jQuery(\'#%(id)s\').fadeIn(); return false" id="plugin_wiki_open_attachments"">%(title)s</a>' % dict(title=self.caption,source=self.source,close=self.close,id=self.id,left=(100-self.width)/2,top=(100-self.height)/2,width=self.width,height=self.height,loading=T('loading...'))
def pdf(self,text):
if not plugin_wiki_mode=='markmin':
raise RuntimeError, "Not supported"
response.headers['content-type'] = 'application/pdf'
return "Not implemented"
def render(self,text,level=plugin_wiki_level,page_url=URL()):
import re
if plugin_wiki_mode=='html':
return self.render_html(text,page_url)
elif plugin_wiki_mode=='markmin':
return self.render_markmin(text,page_url)
else:
raise RuntimeError, "Not supported"
def parse_value(self,code):
code = code.replace('[page]',request.args(0))
code = code.replace('[id]',request.args(1) or '')
code = code.replace('[application]',request.application)
code = code.replace('[client]',request.client)
if plugin_wiki_level>2:
import gluon.template
return gluon.template.render(code,context=globals())
return code
def render_widget(self,code):
try:
items = [x.strip().split(':',1) for x in code.split('\n')]
args = dict((item[0].strip(), self.parse_value(item[1].strip())) for item in items)
name = args.get('name','')
if not name or name[0]=='_': return 'ERROR'
del args['name']
html = getattr(PluginWikiWidgets,name)(**args)
if isinstance(html,str):
return html
elif html:
return html.xml()
else:
''
except Exception,e:
if plugin_wiki_editor:
import traceback
return '<div class="error"><pre>%s</pre></div>' % traceback.format_exc()
else:
return '<div class="error">system error</div>'
def render_template(self,code):
import gluon.template
try:
return gluon.template.render(code,context=globals())
except Exception,e:
if plugin_wiki_editor:
import traceback
return '<div class="error"><pre>%s</pre></div>' % traceback.format_exc()
else:
return '<div class="error">system error</div>'
def extra_blocks(self):
extra = {}
LATEX = '<img src="http://chart.apis.google.com/chart?cht=tx&chl=%s" align="center"/>'
extra['latex'] = lambda code: LATEX % code.replace('"','\"')
extra['verbatim'] = lambda code: cgi.escape(code)
extra['code'] = lambda code: CODE(code,language=None).xml()
extra['code_python'] = lambda code: CODE(code,language='python').xml()
extra['code_c'] = lambda code: CODE(code,language='c').xml()
extra['code_cpp'] = lambda code: CODE(code,language='cpp').xml()
extra['code_java'] = lambda code: CODE(code,language='java').xml()
extra['code_html_plain'] = lambda code: CODE(code,language='html_plain').xml()
extra['code_html'] = lambda code: CODE(code,language='html').xml()
extra['code_web2py'] = lambda code: CODE(code,language='web2py').xml()
if plugin_wiki_level>1:
extra['widget'] = lambda code: self.render_widget(code)
if plugin_wiki_level>2:
extra['template'] = lambda code: self.render_template(code)
return extra
def render_markmin(self,text,page_url=URL()):
import re
att_url = URL(request.application,'plugin_wiki','attachment')
session.plugin_wiki_attachments=[]
def register(match):
session.plugin_wiki_attachments.append(match.group('h'))
return '[[%s %s/%s' % (match.group('t'),att_url,match.group('h'))
text = re.sub('\[\[(?P<t>[^\[\]]+)\s+attachment:\s*(?P<h>[\w\-\.]+)',
register,text)
text = re.sub('\[\[(?P<t>[^\[\]]+) page:','[[\g<t> %s/' % page_url,text)
return MARKMIN(text,extra=self.extra)
def render_html(self,text,page_url=URL()):
import re
text = text.replace('href="page:','href="%s/' % page_url)
att_url = URL(r=request,c='plugin_wiki',f='attachment')
text = text.replace('src="attachment:', 'src="%s/' % att_url)
regex_code = re.compile('``(?P<t>.*?)``:(?P<c>\w+)',re.S)
while True:
match=regex_code.search(text)
if not match:
break
if match.group('c') in self.extra:
code = match.group('t').strip().replace('<br>','')
html = self.extra[match.group('c')](code)
text = text[:match.start()]+html+text[match.end():]
return XML(text,sanitize=plugin_wiki_level<2)
def embed_page(self,slug):
page=db(db.plugin_wiki_page.slug==slug).select().first()
if not page: return page
return XML(plugin_wiki.render(page.body))
def widget(self,name,*args,**kargs):
return getattr(PluginWikiWidgets,name)(*args,**kargs)
plugin_wiki=PluginWiki()
| Python |
# -*- coding: utf-8 -*-
"""
@author: Bruno Cezar Rocha
@twitter: @rochacbruno
@company: blouweb.com
@depends: http://www.wbotelhos.com/gridy/ - Jquery Gridy Plugin
@include: http://nyromodal.nyrodev.com/ - nyroModal
@include: http://css3buttons.michaelhenriksen.dk/ - CSS3 Buttons
@depends: http://www.web2py.com - web2py Faster, Better and more easily web development!
@license for Gridy library and PowerGrid Plugin
The MIT License
Copyright (c) 2010 Washington Botelho dos Santos (jquery.gridy)
Copyright (c) 2011 Bruno Cezar Rocha (PowerGrid Plugin for web2py)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@DONATE! PayPal - rochacbruno@gmail.com
Go VEGAN!
"""
#remove comment below if any problem
# track_changes() makes web2py reload local modules when their files change
from gluon.custom_import import track_changes
track_changes()
# make sure generic views exist for the response formats the grid uses
if not response.generic_patterns:
    response.generic_patterns = ['*.json','*.load','*.html']
import copy
# snapshot of the model environment, scanned below to discover DAL instances
global_env = copy.copy(globals())
def get_databases(request):
    """Return a ``{name: DAL instance}`` dict of every database object found
    in the captured model environment (``global_env``).

    The ``request`` argument is unused but kept for interface compatibility.
    """
    def _is_database(obj):
        # GQLDB only exists on the GAE runtime; elsewhere the name lookup
        # fails and we fall back to the classic SQLDB check
        try:
            return isinstance(obj, GQLDB)
        except:
            return isinstance(obj, SQLDB)
    return dict((name, obj) for (name, obj) in global_env.items()
                if _is_database(obj))
# pick the application's database: prefer an explicit 'db' global,
# otherwise grab an arbitrary discovered DAL instance
databases = get_databases(None)
if not 'db' in globals():
    # NOTE(review): raises IndexError if no database was discovered — confirm
    # that is the desired failure mode
    db = databases.values()[0]
else:
    db = globals()['db']
# the grid relies on Crud forms; reuse the app's instance when one exists
if not 'crud' in globals():
    crud = Crud(db)
| Python |
import logging
import uuid
import datetime
# logger for the game-server model layer
module_logger = logging.getLogger('SP.models.server')
## Game info ##
# one row per supported game: the maximum level and how many wins
# are required to advance one level
db.define_table('game_info',
    Field('game_name', type='string', requires=(IS_NOT_EMPTY())),
    Field('maxlevels', type='integer', requires=(IS_NOT_EMPTY())),
    Field('levelupcount', type='integer', requires=(IS_NOT_EMPTY())))
# seed the defaults only once, on first run
if db(db.game_info).count() == 0:
    db.game_info.insert(game_name='quiz', maxlevels=6, levelupcount=3)
    db.game_info.insert(game_name='memory', maxlevels=6, levelupcount=3)
## Specific game info ##
# number of answer choices presented per quiz level
db.define_table('quiz_conf',
    Field('level',type='integer', requires=(IS_NOT_EMPTY())),
    Field('answers',type='integer', requires=(IS_NOT_EMPTY())))
if db(db.quiz_conf).count() == 0:
    db.quiz_conf.insert(level=1, answers=2) # level 1 = 2 answers
    db.quiz_conf.insert(level=2, answers=2) # level 2 = 2 answers
    db.quiz_conf.insert(level=3, answers=4) # level 3 = 4 answers
    db.quiz_conf.insert(level=4, answers=4) # level 4 = 4 answers
    db.quiz_conf.insert(level=5, answers=4) # level 5 = 4 answers
    db.quiz_conf.insert(level=6, answers=4) # level 6 = 4 answers
# number of cards on the board per memory level
# (the old seed comments were copy-pasted from quiz_conf and wrong)
db.define_table('memory_conf',
    Field('level',type='integer', requires=(IS_NOT_EMPTY())),
    Field('cards',type='integer', requires=(IS_NOT_EMPTY())))
if db(db.memory_conf).count() == 0:
    db.memory_conf.insert(level=1, cards=6)  # level 1 = 6 cards
    db.memory_conf.insert(level=2, cards=8)  # level 2 = 8 cards
    db.memory_conf.insert(level=3, cards=12) # level 3 = 12 cards
    db.memory_conf.insert(level=4, cards=16) # level 4 = 16 cards
    db.memory_conf.insert(level=5, cards=20) # level 5 = 20 cards
    db.memory_conf.insert(level=6, cards=24) # level 6 = 24 cards
## Session stuff ##
# one row per game session of a player; closed sessions get end_time
# stamped and is_closed set by _close_session below
db.define_table('session',
    Field('username', type='string', requires=(IS_NOT_EMPTY())),
    Field('password', type='string', requires=(IS_NOT_EMPTY())),
    Field('level', type='integer', requires=(IS_NOT_EMPTY()), default=1),
    Field('start_time', type='datetime', default=request.now),
    Field('end_time', type='datetime', default=None),
    Field('is_closed', type='boolean', default=False))
# per-level timing inside a session
# NOTE(review): start_time/end_time are typed 'string' (not 'datetime') and
# session_id is a string — presumably serialized by the client; confirm
db.define_table('level_time',
    Field('session_id', type='string', requires=(IS_NOT_EMPTY())),
    Field('start_time', type='string', requires=(IS_NOT_EMPTY())),
    Field('end_time', type='string', default=''))
## Logging ##
# audit trail of every RPC the client makes (see log_rpc_call callers below)
# NOTE(review): 'date' is typed 'string' with a datetime default — presumably
# intentional for free-form storage; confirm
db.define_table('rpc_call',
    Field('username', type='string', requires=(IS_NOT_EMPTY())),
    Field('session', type=db.session, requires=(IS_NOT_EMPTY())),
    Field('call', type='string', requires=(IS_NOT_EMPTY())),
    Field('parameters', type='list:string', default=None),
    Field('return_value', type='string', default=None),
    Field('date', type='string', default=request.now))
def _get_session_id():
    """NOT USED AT THE MOMENT

    Generate a UUID string that no existing session of the authenticated
    user carries as id, and return it.
    """
    username = auth.user['username']
    password = auth.user['password']
    session_id = 0
    count = 0
    while 1:
        session_id = str(uuid.uuid4())
        # NOTE(review): db.session.id is the auto-increment integer key;
        # comparing it against a UUID string looks suspect -- confirm
        # before reviving this code.
        count = db((db.session.username == username) & (db.session.password == password)
                   & (db.session.id == session_id)).count()
        if count <= 0:
            break
    module_logger.debug("Get unique session ID: %s" % session_id)
    # NOTE(review): log_rpc_call looks the session up by this (not yet
    # inserted) id -- verify it tolerates an unknown session id.
    log_rpc_call(session_id, "_get_session_id", return_value=session_id)
    return session_id
def _open_and_return_session():
    """Create a new session row for the authenticated user and return its id."""
    username = auth.user['username']
    password = auth.user['password']
    row = db.session.insert(username=username, password=password)
    session_id = row.id
    module_logger.debug("Opened new session for the client.")
    # FIX: the logged call name was misspelled "_open_sesion".
    log_rpc_call(session_id, "_open_session")
    return session_id
def _close_session(session_id):
    """Mark the given session as closed and stamp its end time."""
    user = auth.user['username']
    pwd = auth.user['password']
    query = ((db.session.username == user) &
             (db.session.password == pwd) &
             (db.session.id == session_id))
    db(query).update(end_time=request.now, is_closed=True)
    module_logger.debug("Closed current session.")
    log_rpc_call(session_id, "_close_session",
                 parameters=['session_id:%s' % session_id])
def _get_levelupcount(session_id, game_name):
    """Return the levelup threshold configured for the given game."""
    record = db(db.game_info.game_name == game_name).select(
        db.game_info.levelupcount).first()
    threshold = record['levelupcount']
    module_logger.debug("Get levelupcount: %s" % threshold)
    log_rpc_call(session_id, "_get_levelupcount",
                 parameters=['session_id:%s' % session_id,
                             'game_name:%s' % game_name],
                 return_value=threshold)
    return threshold
def _get_levels(session_id, game_name):
    """Return the number of playable levels for the given game."""
    row = db(db.game_info.game_name == game_name).select(db.game_info.maxlevels).first()
    maxlevels = row['maxlevels']
    module_logger.debug("Get amount of playable levels: %s" % maxlevels)
    # FIX: also log game_name, consistent with _get_levelupcount.
    param = ['session_id:%s' % session_id, 'game_name:%s' % game_name]
    log_rpc_call(session_id, "_get_levels", parameters=param, return_value=maxlevels)
    return maxlevels
def _get_user(session_id):
    """Return the username of the authenticated user."""
    name = auth.user['username']
    module_logger.debug("Get name of user: %s" % name)
    log_rpc_call(session_id, "_get_user",
                 parameters=['session_id:%s' % session_id],
                 return_value=name)
    return name
def _get_difficulty(session_id):
    """Return the difficulty level stored on the given session."""
    user = auth.user['username']
    pwd = auth.user['password']
    query = ((db.session.username == user) &
             (db.session.password == pwd) &
             (db.session.id == session_id))
    record = db(query).select(db.session.level).first()
    difficulty = record['level']
    module_logger.debug("Get level difficulty: %s" % difficulty)
    log_rpc_call(session_id, "_get_difficulty",
                 parameters=['session_id:%s' % session_id],
                 return_value=difficulty)
    return difficulty
def _set_level_end(session_id, game_data):
    """Process end-of-level results and adjust the session difficulty.

    game_data keys used: 'level', 'store_db', 'levelup', 'score'.
    Score < 5 demotes a level (floored at 1); 5 < score < 7 keeps the
    level; otherwise a pending levelup promotes (capped at 6).
    """
    module_logger.debug('Core received: %s' % game_data)
    level = game_data['level']
    store_db = game_data['store_db']
    levelup = game_data['levelup']
    score = game_data['score']
    if score < 5.0:
        module_logger.debug('score < 5.0 or None, set levelup to false')
        if level > 1:
            module_logger.debug('Decrease level value with one')
            level -= 1
        if level < 1:
            level = 1
        levelup = False
    elif 5 < score < 7:
        # NOTE(review): a score of exactly 5 or 7 falls through to the
        # else branch -- confirm that boundary is intended.
        module_logger.debug('Set levelup to false')
        levelup = False
    else:
        if level < 6 and levelup:
            module_logger.debug("Increase level with one")
            level += 1
    if store_db:
        module_logger.debug("Store score in db: %s" % score)
        # BUG FIX: was `set_difficulty(...)` -- an undefined name; the
        # helper defined in this file is _set_difficulty.
        _set_difficulty(session_id, level)
        #_save_score(session_id, score)
    param = ['session_id:%s' % session_id, 'game_data:%s' % game_data]
    log_rpc_call(session_id, "_set_level_end", parameters=param)
def _set_start_time(session_id):
    """Record the start time of the current level for this session."""
    now = current_time()
    existing = db(db.level_time.session_id == session_id).select()
    if existing:
        # Reuse the row: restart the clock and clear any stale end time.
        db(db.level_time.session_id == session_id).update(start_time=now, end_time='')
    else:
        db.level_time.insert(session_id=session_id, start_time=now)
    module_logger.debug("Set start_time of current level.")
    log_rpc_call(session_id, "_set_start_time",
                 parameters=['session_id:%s' % session_id])
def _set_end_time(session_id):
    """Record the end time of the current level for this session."""
    now = current_time()
    db(db.level_time.session_id == session_id).update(end_time=now)
    module_logger.debug("Set end_time of current level.")
    log_rpc_call(session_id, "_set_end_time",
                 parameters=['session_id:%s' % session_id])
def _calculate_time(session_id):
    """The 'end'time is substracted from 'start'time and the result time is
    returned in a format suitable to put into the dbase.
    Times must be in the format as returnt from util.current_time.
    """
    row = db(db.level_time.session_id == session_id).select().first()
    start = row['start_time']
    end = row['end_time']
    # current_time() yields "yy-mm-dd_HH:MM:SS"; normalize every separator
    # to ':' so each stamp splits into six integer fields.
    start = start.replace('-', ':').replace('_', ':')
    end = end.replace('-', ':').replace('_', ':')
    arg_key0 = [int(s) for s in start.split(':')]
    arg_key1 = [int(s) for s in end.split(':')]
    dt0 = datetime.datetime( * arg_key0)
    dt1 = datetime.datetime( * arg_key1)
    dt2 = dt1 - dt0
    # NOTE(review): despite the names, h is whole minutes and m is the
    # leftover seconds (seconds / 60 and seconds % 60), so the result is
    # "MM:SS", not "HH:MM". Also relies on Python 2 integer division.
    h, m = dt2.seconds / 60, dt2.seconds % 60
    timespend = "%02d:%02d" % (h, m)
    module_logger.debug("Calculated timespend for current level.")
    param = ['session_id:%s' % session_id]
    log_rpc_call(session_id, "_calculate_time", parameters=param, return_value=timespend)
    return timespend
## Local Methods ##
def _set_difficulty(session_id, level):
    """Persist a new difficulty level on the given session."""
    user = auth.user['username']
    pwd = auth.user['password']
    query = ((db.session.username == user) &
             (db.session.password == pwd) &
             (db.session.id == session_id))
    db(query).update(level=level)
    module_logger.debug("Set level difficulty: %s" % level)
    log_rpc_call(session_id, "_set_difficulty",
                 parameters=['session_id:%s' % session_id, 'level:%s' % level],
                 return_value=level)
def log_rpc_call(session_id, call, parameters=None, return_value=None):
    """Logs a given rpc call in the db.
    Parameters: String session_id - unique client session ID
                String call - rpc call (method) name
                List parameters - a list of string containing the parameters and its values
                String return_value - value the call returns"""
    username = auth.user['username']
    row = db(db.session.id == session_id).select().first()
    # FIX: the original dereferenced row.id unconditionally and crashed
    # when no session row matched; fall back to the raw id so logging
    # never breaks the call being logged.
    session = row.id if row else session_id
    db.rpc_call.insert(username=username, session=session,
                       call=call, parameters=parameters, return_value=return_value)
| Python |
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## Customize your APP title, subtitle and menus here
#########################################################################
response.title = request.application
response.subtitle = T('Software Development')
# HTML head meta tags (see http://dev.w3.org/html5/markup/meta.name.html)
response.meta.author = 'Rene Dohmen'
response.meta.description = 'Formatics implements and supports custom applications. Research, new development, prototyping, modification, reuse, re-engineering, maintenance are our key services.'
response.meta.keywords = 'webapplications, software, development'
response.meta.generator = 'Formatics'
response.meta.copyright = 'Copyright Formatics 2011-2015'
##########################################
## this is the main application menu
## add/remove items as required
##########################################
error = URL(r=request, f='error')
#MAIN MENU
# NOTE(review): response.menu is only initialized when no 'Home' page exists
# in the db; when one does exist, the .append calls below rely on
# response.menu having been set elsewhere -- confirm.
if db((db.page.short_title == 'Home')).isempty():
    response.menu = [(T('Home'), False, URL('default', 'index'), [])]
# One top-level menu entry per root page (parent == 0) in the active language.
pages = db((db.page.parent == 0) & (db.page.language == T.accepted_language)).select(orderby=db.page.order_nr)
for page in pages:
    sub_pages = db(db.page.parent == page.id).select(orderby=db.page.order_nr)
    if len(sub_pages) < 11:
        # Up to ten children: render them as a submenu.
        submenu = []
        for sub_page in sub_pages:
            submenu.append([sub_page.short_title, False, URL('page', 'show/%s' % (sub_page.url))])
        response.menu.append((page.short_title, False, URL('page', 'show/%s' % (page.url)), submenu))
    else:
        # Too many children: add the entry without a submenu.
        response.menu.append((page.short_title, False, URL('page', 'show/%s' % (page.url))))
response.menu.append((T('Dashboard'), True, URL('dashboard', 'index')))
#response.menu.append((T('Sessions'), False, URL('dashboard','sessions')))
#response.menu.append((T('RPC Calls'), False, URL('dashboard','rpc_calls')))
#Context menu
response.right_menu = []
response.navigation_helper = []
if request.controller == "page" and (request.function == "show" or request.function == "showEditable"):
    # Resolve the current page either by numeric id or by its url slug.
    try:
        page_id = int(request.args(0))
        page = db.page[page_id] or redirect(error)
    except:
        page = db(db.page.url == request.args(0)).select().first() or redirect(error)
        page_id = page.id
    if page.childrenTitle:
        response.right_menu_title = page.childrenTitle
    else:
        response.right_menu_title = page.short_title
    sub_pages = db(db.page.parent == page_id).select(orderby=db.page.order_nr)
    for sub_page in sub_pages:
        response.right_menu.append((sub_page.short_title, False, URL('page', 'show/%s' % (sub_page.url), [])))
    # deal with navigation helper: walk up the parent chain to build a
    # breadcrumb trail. NOTE: termination relies on db.page[parent] being
    # falsy at the root, which triggers redirect(error); that raised HTTP
    # exception is swallowed by the bare except and ends the loop.
    parent = page.parent
    done = False
    while not done:
        try:
            temp_page = db.page[parent] or redirect(error)
            response.navigation_helper.insert(0, (temp_page.short_title, False, URL('page', 'show/%s' % (temp_page.url), [])))
            parent = temp_page.parent
        except:
            done = True
#response.menu.append([(T('Manage nodes'), False, URL('provision','manageNodes'), [])])
#response.menu.append([(T('Manage packages'), False, URL('provision','managePackages'), [])])
#Static items in context menu
response.right_menu_static = []
response.right_menu_form = []
| Python |
import os
import re
import time
import datetime
import hashlib
import mac2cred
def button(text, action, args=[]):
    """Render *text* as a styled link to *action* (optionally with URL args)."""
    target = URL(r=request, f=action, args=args)
    return A(text, _href=target, _class='button')
def buttonDownload(text, action, args=[]):
    """Render a bracketed download link served from the static controller."""
    link = A(text, _href=URL(c='static', f=action, args=args))
    return SPAN('[', link, ']')
def buttonIcon(icon, text, action, args=[]):
    """Render a 64px icon button; falls back from .png to .jpg to a placeholder."""
    icon_dir = os.path.join(request.folder, 'static', 'images', 'btp', 'cop_icons')
    # Probe for a matching icon on disk, preferring png over jpg.
    if os.path.exists(os.path.join(icon_dir, '%s.png' % icon)):
        image = IMG(_src=URL(r=request, c='static', f=os.path.join('images', 'btp', 'cop_icons', '%s.png' % icon)), _width="64")
    elif os.path.exists(os.path.join(icon_dir, '%s.jpg' % icon)):
        image = IMG(_src=URL(r=request, c='static', f=os.path.join('images', 'btp', 'cop_icons', '%s.jpg' % icon)), _width="64")
    else:
        image = IMG(_src=URL(r=request, c='static', f=os.path.join('images', 'btp', 'cop_icons', 'noimage.jpg')), _width="64")
    label = SPAN(text, _style='position:relative; top: -30px; padding-left:15px;')
    return A([image, label], _href=URL(r=request, f=action, args=args), _class='button')
def url(f, args=request.args, vars={}):
    """Build a URL for action *f* within the current request context."""
    target = URL(r=request, f=f, args=args, vars=vars)
    return target
def goto(f, args=request.args, vars={}, message='error'):
    """Flash *message* and redirect to action *f*."""
    session.flash = message
    redirect(url(f, args=args, vars=vars))
def error():
    """Redirect to the generic error page with the default flash message."""
    goto('error')
def get(table, i=0, message='error'):
    """Fetch the record of *table* whose id is request arg *i*, or bail out."""
    try:
        record_id = int(request.args(i))
    except ValueError:
        goto('error', message=message)
    return table[record_id] or goto('error', message=message)
def link_client(client):
    """Link to the client's detail page, labelled with the last name."""
    href = url('showClient', client.id)
    return A(client.last_name, _href=href)
def link_contact(contact):
    """Link to the contact's detail page, labelled with the last name."""
    href = url('showContact', contact.id)
    return A(contact.last_name, _href=href)
def link(text, action, args=[]):
    """Render *text* as a SPAN-wrapped link to *action*."""
    anchor = A(text, _href=URL(r=request, f=action, args=args))
    return SPAN(anchor)
def pageIcon(link):
    """Return a 48px icon IMG for *link*, chosen by its file extension.

    Falls back to a generic download icon for unknown extensions and to a
    'link' icon when there is no extension at all.
    """
    extension = os.path.splitext(link)[1]
    if extension:
        extension = extension.replace('.', '')
        nice = os.path.join('images', 'extensions', '%s.png' % extension)
        plain = os.path.join('images', 'icons', 'file_extension_%s.png' % extension)
        if os.path.exists(os.path.join(request.folder, 'static', nice)):
            # Yay! Found a nice icon.
            return IMG(_src=URL(r=request, c='static', f=nice), _width="48")
        elif os.path.exists(os.path.join(request.folder, 'static', plain)):
            # Only found a less nice icon, but an icon nonetheless.
            return IMG(_src=URL(r=request, c='static', f=plain), _width="48")
        else:
            return IMG(_src=URL(r=request, c='static', f=os.path.join('images', 'extensions', 'download.png')), _width="48")
    else:
        # No extension found; best guess is that it is an external link.
        return IMG(_src=URL(r=request, c='static', f=os.path.join('images', 'extensions', 'isp.png')), _width="48")
def showQuiz(quiz, quiz_id):
    """Render the quiz with the given id in the emulator view.

    *quiz* selects the table: 'personal' or 'regional'; any other value
    yields no content and the function returns None (no error handling,
    as in the original).
    """
    if quiz == 'personal':
        content = db(db.quizpersonal.id == quiz_id).select().first()
    elif quiz == 'regional':
        content = db(db.quizregional.id == quiz_id).select().first()
    else:
        content = False
    if content:
        return response.render('emulator/show.html', dict(content=content))
def md5_hash(text):
    """Generate a md5 hash with the given text."""
    digest = hashlib.md5(text)
    return digest.hexdigest()
def addslashes(s):
    """Escape single quotes in *s* and mark the result as safe XML."""
    escaped = s.replace("'", "\\'")
    return XML(escaped)
def resizeImage(fileName, size=100, fileLocation=False, fileExt="png", fileType="PNG"):
    """Write a *size*x*size* thumbnail of uploads/<fileName>.

    Best-effort: silently returns when PIL is unavailable or the source
    image cannot be opened. When fileLocation is False the thumbnail is
    written under static/images/thumbs/<size>/.
    """
    try:
        import os, uuid
        import os.path
        from PIL import Image
    except: return
    #fileDirectory = '%sstatic/images/thumbs/%s' % (request.folder, size)
    if fileLocation == False:
        fileLocation = '%sstatic/images/thumbs/%s/%s.%s' % (request.folder, size, os.path.splitext(fileName)[0], fileExt)
    # Make sure the target directory exists.
    d = os.path.dirname(fileLocation)
    if not os.path.exists(d):
        os.makedirs(d)
    resize = (size, size)
    try:
        im = Image.open(request.folder + 'uploads/' + fileName)
    except: return
    # thumbnail() keeps the aspect ratio within the bounding box.
    im.thumbnail(resize, Image.ANTIALIAS)
    #thumbName='page.imageThumb.%s.png' % (uuid.uuid4())
    im.save(fileLocation, fileType, quality=95)
    return
def current_time():
    """Return the local time formatted as 'yy-mm-dd_HH:MM:SS'.

    Maincore uses this to set the 'time_start' and 'time_end' values in
    the dbase table.
    """
    return time.strftime("%y-%m-%d_%H:%M:%S", time.localtime())
def get_web2py_version():
    """Return the version token from the local VERSION file.

    The file is expected to start with a line like 'Version 2.x.y ...';
    the second space-separated token of the first line is returned.
    """
    file_path = "VERSION"
    # FIX: use a context manager so the file handle is always closed
    # (the original leaked the open file object).
    with open(file_path) as txt:
        line = txt.readline()
    version = line.split(" ")[1]
    return version
| Python |
try:
import loggersetup
except Exception,info:
print info | Python |
#set the language
# Dutch is forced for everyone for now; the admin branch used to honour a
# siteLanguage cookie and can be restored once the EN translation is done.
if auth.has_membership('admins'):
    #if 'siteLanguage' in request.cookies and not (request.cookies['siteLanguage'] is None):
    #T.force(request.cookies['siteLanguage'].value)
    T.force('nl')
else:
    #FORCE LANG TO NL for non admin stuff
    #WHEN EN trans is done; remove the admin stuff
    T.force('nl')
| Python |
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## This is a samples controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
## - call exposes all registered services (none by default)
#########################################################################
#error handler
# Pre-built URLs used throughout this controller as redirect() targets.
error = URL(r=request, f='error')
error_no_text = URL(r=request, f='error_no_text')
error_no_linked_content = URL(r=request, f='error_no_linked_content')
#needed for Picasa
import gdata.photos.service
import gdata.media
def debug():
    """Render the full request object for inspection."""
    payload = BEAUTIFY(request)
    return dict(message=payload)
@auth.requires_membership('admins')
def index():
    """
    Start page of page module: lets you make new pages
    """
    auth.settings.registration_requires_approval = True
    top_level = ((db.page.parent == 0) &
                 (db.page.language == T.accepted_language))
    existing_count = db(top_level).count()
    form = SQLFORM(db.page)
    # New pages are created at the top level, appended after the
    # existing root pages of the active language.
    form.vars.parent = 0
    form.vars.language = T.accepted_language
    form.vars.order_nr = existing_count + 1
    if form.process().accepted:
        redirect(URL())
    elif form.errors:
        response.flash = T('form has errors')
    else:
        response.flash = T('please fill the form')
    pages = db(db.page.language == T.accepted_language).select()
    return dict(pages=pages, form=form)
@auth.requires_membership('admins')
def deletePageItem():
    """Delete a content item from a page and close the order_nr gap.

    args: page id, page_item id.
    """
    try:
        page_id, page_item_id = request.args[:2]
    except:
        redirect(error)
    page = db(db.page.id == page_id).select().first() or redirect(error)
    page_item = db(db.page_item.id == page_item_id).select().first() or redirect(error)
    # Map the polymorphic page_item.tablename to its backing table.
    # FIX: page_facebook and page_twitter were missing from the original
    # if/elif chain even though editFacebook/editTwitter can create such
    # items, which made them impossible to delete.
    item_tables = {
        'page_text': db.page_text,
        'page_image': db.page_image,
        'page_link': db.page_link,
        'page_faq': db.page_faq,
        'page_dealer': db.page_dealer,
        'page_file': db.page_file,
        'page_picasa': db.page_picasa,
        'page_youtube': db.page_youtube,
        'page_facebook': db.page_facebook,
        'page_twitter': db.page_twitter,
        'page_slider': db.page_slider,
        'page_form': db.page_form,
    }
    table = item_tables.get(page_item.tablename)
    if table is None:
        # Unknown delete type!
        redirect(error)
    del table[page_item.record_id]
    # Renumber the remaining items so order_nr stays contiguous.
    deleted_order_nr = int(page_item.order_nr)
    for sibling in db(db.page_item.page == page_id).select():
        if int(sibling.order_nr) > deleted_order_nr:
            db((db.page_item.id == sibling.id) &
               (db.page_item.page == page_id)).update(order_nr=int(sibling.order_nr) - 1)
    # Finally remove the page_item row itself.
    del db.page_item[page_item_id]
    redirect(URL('showEditable/%s' % page_id))
def makeThumbnail(dbtable, ImageID, size=(134,134)):
    """Create a PNG thumbnail for dbtable[ImageID].image and store its name.

    Best-effort: silently returns when the record is missing or PIL is
    unavailable (the page then simply shows no thumbnail).
    """
    try:
        thisImage = db(dbtable.id == ImageID).select()[0]
        import os, uuid
        from PIL import Image
    except: return
    im = Image.open(request.folder + 'uploads/' + thisImage.image)
    im.thumbnail(size, Image.ANTIALIAS)
    # Random name avoids collisions in the shared thumbs directory.
    thumbName = 'page.imageThumb.%s.png' % (uuid.uuid4())
    im.save(request.folder + 'static/images/thumbs/' + thumbName, 'png', optimize=True)
    thisImage.update_record(imageThumb=thumbName)
    return
@auth.requires_membership('admins')
def sortOrderNr():
    """Move a page_item to a new position by renumbering order_nr values.

    args: page id, page_item id, target order_nr.
    """
    try:
        # fetch the args
        page_id, page_item_id, new_sort_var = request.args[:3]
        # cast them to int
        page_id = int(page_id)
        page_item_id = int(page_item_id)
        order_nr_new = int(new_sort_var)
    except:
        redirect(error)
    page_item1 = db(db.page_item.id == page_item_id).select().first() or redirect(error)
    order_nr_old = page_item1.order_nr
    if(order_nr_new > order_nr_old):
        #move down
        # collect all items whose number has to shift up by one
        updateOrderNrs = db((db.page_item.order_nr > order_nr_old) & (db.page_item.order_nr <= order_nr_new) & (db.page_item.page == page_id)).select(orderby=db.page_item.order_nr)
        # park the moved item at order_nr 0
        db((db.page_item.order_nr == order_nr_old) & (db.page_item.page == page_id)).update(order_nr = 0)
        # tempNr walks the vacated positions
        tempNr = int(order_nr_old)
        for page_item in updateOrderNrs:
            db(db.page_item.id == page_item.id).update(order_nr = tempNr)
            tempNr += 1
        # give the parked item its final position
        db((db.page_item.order_nr == 0) & (db.page_item.page == page_id)).update(order_nr = order_nr_new)
        message = 'moved down'
    else:
        #move up
        # collect all items whose number has to shift down by one
        updateOrderNrs = db((db.page_item.order_nr < order_nr_old) & (db.page_item.order_nr >= order_nr_new) & (db.page_item.page == page_id)).select(orderby=db.page_item.order_nr)
        # park the moved item at order_nr 0
        db((db.page_item.order_nr == order_nr_old) & (db.page_item.page == page_id)).update(order_nr = 0)
        # tempNr walks the positions after the insertion point
        tempNr = int(order_nr_new) + 1
        for page_item in updateOrderNrs:
            db(db.page_item.id == page_item.id).update(order_nr = tempNr)
            tempNr += 1
        # give the parked item its final position
        db((db.page_item.order_nr == 0) & (db.page_item.page == page_id)).update(order_nr = order_nr_new)
        message = 'moved up'
    redirect(URL('showEditable/%s' % page_id))
@auth.requires_membership('admins')
def resetOrderNr():
    """Reset order_nr for page items with ids 1..5 on the given page.

    NOTE(review): the filter matches db.page_item.id (the global row id)
    against 1..5, not the item's position within the page -- confirm this
    is the intended behaviour before relying on it.
    """
    try:
        page_id = int(request.args(0))
    except:
        redirect(error)
    for i in range(1, 6):
        db((db.page_item.id == i) & (db.page_item.page == page_id)).update(order_nr=i)
    # FIX: redirect() raises, so the original trailing
    # `return dict(message=message)` was unreachable and has been removed.
    redirect(URL('showEditable/%s' % page_id))
@auth.requires_membership('admins')
def editMarkmin():
    """Create or edit a markmin text item. args: page id, item id ('0' = new)."""
    try:
        page_id, item_id = request.args[:2]
    except:
        redirect(error)
    page = db.page[page_id] or redirect(error)
    creating = (item_id == "0")
    if creating:
        form = SQLFORM(db.page_text, formstyle='table2cols')
    else:
        record = db.page_text[item_id] or redirect(error)
        form = SQLFORM(db.page_text, record, formstyle='table2cols')
    form.vars.type = 'markmin'
    if form.process().accepted:
        if creating:
            # Register the new row as the last item on the page.
            new_row = db(db.page_text).select().last()
            position = db(db.page_item.page == page_id).count() + 1
            db.page_item.insert(page=page_id, record_id=new_row.id,
                                tablename='page_text', order_nr=position)
        redirect(URL('showEditable/%s' % page_id))
    return dict(form=form, page=page, subtitle=T('text'))
@auth.requires_membership('admins')
def editHtml():
    """Create or edit an HTML text item. args: page id, item id ('0' = new)."""
    try:
        page_id, item_id = request.args[:2]
    except:
        redirect(error)
    page = db.page[page_id] or redirect(error)
    creating = (item_id == "0")
    if creating:
        form = SQLFORM(db.page_text, formstyle='table2cols')
    else:
        record = db.page_text[item_id] or redirect(error)
        form = SQLFORM(db.page_text, record, formstyle='table2cols')
    form.vars.type = 'html'
    if form.process().accepted:
        if creating:
            # Register the new row as the last item on the page.
            new_row = db(db.page_text).select().last()
            position = db(db.page_item.page == page_id).count() + 1
            db.page_item.insert(page=page_id, record_id=new_row.id,
                                tablename='page_text', order_nr=position)
        redirect(URL('showEditable/%s' % page_id))
    return dict(form=form, page=page, subtitle=T('text'))
@auth.requires_membership('admins')
def edit():
    """Implements editing of page title and url"""
    dbtable = db.page  # uploads table name
    page_id = int(request.args(0))
    record = db.page[page_id] or redirect(error)
    form = SQLFORM(db.page, record)
    if form.process().accepted:
        #makeThumbnail(dbtable,form.vars.id,(134,134)) #Thumbnail not used in this project
        redirect(URL('showEditable/%s' % page_id))
    return dict(form=form, page=record, subtitle=T('Editing the page itself'))
@auth.requires_membership('admins')
def editText():
    """Create or edit a plain-text item. args: page id, item id ('0' = new)."""
    response.view = 'page/edit.html'
    try:
        page_id, item_id = request.args[:2]
    except:
        redirect(error)
    page = db.page[page_id] or redirect(error)
    creating = (item_id == "0")
    if creating:
        form = SQLFORM(db.page_text)
    else:
        record = db.page_text[item_id] or redirect(error)
        form = SQLFORM(db.page_text, record)
    form.vars.type = 'textarea'
    if form.process().accepted:
        if creating:
            # Register the new row as the last item on the page.
            new_row = db(db.page_text).select().last()
            position = db(db.page_item.page == page_id).count() + 1
            db.page_item.insert(page=page_id, record_id=new_row.id,
                                tablename='page_text', order_nr=position)
        redirect(URL('showEditable/%s' % page_id))
    return dict(form=form, page=page, subtitle=T('Plain text'))
@auth.requires_membership('admins')
def editLink():
    """Create or edit a link item. args: page id, item id ('0' = new)."""
    response.view = 'page/edit.html'
    try:
        page_id, item_id = request.args[:2]
    except:
        redirect(error)
    page = db.page[page_id] or redirect(error)
    creating = (item_id == "0")
    if creating:
        form = SQLFORM(db.page_link)
    else:
        record = db.page_link[item_id] or redirect(error)
        form = SQLFORM(db.page_link, record)
    if form.process().accepted:
        if creating:
            # Register the new row as the last item on the page.
            new_row = db(db.page_link).select().last()
            position = db(db.page_item.page == page_id).count() + 1
            db.page_item.insert(page=page_id, record_id=new_row.id,
                                tablename='page_link', order_nr=position)
        redirect(URL('showEditable/%s' % page_id))
    return dict(form=form, page=page, subtitle=T('link'))
@auth.requires_membership('admins')
def editFAQ():
    """Create or edit a FAQ item. args: page id, item id ('0' = new)."""
    response.view = 'page/edit.html'
    try:
        page_id, item_id = request.args[:2]
    except:
        redirect(error)
    page = db.page[page_id] or redirect(error)
    creating = (item_id == "0")
    if creating:
        form = SQLFORM(db.page_faq)
    else:
        record = db.page_faq[item_id] or redirect(error)
        form = SQLFORM(db.page_faq, record)
    if form.process().accepted:
        if creating:
            # Register the new row as the last item on the page.
            new_row = db(db.page_faq).select().last()
            position = db(db.page_item.page == page_id).count() + 1
            db.page_item.insert(page=page_id, record_id=new_row.id,
                                tablename='page_faq', order_nr=position)
        redirect(URL('showEditable/%s' % page_id))
    return dict(form=form, page=page, subtitle=T('faq'))
@auth.requires_membership('admins')
def editDealer():
    """Create or edit a dealer item. args: page id, item id ('0' = new)."""
    response.view = 'page/edit.html'
    try:
        page_id, item_id = request.args[:2]
    except:
        redirect(error)
    page = db.page[page_id] or redirect(error)
    creating = (item_id == "0")
    if creating:
        form = SQLFORM(db.page_dealer)
    else:
        record = db.page_dealer[item_id] or redirect(error)
        form = SQLFORM(db.page_dealer, record)
    if form.process().accepted:
        if creating:
            # Register the new row as the last item on the page.
            new_row = db(db.page_dealer).select().last()
            position = db(db.page_item.page == page_id).count() + 1
            db.page_item.insert(page=page_id, record_id=new_row.id,
                                tablename='page_dealer', order_nr=position)
        redirect(URL('showEditable/%s' % page_id))
    return dict(form=form, page=page, subtitle=T('dealer'))
@auth.requires_membership('admins')
def editImage():
    """Create or edit an image item. args: page id, item id ('0' = new)."""
    response.view = 'page/edit.html'
    try:
        page_id, item_id = request.args[:2]
    except:
        redirect(error)
    page = db.page[page_id] or redirect(error)
    creating = (item_id == "0")
    if creating:
        form = SQLFORM(db.page_image)
    else:
        record = db.page_image[item_id] or redirect(error)
        form = SQLFORM(db.page_image, record)
    if form.process().accepted:
        if creating:
            # Register the new row as the last item on the page.
            new_row = db(db.page_image).select().last()
            position = db(db.page_item.page == page_id).count() + 1
            db.page_item.insert(page=page_id, record_id=new_row.id,
                                tablename='page_image', order_nr=position)
        redirect(URL('showEditable/%s' % page_id))
    return dict(form=form, page=page, subtitle=T('image'))
@auth.requires_membership('admins')
def editForm():
    """Create or edit a form item. args: page id, item id ('0' = new).

    FIX: this action was the only edit* action missing the
    admins-membership decorator, leaving it reachable by anyone.
    """
    response.view = 'page/edit.html'
    try:
        page_id, item_id = request.args[:2]
    except:
        redirect(error)
    page = db.page[page_id] or redirect(error)
    if item_id != "0":
        item = db.page_form[item_id] or redirect(error)
        form = SQLFORM(db.page_form, item)
    else:
        form = SQLFORM(db.page_form)
    if form.process().accepted:
        if item_id == "0":
            # Register the new row as the last item on the page.
            last = db(db.page_form).select().last()
            maxPageItem = db(db.page_item.page == page_id).count()
            db.page_item.insert(page=page_id, record_id=last.id,
                                tablename='page_form', order_nr=maxPageItem + 1)
        redirect(URL('showEditable/%s' % page_id))
    return dict(form=form, page=page, subtitle=T('form'))
@auth.requires_membership('admins')
def editFile():
    """Create or edit a file item. args: page id, item id ('0' = new)."""
    response.view = 'page/edit.html'
    try:
        page_id, item_id = request.args[:2]
    except:
        redirect(error)
    page = db.page[page_id] or redirect(error)
    creating = (item_id == "0")
    if creating:
        form = SQLFORM(db.page_file)
    else:
        record = db.page_file[item_id] or redirect(error)
        form = SQLFORM(db.page_file, record)
    if form.process().accepted:
        if creating:
            # Register the new row as the last item on the page.
            new_row = db(db.page_file).select().last()
            position = db(db.page_item.page == page_id).count() + 1
            db.page_item.insert(page=page_id, record_id=new_row.id,
                                tablename='page_file', order_nr=position)
        redirect(URL('showEditable/%s' % page_id))
    return dict(form=form, page=page, subtitle=T('file'))
@auth.requires_membership('admins')
def editPicasa():
    """Create or edit a Picasa album item. args: page id, item id ('0' = new)."""
    response.view = 'page/edit.html'
    try:
        page_id, item_id = request.args[:2]
    except:
        redirect(error)
    page = db.page[page_id] or redirect(error)
    creating = (item_id == "0")
    if creating:
        form = SQLFORM(db.page_picasa)
    else:
        record = db.page_picasa[item_id] or redirect(error)
        form = SQLFORM(db.page_picasa, record)
    if form.process().accepted:
        if creating:
            # Register the new row as the last item on the page.
            new_row = db(db.page_picasa).select().last()
            position = db(db.page_item.page == page_id).count() + 1
            db.page_item.insert(page=page_id, record_id=new_row.id,
                                tablename='page_picasa', order_nr=position)
        redirect(URL('showEditable/%s' % page_id))
    return dict(form=form, page=page, subtitle=T('picasa'))
@auth.requires_membership('admins')
def editYoutube():
    """Create or edit a YouTube item. args: page id, item id ('0' = new)."""
    response.view = 'page/edit.html'
    try:
        page_id, item_id = request.args[:2]
    except:
        redirect(error)
    page = db.page[page_id] or redirect(error)
    creating = (item_id == "0")
    if creating:
        form = SQLFORM(db.page_youtube)
    else:
        record = db.page_youtube[item_id] or redirect(error)
        form = SQLFORM(db.page_youtube, record)
    if form.process().accepted:
        if creating:
            # Register the new row as the last item on the page.
            new_row = db(db.page_youtube).select().last()
            position = db(db.page_item.page == page_id).count() + 1
            db.page_item.insert(page=page_id, record_id=new_row.id,
                                tablename='page_youtube', order_nr=position)
        redirect(URL('showEditable/%s' % page_id))
    return dict(form=form, page=page, subtitle=T('youtube'))
@auth.requires_membership('admins')
def editFacebook():
    """Create or edit a Facebook item. args: page id, item id ('0' = new)."""
    response.view = 'page/edit.html'
    try:
        page_id, item_id = request.args[:2]
    except:
        redirect(error)
    page = db.page[page_id] or redirect(error)
    creating = (item_id == "0")
    if creating:
        form = SQLFORM(db.page_facebook)
    else:
        record = db.page_facebook[item_id] or redirect(error)
        form = SQLFORM(db.page_facebook, record)
    if form.process().accepted:
        if creating:
            # Register the new row as the last item on the page.
            new_row = db(db.page_facebook).select().last()
            position = db(db.page_item.page == page_id).count() + 1
            db.page_item.insert(page=page_id, record_id=new_row.id,
                                tablename='page_facebook', order_nr=position)
        redirect(URL('showEditable/%s' % page_id))
    return dict(form=form, page=page, subtitle=T('facebook'))
@auth.requires_membership('admins')
def editTwitter():
    """Create or edit a Twitter item. args: page id, item id ('0' = new)."""
    response.view = 'page/edit.html'
    try:
        page_id, item_id = request.args[:2]
    except:
        redirect(error)
    page = db.page[page_id] or redirect(error)
    creating = (item_id == "0")
    if creating:
        form = SQLFORM(db.page_twitter)
    else:
        record = db.page_twitter[item_id] or redirect(error)
        form = SQLFORM(db.page_twitter, record)
    if form.process().accepted:
        if creating:
            # Register the new row as the last item on the page.
            new_row = db(db.page_twitter).select().last()
            position = db(db.page_item.page == page_id).count() + 1
            db.page_item.insert(page=page_id, record_id=new_row.id,
                                tablename='page_twitter', order_nr=position)
        redirect(URL('showEditable/%s' % page_id))
    return dict(form=form, page=page, subtitle=T('twitter'))
@auth.requires_membership('admins')
def editSlider():
    """Create or edit a slider item. args: page id, item id ('0' = new)."""
    response.view = 'page/edit.html'
    try:
        page_id, item_id = request.args[:2]
    except:
        redirect(error)
    page = db.page[page_id] or redirect(error)
    creating = (item_id == "0")
    if creating:
        form = SQLFORM(db.page_slider)
    else:
        record = db.page_slider[item_id] or redirect(error)
        form = SQLFORM(db.page_slider, record)
    if form.process().accepted:
        if creating:
            # Register the new row as the last item on the page.
            new_row = db(db.page_slider).select().last()
            position = db(db.page_item.page == page_id).count() + 1
            db.page_item.insert(page=page_id, record_id=new_row.id,
                                tablename='page_slider', order_nr=position)
        redirect(URL('showEditable/%s' % page_id))
    return dict(form=form, page=page, subtitle=T('slider'))
@auth.requires_membership('admins')
def movePage():
    """Move a page under a new parent, renumbering order_nr on both sides.

    args(0): page id or url slug. args(1): id of the new parent (an unknown
    id means 'move to top level'). Without args(1) the page tree is shown
    so a target can be picked.
    """
    if request.args(1):
        page_id = request.args(0)
        page_parent = request.args(1)
        my_page = db(db.page.id == page_id).select().first() or redirect(error)
        my_url = my_page.url
        new_parent = db(db.page.id == page_parent).select().first()
        if new_parent:
            new_parent = new_parent.id
        else:
            new_parent = 0
        # Close the order_nr gap at the old position.
        tempNr = int(my_page.order_nr)
        updateOrderNrs = db((db.page.parent == my_page.parent) &
                            (db.page.order_nr > my_page.order_nr)).select(orderby=db.page.order_nr)
        db(db.page.id == my_page.id).update(order_nr=0)
        for sibling in updateOrderNrs:
            db(db.page.id == sibling.id).update(order_nr=tempNr)
            tempNr += 1
        # Append at the end of the new parent's children.
        highestOrderNr = db(db.page.parent == new_parent).select(orderby=db.page.order_nr).last()
        if highestOrderNr:
            highestOrderNr = int(highestOrderNr.order_nr + 1)
        else:
            highestOrderNr = 1
        # NOTE(review): parent is set to the raw arg page_parent even when
        # new_parent fell back to 0 -- confirm that mismatch is intended.
        row = db(db.page.id == page_id).select().first()
        row.update_record(parent=page_parent, order_nr=highestOrderNr)
        redirect(URL('show', args=(my_url)))
    # No target yet: resolve the page by id or url slug and show the tree.
    try:
        page_id = int(request.args(0))
        my_page = db.page[page_id] or redirect(error)
    except:
        my_page = db(db.page.url == request.args(0)).select().first() or redirect(error)
        # BUG FIX: was `page_id = page.id`, referencing an undefined name
        # (NameError whenever the page was addressed by its url slug).
        page_id = my_page.id
    pages = db(db.page).select(orderby=db.page.parent) or redirect(error)
    return dict(my_page=my_page, pages=pages)
def showPicasa():
    """Fetch all photos of a Picasa album. args: user id, album id."""
    gd_photo_client = gdata.photos.service.PhotosService()
    userID = request.args(0) or 'error'
    albumID = request.args(1) or 'error'
    # FIX: the original tested `if userID and albumID`, which is always
    # true because missing args default to the string 'error'; check the
    # sentinel explicitly so missing args fail fast instead of issuing a
    # doomed network request.
    if userID == 'error' or albumID == 'error':
        redirect(error_no_linked_content)
    try:
        photos = gd_photo_client.GetFeed('/data/feed/base/user/%s/albumid/%s?kind=photo' % (userID, albumID))
    except:
        redirect(error_no_linked_content)
    return dict(photos=photos.entry)
#@cache(request.env.path_info, time_expire=1, cache_model=cache.ram)
def show():
    """Public page view.

    Resolves the page from args(0) (numeric id or url slug) and gathers
    its content items -- everything except page_form -- in display order.
    FIX: removed dead code (`import time; t = time.ctime()`; t was never
    used anywhere in the function).
    """
    try:
        page_id = int(request.args(0))
        page = db.page[page_id] or redirect(error)
    except:
        page = db(db.page.url == request.args(0)).select().first() or redirect(error)
        page_id = page.id
    page_children = db(db.page.parent == page_id).select(orderby=db.page.order_nr)
    page_items = db((db.page_item.page == page.id) &
                    (db.page_item.tablename != 'page_form')).select(orderby=db.page_item.order_nr)
    is_admin = auth.has_membership('admins')
    # Simple item types: the table to read from and the error page to show
    # when the referenced record is gone.
    simple_tables = {
        'page_text': (db.page_text, error_no_text),
        'page_image': (db.page_image, error_no_linked_content),
        'page_link': (db.page_link, error_no_linked_content),
        'page_faq': (db.page_faq, error_no_linked_content),
        'page_dealer': (db.page_dealer, error_no_linked_content),
        'page_file': (db.page_file, error_no_linked_content),
        'page_youtube': (db.page_youtube, error_no_linked_content),
        'page_facebook': (db.page_facebook, error_no_linked_content),
        'page_twitter': (db.page_twitter, error_no_linked_content),
        'page_slider': (db.page_slider, error_no_linked_content),
    }
    content = []
    for page_item in page_items:
        if page_item.tablename in simple_tables:
            table, err = simple_tables[page_item.tablename]
            content.append(table[page_item.record_id] or redirect(err))
        elif page_item.tablename == 'page_picasa':
            # Picasa items are expanded inline into their photo feed.
            temp = db.page_picasa[page_item.record_id] or redirect(error_no_linked_content)
            gd_photo_client = gdata.photos.service.PhotosService()
            userID = temp.userid
            albumID = temp.albumid
            if userID and albumID:
                try:
                    photos = gd_photo_client.GetFeed('/data/feed/base/user/%s/albumid/%s?kind=photo' % (userID, albumID))
                    content.append(photos.entry)
                except:
                    redirect(error_no_linked_content)
    return dict(page=page, page_items=page_items, content=content, page_children=page_children, is_admin=is_admin)
@auth.requires_membership('admins')
def showEditable():
    """Admin view of a page (id or URL slug in request.args(0)).

    Renders every page item (including forms) and offers a form to append a
    new child page after the current page's siblings.
    """
    # Resolve by numeric id first, then by URL slug (mirrors show()).
    page = None
    try:
        page_id = int(request.args(0))
        page = db.page[page_id]
    except (ValueError, TypeError):
        pass
    if not page:
        page = db(db.page.url == request.args(0)).select().first() or redirect(error)
    page_id = page.id
    page_children = db(db.page.parent == page_id).select(orderby=db.page.order_nr)
    form = SQLFORM.factory(db.page)
    if form.accepts(request.vars):
        #makeThumbnail(dbtable,form.vars.id,(134,134))
        # New page becomes a child of this one, placed after existing siblings.
        form.vars.parent = page_id
        maxPage = db(db.page.parent == page.parent).count()
        form.vars.order_nr = maxPage + 1
        form.vars.language = T.accepted_language
        db.page.insert(**db.page._filter_fields(form.vars))
        redirect(URL('showEditable/%s' % page_id))
    page_items = db(db.page_item.page == page.id).select(orderby=db.page_item.order_nr)
    # One (table, error-target) pair per item type replaces the old if/elif ladder.
    item_tables = {
        'page_text': (db.page_text, error_no_text),
        'page_image': (db.page_image, error_no_linked_content),
        'page_link': (db.page_link, error_no_linked_content),
        'page_faq': (db.page_faq, error_no_linked_content),
        'page_dealer': (db.page_dealer, error_no_linked_content),
        'page_file': (db.page_file, error_no_linked_content),
        'page_picasa': (db.page_picasa, error_no_linked_content),
        'page_youtube': (db.page_youtube, error_no_linked_content),
        'page_facebook': (db.page_facebook, error_no_linked_content),
        'page_twitter': (db.page_twitter, error_no_linked_content),
        'page_slider': (db.page_slider, error_no_linked_content),
        'page_form': (db.page_form, error_no_linked_content),
    }
    content = []
    for page_item in page_items:
        if page_item.tablename in item_tables:
            table, err = item_tables[page_item.tablename]
            content.append(table[page_item.record_id] or redirect(err))
    return dict(page=page, page_items=page_items, page_children=page_children,
                content=content, form=form)
def getAlbums():
    """List a Picasa user's albums (request.args(0) = user id) as a DIV of
    thumbnail links pointing at getPhotos; any failure bounces to index."""
    userID = request.args(0)
    lstAlbums = []
    if not userID:
        redirect(URL('index'))
    try:
        albums = gd_photo_client.GetUserFeed(user=userID)
        for album in albums.entry:
            entry = A(IMG(_src=album.media.thumbnail[0].url, _alt=album.title.text),
                      _href=URL('getPhotos/%s/%s' % (userID, album.gphoto_id.text)))
            lstAlbums.append(entry)
    except Exception:
        # was a bare "except:" — keep the best-effort fallback but stop
        # swallowing SystemExit/KeyboardInterrupt
        redirect(URL('index'))
    return dict(lstAlbums=DIV(*[lstAlbums]))
def getPhotos():
    """List the photos of one Picasa album (args = userID, albumID) as a DIV
    of 320x258 images; any failure bounces to index."""
    userID = request.args(0)
    albumID = request.args(1)
    lstPhotos = []
    if not (userID and albumID):
        redirect(URL('index'))
    try:
        photos = gd_photo_client.GetFeed(
            '/data/feed/base/user/%s/albumid/%s?kind=photo' % (userID, albumID))
        for photo in photos.entry:
            entry = A(IMG(_src=photo.media.content[0].url, _alt=photo.title.text,
                          _width='320', _height='258'))
            lstPhotos.append(entry)
    except Exception:
        # was a bare "except:" — narrow so interpreter-level exits propagate
        redirect(URL('index'))
    return dict(lstPhotos=DIV(*[lstPhotos]))
def user():
    """
    Expose the built-in Auth actions under /default/user/<action>:
    login, logout, register, profile, retrieve_password, change_password.

    Protect other controller functions with:
        @auth.requires_login()
        @auth.requires_membership('group name')
        @auth.requires_permission('read', 'table name', record_id)
    """
    form = auth()
    return dict(form=form)
def download():
    """Stream a previously uploaded file: /default/download/[filename]."""
    return response.download(request, db)
@auth.requires_membership('admins')
def export():
    """Dump the whole database as CSV to 'somefile.csv' in the process's
    working directory (admins only)."""
    # context manager guarantees the handle is flushed and closed; the old
    # code leaked the file object returned by open()
    with open('somefile.csv', 'wb') as dumpfile:
        db.export_to_csv_file(dumpfile)
def call():
    """
    Expose registered services at /default/call/<protocol>:
    xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv.
    Decorate the functions to expose with @service.<protocol>.
    """
    return service()
def data():
    """
    Generic CRUD endpoints:
        http://..../[app]/default/data/tables
        http://..../[app]/default/data/create/[table]
        http://..../[app]/default/data/read/[table]/[id]
        http://..../[app]/default/data/update/[table]/[id]
        http://..../[app]/default/data/delete/[table]/[id]
        http://..../[app]/default/data/select/[table]
        http://..../[app]/default/data/search/[table]
    URLs must be signed, i.e. linked with
        A('table', _href=URL('data/tables', user_signature=True))
    or with the signed load operator
        LOAD('default', 'data.load', args='tables', ajax=True, user_signature=True)
    """
    return dict(form=crud())
# -*- coding: utf-8 -*-
# ##########################################################
# ## make sure administrator is on localhost
# ###########################################################
import os
import socket
import datetime
import copy
import gluon.contenttype
import gluon.fileutils
# ## critical --- make a copy of the environment
# Snapshot of the controller environment; appadmin later evaluates the
# user-typed query strings against this copy (see eval_in_global_env).
global_env = copy.copy(globals())
global_env['datetime'] = datetime
http_host = request.env.http_host.split(':')[0]
remote_addr = request.env.remote_addr
# Addresses considered local; gethostbyname can fail (e.g. no DNS), hence
# the fallback to just the requested host name.
try:
    hosts = (http_host, socket.gethostname(),
             socket.gethostbyname(http_host),
             '::1', '127.0.0.1', '::ffff:127.0.0.1')
except:
    hosts = (http_host, )
# Behind a proxy or over HTTPS mark the session cookie secure; otherwise
# refuse any non-local client, since credentials would travel in clear.
if request.env.http_x_forwarded_for or request.env.wsgi_url_scheme \
        in ['https', 'HTTPS']:
    session.secure()
elif (remote_addr not in hosts) and (remote_addr != "127.0.0.1"):
    raise HTTP(200, T('appadmin is disabled because insecure channel'))
# Require admin credentials: an authorized admin-app session, or (from any
# other app) valid platform credentials.
if (request.application == 'admin' and not session.authorized) or \
        (request.application != 'admin' and not gluon.fileutils.check_credentials(request)):
    redirect(URL('admin', 'default', 'index'))
ignore_rw = True
response.view = 'appadmin.html'
response.menu = [[T('design'), False, URL('admin', 'default', 'design',
                args=[request.application])], [T('db'), False,
                URL('index')], [T('state'), False,
                URL('state')], [T('cache'), False,
                URL('ccache')]]
# ##########################################################
# ## auxiliary functions
# ###########################################################
def get_databases(request):
    """Scan the controller environment for DAL instances and return them as
    a {name: database} dict. *request* is unused (kept for signature
    compatibility with callers)."""
    dbs = {}
    for (key, value) in global_env.items():
        cond = False
        try:
            # GQLDB only exists on Google App Engine deployments; elsewhere
            # the name is undefined, which is the only failure we expect here
            cond = isinstance(value, GQLDB)
        except NameError:
            cond = isinstance(value, SQLDB)
        if cond:
            dbs[key] = value
    return dbs
# Discover the app's databases once per request; appadmin views iterate this.
databases = get_databases(None)
def eval_in_global_env(text):
    """Evaluate *text* (a query/expression string) in the snapshot of the
    controller environment and return the result.

    NOTE(review): this exec's arbitrary text by design -- appadmin is only
    reachable by authenticated admins over a local/secure channel (enforced
    at the top of this file). Never expose this helper to untrusted input.
    """
    exec ('_ret=%s' % text, {}, global_env)
    return global_env['_ret']
def get_database(request):
    """Return the DAL instance named by request.args[0], or flash an error
    and bounce back to the appadmin index."""
    if not (request.args and request.args[0] in databases):
        session.flash = T('invalid request')
        redirect(URL('index'))
    return eval_in_global_env(request.args[0])
def get_table(request):
    """Return (db, tablename) for request.args[0:2], or flash an error and
    bounce back to the appadmin index."""
    db = get_database(request)
    if len(request.args) <= 1 or request.args[1] not in db.tables:
        session.flash = T('invalid request')
        redirect(URL('index'))
    return (db, request.args[1])
def get_query(request):
    """Evaluate request.vars.query in the controller environment and return
    the resulting DAL query, or None when the text does not evaluate."""
    try:
        return eval_in_global_env(request.vars.query)
    except Exception:
        return None
def query_by_table_type(tablename, db, request=request):
    """Build the default 'all rows' query string for *tablename*, honouring
    keyed tables whose first primary-key field may be a string."""
    table = db[tablename]
    if hasattr(table, '_primarykey'):
        firstkey = table[table._primarykey[0]]
        # string keys cannot be compared with >0; use !="" instead
        cond = '!=""' if firstkey.type in ['string', 'text'] else '>0'
        return '%s.%s.%s%s' % (request.args[0], request.args[1],
                               firstkey.name, cond)
    return '%s.%s.id>0' % tuple(request.args[:2])
# ##########################################################
# ## list all databases and tables
# ###########################################################
def index():
    """List all databases (and, in the view, their tables)."""
    return dict(databases=databases)
# ##########################################################
# ## insert a new record
# ###########################################################
def insert():
    """Build and process an insert form for the table in request.args[0:2]."""
    db, table = get_table(request)
    form = SQLFORM(db[table], ignore_rw=ignore_rw)
    if form.accepts(request.vars, session):
        response.flash = T('new record inserted')
    return dict(form=form, table=db[table])
# ##########################################################
# ## list all records in table and insert new record
# ###########################################################
def download():
    """Stream an uploaded file belonging to the selected database."""
    # (removed an unused local "import os" that served no purpose here)
    db = get_database(request)
    return response.download(request, db)
def csv():
    """Export the rows matching request.vars.query as a CSV attachment
    named <dbname>_<tablename>.csv; returns None when there is no query."""
    # gluon.contenttype is already imported at the top of this file;
    # the redundant function-local import was dropped
    response.headers['Content-Type'] = \
        gluon.contenttype.contenttype('.csv')
    db = get_database(request)
    query = get_query(request)
    if not query:
        return None
    response.headers['Content-disposition'] = 'attachment; filename=%s_%s.csv' \
        % tuple(request.vars.query.split('.')[:2])
    return str(db(query).select())
def import_csv(table, file):
    """Append the rows of the open CSV *file* object into DAL *table*."""
    table.import_from_csv_file(file)
def select():
    """Appadmin query browser: evaluate request.vars.query, optionally apply
    a bulk update/delete, import a CSV upload, and page through up to 100
    matching rows (offset via request.vars.start)."""
    import re
    db = get_database(request)
    dbname = request.args[0]
    # Shorthand "table.field=value" queries; keyed tables may have string
    # primary keys, so accept any value there instead of digits only.
    regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>\d+)')
    if len(request.args) > 1 and hasattr(db[request.args[1]], '_primarykey'):
        regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>.+)')
    if request.vars.query:
        match = regex.match(request.vars.query)
        if match:
            # rewrite the shorthand into a full DAL expression: db.table.field==value
            request.vars.query = '%s.%s.%s==%s' % (request.args[0],
                    match.group('table'), match.group('field'),
                    match.group('value'))
    else:
        request.vars.query = session.last_query
    query = get_query(request)
    if request.vars.start:
        start = int(request.vars.start)
    else:
        start = 0
    nrows = 0
    stop = start + 100
    table = None
    rows = []
    # Clicking the same column header twice toggles ascending/descending.
    orderby = request.vars.orderby
    if orderby:
        orderby = dbname + '.' + orderby
        if orderby == session.last_orderby:
            if orderby[0] == '~':
                orderby = orderby[1:]
            else:
                orderby = '~' + orderby
    session.last_orderby = orderby
    session.last_query = request.vars.query
    form = FORM(TABLE(TR(T('Query:'), '', INPUT(_style='width:400px',
                _name='query', _value=request.vars.query or '',
                requires=IS_NOT_EMPTY(error_message=T("Cannot be empty")))), TR(T('Update:'),
                INPUT(_name='update_check', _type='checkbox',
                value=False), INPUT(_style='width:400px',
                _name='update_fields', _value=request.vars.update_fields
                or '')), TR(T('Delete:'), INPUT(_name='delete_check',
                _class='delete', _type='checkbox', value=False), ''),
                TR('', '', INPUT(_type='submit', _value='submit'))),
                _action=URL(r=request, args=request.args))
    # CSV upload: import rows into the table named in request.vars.table.
    if request.vars.csvfile != None:
        try:
            import_csv(db[request.vars.table],
                       request.vars.csvfile.file)
            response.flash = T('data uploaded')
        except Exception, e:
            response.flash = DIV(T('unable to parse csv file'), PRE(str(e)))
    if form.accepts(request.vars, formname=None):
        # regex = re.compile(request.args[0] + '\.(?P<table>\w+)\.id\>0')
        regex = re.compile(request.args[0] + '\.(?P<table>\w+)\..+')
        match = regex.match(form.vars.query.strip())
        if match:
            table = match.group('table')
        try:
            nrows = db(query).count()
            # bulk operations requested via the Update/Delete checkboxes
            if form.vars.update_check and form.vars.update_fields:
                db(query).update(**eval_in_global_env('dict(%s)'
                                 % form.vars.update_fields))
                response.flash = T('%s rows updated', nrows)
            elif form.vars.delete_check:
                db(query).delete()
                response.flash = T('%s rows deleted', nrows)
            nrows = db(query).count()
            if orderby:
                rows = db(query).select(limitby=(start, stop),
                                        orderby=eval_in_global_env(orderby))
            else:
                rows = db(query).select(limitby=(start, stop))
        except Exception, e:
            (rows, nrows) = ([], 0)
            response.flash = DIV(T('Invalid Query'), PRE(str(e)))
    return dict(
        form=form,
        table=table,
        start=start,
        stop=stop,
        nrows=nrows,
        rows=rows,
        query=request.vars.query,
    )
# ##########################################################
# ## edit delete one record
# ###########################################################
def update():
    """Appadmin: edit or delete a single record.

    For id-based tables the record id is request.args(2); keyed tables pass
    their primary-key value(s) in request.vars instead.
    """
    (db, table) = get_table(request)
    keyed = hasattr(db[table], '_primarykey')
    record = None
    if keyed:
        # locate the record by whichever primary-key field was supplied
        key = [f for f in request.vars if f in db[table]._primarykey]
        if key:
            record = db(db[table][key[0]] == request.vars[key[0]]).select().first()
    else:
        record = db(db[table].id == request.args(2)).select().first()
    if not record:
        qry = query_by_table_type(table, db)
        session.flash = T('record does not exist')
        redirect(URL('select', args=request.args[:1],
                     vars=dict(query=qry)))
    if keyed:
        # primary-key fields must not be editable in the form
        for k in db[table]._primarykey:
            db[table][k].writable = False
    form = SQLFORM(db[table], record, deletable=True, delete_label=T('Check to delete'),
                   ignore_rw=ignore_rw and not keyed,
                   linkto=URL('select',
                              args=request.args[:1]), upload=URL(r=request,
                              f='download', args=request.args[:1]))
    if form.accepts(request.vars, session):
        session.flash = T('done!')
        qry = query_by_table_type(table, db)
        redirect(URL('select', args=request.args[:1],
                     vars=dict(query=qry)))
    return dict(form=form, table=db[table])
# ##########################################################
# ## get global variables
# ###########################################################
def state():
    """Expose the web2py environment globals to the appadmin 'state' view
    (the view itself reads request/session/response from the environment)."""
    return {}
def ccache():
    """Appadmin cache inspector: clear the RAM/disk caches on demand and
    report bytes, object counts, hit ratio and oldest-entry age for each."""
    form = FORM(
        P(TAG.BUTTON("Clear CACHE?", _type="submit", _name="yes", _value="yes")),
        P(TAG.BUTTON("Clear RAM", _type="submit", _name="ram", _value="ram")),
        P(TAG.BUTTON("Clear DISK", _type="submit", _name="disk", _value="disk")),
    )
    if form.accepts(request.vars, session):
        clear_ram = False
        clear_disk = False
        session.flash = ""
        # "yes" clears both caches; the other two buttons clear one each
        if request.vars.yes:
            clear_ram = clear_disk = True
        if request.vars.ram:
            clear_ram = True
        if request.vars.disk:
            clear_disk = True
        if clear_ram:
            cache.ram.clear()
            session.flash += "Ram Cleared "
        if clear_disk:
            cache.disk.clear()
            session.flash += "Disk Cleared"
        redirect(URL(r=request))
    # guppy (heapy) provides per-object sizes; it is an optional dependency
    try:
        from guppy import hpy; hp=hpy()
    except ImportError:
        hp = False
    import shelve, os, copy, time, math
    from gluon import portalocker
    ram = {
        'bytes': 0,
        'objects': 0,
        'hits': 0,
        'misses': 0,
        'ratio': 0,
        'oldest': time.time()
    }
    disk = copy.copy(ram)
    total = copy.copy(ram)
    # cache storage mixes one dict of hit/miss counters with (time, value) entries
    for key, value in cache.ram.storage.items():
        if isinstance(value, dict):
            ram['hits'] = value['hit_total'] - value['misses']
            ram['misses'] = value['misses']
            try:
                ram['ratio'] = ram['hits'] * 100 / value['hit_total']
            except (KeyError, ZeroDivisionError):
                ram['ratio'] = 0
        else:
            if hp:
                ram['bytes'] += hp.iso(value[1]).size
                ram['objects'] += hp.iso(value[1]).count
            if value[0] < ram['oldest']:
                ram['oldest'] = value[0]
    # the shelve file is shared between processes: lock while reading it
    locker = open(os.path.join(request.folder,
                  'cache/cache.lock'), 'a')
    portalocker.lock(locker, portalocker.LOCK_EX)
    disk_storage = shelve.open(os.path.join(request.folder, 'cache/cache.shelve'))
    try:
        for key, value in disk_storage.items():
            if isinstance(value, dict):
                disk['hits'] = value['hit_total'] - value['misses']
                disk['misses'] = value['misses']
                try:
                    disk['ratio'] = disk['hits'] * 100 / value['hit_total']
                except (KeyError, ZeroDivisionError):
                    disk['ratio'] = 0
            else:
                if hp:
                    disk['bytes'] += hp.iso(value[1]).size
                    disk['objects'] += hp.iso(value[1]).count
                if value[0] < disk['oldest']:
                    disk['oldest'] = value[0]
    finally:
        portalocker.unlock(locker)
        locker.close()
        disk_storage.close()
    total['bytes'] = ram['bytes'] + disk['bytes']
    total['objects'] = ram['objects'] + disk['objects']
    total['hits'] = ram['hits'] + disk['hits']
    total['misses'] = ram['misses'] + disk['misses']
    try:
        total['ratio'] = total['hits'] * 100 / (total['hits'] + total['misses'])
    except (KeyError, ZeroDivisionError):
        total['ratio'] = 0
    if disk['oldest'] < ram['oldest']:
        total['oldest'] = disk['oldest']
    else:
        total['oldest'] = ram['oldest']
    def GetInHMS(seconds):
        # convert an age in seconds into an (hours, minutes, seconds) tuple
        hours = math.floor(seconds / 3600)
        seconds -= hours * 3600
        minutes = math.floor(seconds / 60)
        seconds -= minutes * 60
        seconds = math.floor(seconds)
        return (hours, minutes, seconds)
    ram['oldest'] = GetInHMS(time.time() - ram['oldest'])
    disk['oldest'] = GetInHMS(time.time() - disk['oldest'])
    total['oldest'] = GetInHMS(time.time() - total['oldest'])
    return dict(form=form, total=total,
                ram=ram, disk=disk)
| Python |
# This file was developed by Massimo Di Pierro
# It is released under BSD, MIT and GPL2 licenses
##########################################################
# code to handle wiki pages
##########################################################
@auth.requires(plugin_wiki_editor)
def index():
    """List wiki pages; editors additionally get a create-page form that can
    clone the body of an existing page via 'from_template'."""
    w = db.plugin_wiki_page
    if plugin_wiki_editor:
        pages = db(w.id > 0).select(orderby=w.slug)
        form = SQLFORM.factory(
            Field('slug', requires=db.plugin_wiki_page.slug.requires),
            Field('from_template',
                  requires=IS_EMPTY_OR(IS_IN_DB(db, db.plugin_wiki_page.slug))))
        if form.accepts(request.vars):
            redirect(URL(r=request, f='page', args=form.vars.slug,
                         vars=dict(template=request.vars.from_template or '')))
    else:
        pages = db(w.is_active == True)(w.is_public == True).select(orderby=w.slug)
        form = ''
    return dict(pages=pages, form=form)
def page():
    """
    Show one wiki page addressed by slug (request.args(0), default 'index').

    Access rules: anonymous users and non-editors only see active public
    pages; a page with a role is limited to members of that role.
    """
    slug = request.args(0) or 'index'
    w = db.plugin_wiki_page
    page = w(slug=slug)
    if not auth.user and (not page or not page.is_public or not page.is_active):
        redirect(URL(r=request, c='default', f='user', args='login'))
    elif not plugin_wiki_editor and (not page or not page.is_public or not page.is_active):
        raise HTTP(404)
    elif page and page.role and not auth.has_membership(page.role):
        raise HTTP(404)
    # .load requests get fully rendered markup; .html goes through the view;
    # any other extension gets the body with widgets/templates stripped
    if request.extension == 'load':
        return plugin_wiki.render(page.body)
    if request.extension == 'html':
        return dict(page=page, slug=slug)
    return MARKMIN(page.body, extra={'widget': (lambda code: ''),
                                    'template': (lambda template: '')})
def page_archive():
    """Display an archived revision of a wiki page; request.args(0) is the
    archive-record id. Hidden revisions raise 404."""
    archive = db.plugin_wiki_page_archive(request.args(0))
    # visible when the record exists AND (editor, or public+active)
    visible = archive and (plugin_wiki_editor or
                           (archive.is_public and archive.is_active))
    if not visible:
        raise HTTP(404)
    if archive.role and not auth.has_membership(archive.role):
        raise HTTP(404)
    if request.extension != 'html':
        return archive.body
    return dict(page=archive)
@auth.requires(plugin_wiki_editor)
def page_edit():
    """Edit the page with slug request.args(0), creating it on first visit
    (optionally copying the body of ?template=<slug>)."""
    slug = request.args(0) or 'index'
    w = db.plugin_wiki_page
    # only level-2+ editors may see or set the access role
    w.role.writable = w.role.readable = plugin_wiki_level > 1
    page = w(slug=slug)
    if not page:
        page = w.insert(slug=slug,
                        title=slug.replace('-', ' ').capitalize(),
                        body=request.vars.template and w(slug=request.vars.template).body or '')
    # crud.archive stores the previous version in plugin_wiki_page_archive
    form = crud.update(w, page, onaccept=crud.archive,
                       next=URL(r=request, f='page', args=request.args))
    return dict(form=form, page=page)
def page_history():
    """Show the change log (newest first) for the page whose slug is
    request.args(0), defaulting to 'index'."""
    slug = request.args(0) or 'index'
    page = db.plugin_wiki_page(slug=slug)
    archive = db.plugin_wiki_page_archive
    revisions = db(archive.current_record == page.id).select(
        orderby=~archive.modified_on)
    return dict(page=page, history=revisions)
##########################################################
# ajax callbacks
##########################################################
@auth.requires_login()
def attachments():
    """Ajax callback: upload/edit attachments bound to
    (tablename, record_id) = request.args(0:2); request.args(2), when
    present, is the id of the attachment being edited."""
    a = db.plugin_wiki_attachment
    a.tablename.default = tablename = request.args(0)
    a.record_id.default = record_id = request.args(1)
    #if request.args(2): a.filename.writable=False
    form = crud.update(a, request.args(2),
                       next=URL(r=request, args=request.args[:2]))
    # ?list_all shows every attachment, not just this record's
    if request.vars.list_all:
        query = a.id > 0
    else:
        query = (a.tablename == tablename) & (a.record_id == record_id)
    rows = db(query).select(orderby=a.name)
    return dict(form=form, rows=rows)
def attachment():
    """Download one attachment addressed by its short name (request.args(0)).

    When plugin_wiki_authorize_attachments is enabled, only names previously
    registered in session.plugin_wiki_attachments may be fetched.
    """
    short = request.args(0)
    if plugin_wiki_authorize_attachments and \
            not short in session.plugin_wiki_attachments:
        raise HTTP(400)
    a = db.plugin_wiki_attachment
    # the short name starts with the attachment record id ("<id>.<ext>")
    record = a(short.split('.')[0])
    if not record: raise HTTP(400)
    # response.download reads the stored filename from request.args[0]
    request.args[0] = record.filename
    return response.download(request, db)
def comment():
    """Ajax callback: post and list the comments attached to
    (tablename, record_id) = request.args(0:2)."""
    tablename = request.args(0)
    record_id = request.args(1)
    if record_id == 'None':
        record_id = 0
    table = db.plugin_wiki_comment
    table.tablename.default = tablename
    table.record_id.default = record_id
    if auth.user:
        form = crud.create(table)
    else:
        form = A(T('login to comment'), _href=auth.settings.login_url)
    comments = db(table.tablename == tablename)(
        table.record_id == record_id).select()
    return dict(form=form, comments=comments)
#@auth.requires_login()
def jqgrid():
    """
    jqgrid callback retrieves records
    http://trirand.com/blog/jqgrid/server.php?q=1&_search=false&nd=1267835445772&rows=10&page=1&sidx=amount&sord=asc&searchField=&searchString=&searchOper=

    Returns the JSON page structure jqGrid expects (page/total/records/rows).
    The request URL must be signed with the Auth hmac key over hash_vars.
    """
    from gluon.serializers import json
    import cgi
    hash_vars = 'tablename|columns|fieldname|fieldvalue|user'.split('|')
    if not URL.verify(request, hmac_key=auth.settings.hmac_key,
                      hash_vars=hash_vars, salt=auth.user_id):
        raise HTTP(404)
    tablename = request.vars.tablename or error()
    columns = (request.vars.columns or error()).split(',')
    rows = int(request.vars.rows or 25)
    page = int(request.vars.page or 0)
    sidx = request.vars.sidx or 'id'
    sord = request.vars.sord or 'asc'
    searchField = request.vars.searchField
    searchString = request.vars.searchString
    # map jqGrid's two-letter operator codes onto DAL query builders
    searchOper = {'eq': lambda a, b: a == b,
                  'nq': lambda a, b: a != b,
                  'gt': lambda a, b: a > b,
                  'ge': lambda a, b: a >= b,
                  'lt': lambda a, b: a < b,
                  'le': lambda a, b: a <= b,
                  'bw': lambda a, b: a.like(b + '%'),
                  'bn': lambda a, b: ~a.like(b + '%'),
                  'ew': lambda a, b: a.like('%' + b),
                  'en': lambda a, b: ~a.like('%' + b),
                  'cn': lambda a, b: a.like('%' + b + '%'),
                  'nc': lambda a, b: ~a.like('%' + b + '%'),
                  'in': lambda a, b: a.belongs(b.split()),
                  'ni': lambda a, b: ~a.belongs(b.split())}\
                 [request.vars.searchOper or 'eq']
    table = db[tablename]
    # optional pre-filter: '|'-separated parallel field names and values
    if request.vars.fieldname:
        names = request.vars.fieldname.split('|')
        values = request.vars.fieldvalue.split('|')
        query = reduce(lambda a, b: a & b,
                       [table[names[i]] == values[i] for i in range(len(names))])
    else:
        query = table.id > 0
    dbset = table._db(query)
    if searchField: dbset = dbset(searchOper(table[searchField], searchString))
    orderby = table[sidx]
    if sord == 'desc': orderby = ~orderby
    # jqGrid pages are 1-based
    limitby = (rows * (page - 1), rows * page)
    fields = [table[f] for f in columns]
    records = dbset.select(orderby=orderby, limitby=limitby, *fields)
    nrecords = dbset.count()
    items = {}
    items['page'] = page
    items['total'] = int((nrecords + (rows - 1)) / rows)
    items['records'] = nrecords
    readable_fields = [f.name for f in fields if f.readable]
    def f(value, fieldname):
        # apply the field's represent() when defined; escape raw values
        r = table[fieldname].represent
        if r: value = r(value)
        try: return value.xml()
        except: return cgi.escape(str(value))
    items['rows'] = [{'id': r.id, 'cell': [f(r[x], x) for x in readable_fields]} \
                     for r in records]
    return json(items)
def tags():
    """
    Ajax callback: manage the tags linked to (table_name, record_id) =
    request.args(0:2).

    Comma-separated names in request.vars.tag_name are attached (tags are
    created on first use); request.vars keys starting with 'delete' detach
    the corresponding link. Returns the sorted link list and the tag form;
    returns '' for anonymous users.
    """
    import re
    db_tag = db.plugin_wiki_tag
    db_link = db.plugin_wiki_link
    table_name = request.args(0)
    record_id = request.args(1)
    if not auth.user_id:
        return ''
    form = SQLFORM.factory(Field('tag_name', requires=IS_SLUG()))
    if request.vars.tag_name:
        for item in request.vars.tag_name.split(','):
            tag_name = re.compile('\s+').sub(' ', item).strip()
            tag = db(db_tag.name == tag_name).select().first()
            tag_exists = bool(tag)
            if not tag_exists:
                tag = db_tag.insert(name=tag_name, links=1)
            link = db(db_link.tag == tag.id)\
                     (db_link.table_name == table_name)\
                     (db_link.record_id == record_id).select().first()
            if not link:
                db_link.insert(tag=tag.id,
                               table_name=table_name, record_id=record_id)
                # BUGFIX: bump the usage counter only when a link row was
                # actually created; previously a re-submitted tag inflated
                # the count without adding a new link
                if tag_exists:
                    tag.update_record(links=tag.links + 1)
    for key in request.vars:
        if key[:6] == 'delete':
            link_id = key[6:]
            link = db_link[link_id]
            del db_link[link_id]
            db_tag[link.tag] = dict(links=db_tag[link.tag].links - 1)
    links = db(db_link.table_name == table_name)\
              (db_link.record_id == record_id).select()\
              .sort(lambda row: row.tag.name.upper())
    return dict(links=links, form=form)
def cloud():
    """Render a tag cloud from the first 20 in-use tags; font size scales
    with each tag's link count relative to the most-used tag."""
    rows = db(db.plugin_wiki_tag.links > 0).select(limitby=(0, 20))
    if not rows:
        return  # implicit None, same as the original when there are no tags
    top = max(tag.links for tag in rows)
    spans = [SPAN(A(tag.name,
                    _href=URL(r=request, c='plugin_wiki', f='page',
                              args=('tag', tag.id))),
                  _style='font-size:%sem' % (0.8 + 1.0 * tag.links / top))
             for tag in rows]
    return DIV(_class='plugin_wiki_tag_cloud', *spans)
@auth.requires(plugin_wiki_editor)
def widget_builder():
    """
    Interactive helper: introspects PluginWikiWidgets methods and builds the
    ``...``:widget markup for the widget selected in request.vars.name.

    >>> inspect.getargspec(PluginWikiWidgets.tags)
    (['table', 'record_id'], None, None, ('None', None))
    >>> dir(PluginWikiWidgets)
    """
    import inspect
    name = request.vars.name
    if plugin_wiki_widgets == 'all':
        widgets = [''] + [item for item in dir(PluginWikiWidgets) if item[0] != '_']
    else:
        widgets = plugin_wiki_widgets
    form = FORM(LABEL('Widget Name: '), SELECT(_name='name', value=name,
                _onchange="jQuery(this).parent().submit()", *widgets))
    widget_code = ''
    if name in widgets:
        # argspec -> one form field per widget argument; the `null` sentinel
        # marks arguments without a default, which become required fields
        a, b, c, d = inspect.getargspec(getattr(PluginWikiWidgets, name))
        a, d = a or [], d or []
        null = lambda: None
        d = [null] * (len(a) - len(d)) + [x for x in d]
        # ESC prefix keeps field names from colliding with reserved vars
        ESC = 'x'
        fields = [Field(ESC + a[i], label=a[i], default=d[i] != null and d[i] or '',
                        requires=(d[i] == null) and IS_NOT_EMPTY() or None,
                        comment=(d[i] == null) and 'required' or '') \
                  for i in range(len(a))]
        form_widget = SQLFORM.factory(hidden=dict(name=name), *fields)
        doc = getattr(PluginWikiWidgets, name).func_doc or ''
        if form_widget.accepts(request.vars):
            # assemble the "name: value" lines of the widget markup
            keys = ['name: %s' % request.vars.name]
            for name in a:
                if request.vars[ESC + name]:
                    keys.append(name + ': %s' % request.vars[ESC + name])
            widget_code = CODE('``\n%s\n``:widget' % '\n'.join(keys))
    else:
        doc = ''
        form_widget = ''
    return dict(form=form, form_widget=form_widget, doc=doc,
                widget_code=widget_code)
def star_rate():
    """
    Ajax star-rating callback for (tablename, record_id) = request.args(0:2);
    the vote arrives in request.vars.rating.

    Stores one vote per user in plugin_wiki_rating_aux and keeps the running
    average and vote count in plugin_wiki_rating; if the rated table itself
    has a 'rating' field the average is mirrored there. Always returns ''.
    """
    N = 5  # max no of stars (if you use split stars you'll get a rating out of 10)
    pm = db.plugin_wiki_rating
    pa = db.plugin_wiki_rating_aux
    tablename = request.args(0)
    record_id = request.args(1)
    rating = abs(float(request.vars.rating or 0))
    # validate: table exists, rating in range, record exists, user logged in
    try:
        db[tablename]  # if there's no such table. Salute.
        if rating > N or rating < 0: raise Exception  # similar if rating is simulated.
        if not db[tablename][record_id]: raise Exception  # also if there's no specified record in table
        if not auth.user_id: raise Exception  # user has to login to rate
    except:
        return ''
    master = db(pm.tablename == tablename)(pm.record_id == record_id).select().first()
    if master:
        master_rating, master_counter = master.rating, master.counter
    else:
        master_rating, master_counter = 0, 0
        master = pm.insert(tablename=tablename, record_id=record_id, rating=master_rating, counter=master_counter)
    record = db(pa.master == master)(pa.created_by == auth.user_id).select().first()
    if rating:
        if not record:
            # first vote by this user: extend the running average
            record = pa.insert(master=master, rating=rating, created_by=auth.user_id)
            master_rating = (master_rating * master_counter + rating) / (master_counter + 1)
            master_counter = master_counter + 1
        else:
            # re-vote: replace this user's previous contribution to the average
            master_counter = master_counter
            master_rating = (master_rating * master_counter - record.rating + rating) / master_counter
            record.update_record(rating=rating)
        master.update_record(rating=master_rating, counter=master_counter)
    # mirror the average onto the rated record when it has a 'rating' field
    try:
        db[tablename][record_id]['rating']
    except:
        return ''
    else:
        db[tablename][record_id].update_record(rating=master_rating)
    return ''
| Python |
# -*- coding: utf-8 -*-
"""
@author: Bruno Cezar Rocha
@twitter: @rochacbruno
@company: blouweb.com
@depends: http://www.wbotelhos.com/gridy/ - Jquery Gridy Plugin
@include: http://nyromodal.nyrodev.com/ - nyroModal
@include: http://css3buttons.michaelhenriksen.dk/ - CSS3 Buttons
@depends: http://www.web2py.com - web2py Faster, Better and more easily web development!
@license for Gridy library and PowerGrid Plugin
The MIT License
Copyright (c) 2010 Washington Botelho dos Santos (jquery.gridy)
Copyright (c) 2011 Bruno Cezar Rocha (PowerGrid Plugin for web2py)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@DONATE! PayPal - rochacbruno@gmail.com
Go VEGAN!
"""
# uncomment line below if you want to require user signature.
#@auth.requires_signature()
def data():
    """
    Generic CRUD endpoint used by the PowerGrid plugin:
    http://..../[app]/default/data/tables
    http://..../[app]/default/data/create/[table]
    http://..../[app]/default/data/read/[table]/[id]
    http://..../[app]/default/data/update/[table]/[id]
    http://..../[app]/default/data/delete/[table]/[id]
    http://..../[app]/default/data/select/[table]
    http://..../[app]/default/data/search/[table]
    URLs must be signed, i.e. linked with
    A('table',_href=URL('data/tables',user_signature=True))
    or with the signed load operator
    LOAD('default','data.load',args='tables',ajax=True,user_signature=True)
    """
    if request.args(0) == 'deleted':
        return dict(message='Deleted')
    #crud.settings.formstyle = 'divs'
    crud.settings.controller = 'plugin_PowerGrid'
    crud.settings.download_url = URL('download')
    def updater(form, action):
        # TODO: this 4x 12 lines of code should be reduced to at most 4 lines!
        # close the modal after create/update; keep it open on delete
        if action == 'delete':
            closeForm = False
        else:
            closeForm = True
        if form.vars.file:
            resizeImage(form.vars.file, 390)
            resizeImage(form.vars.file, 100)
        # an update submitted with the delete checkbox set is really a delete
        if action == 'update':
            if form.vars.delete_this_record:
                action = 'delete'
        if action == 'create' or action == 'delete':
            # keep the per-group quiz counters in db.stats in sync
            quiz = request.args(1)
            if action == 'delete':
                groupID = request.args(4)
                difficulty = request.args(3)
            else:
                groupID = request.args(2)
                difficulty = form.vars.difficulty
            # stats column name: strip the 'quiz' prefix -> e.g. "personal_3"
            fieldName = ("%s_%s" % (quiz, difficulty))
            fieldName = fieldName[4:]
            #response.write(fieldName)
            row = db(db.stats.group_id == groupID).select().first()
            if action == 'create':
                setattr(row, fieldName, getattr(row, fieldName) + 1)
            elif action == 'delete':
                setattr(row, fieldName, getattr(row, fieldName) - 1)
            row.update_record(personal_1=row.personal_1,
                              personal_2=row.personal_2,
                              personal_3=row.personal_3,
                              personal_4=row.personal_4,
                              personal_5=row.personal_5,
                              personal_6=row.personal_6,
                              regional_1=row.regional_1,
                              regional_2=row.regional_2,
                              regional_3=row.regional_3,
                              regional_4=row.regional_4,
                              regional_5=row.regional_5,
                              regional_6=row.regional_6)
            db.commit()
        if closeForm:
            form.append(SCRIPT('parent.$.nmTop().close();'))
    db[request.args(1)]['group_id'].default = request.args(2)
    crud.settings.update_onaccept = lambda form: updater(form, 'update')
    crud.settings.create_onaccept = lambda form: updater(form, 'create')
    crud.settings.delete_onaccept = lambda form: updater(form, 'delete')
    #crud.settings.delete_next = URL('plugin_PowerGrid','data',args='deleted')
    return dict(form=crud())
def download():
    """Stream a previously uploaded file: .../download/[filename]."""
    return response.download(request, db)
def testcallback(x):
"""
THE JSON WHICH NEEDS TO BE RETURNED
{"entityList": [{"id": 1, "name": "Washington Botelho", "email": "gridy@wbotelhos.com"}], "total": 1}
ARGS RECEIVED
search=__&page=__&sortname=__&sortorder=__&find=__&rows=__&searchBy__
- search: the term you want to search;
- page: the number of the page you want to view;
- sortname: the name of the column of the database you want to sort by;
- sortorder: the order you want to sort the result: ascending or descending;
- find: the name of the column you want to search;
- rows: the number of rows you want to display in each page;
- You can append more attributes using the 'params' option.
- searchBy is the kind of search to be done. like, equal, notequal, startswith, endswith, gt, lt, ge, le
"""
return {"entityList": [
{"id": 1, "name": "wbotelhos", "email": "gridy@wbotelhos.com"},
{"id": 1, "name": "junb", "email": "gridy@wbotelhos.com"},
{"id": 1, "name": "erfb", "email": "gridy@wbotelhos.com"},
{"id": 1, "name": "bruno", "email": "gridy@wbotelhos.com"},
{"id": 1, "name": "uiolp", "email": "gridy@wbotelhos.com"},
],
"total": 5,
"headers":['id','name','email']} | Python |
import random
import os.path
def show():
    """Render a single quiz question.

    request.args(0): quiz kind ('personal' or 'regional');
    request.args(1): question id. Redirects back to the quiz group page
    when the kind is unknown or the record does not exist.
    """
    kind = request.args(0)
    record_id = request.args(1)
    if kind == 'personal':
        record = db(db.quizpersonal.id == record_id).select().first()
    elif kind == 'regional':
        record = db(db.quizregional.id == record_id).select().first()
    else:
        record = False
    if not record:
        redirect(URL('quiz', 'group', args=request.args(1)))
    return dict(content=record)
def personal():
    """Serve the next unseen personal-quiz question for the group in request.args(0).

    Requires the user to be a member of the group; tracks served questions in
    session.quizNavigator and redirects back to the quiz group page once every
    question has been served.
    """
    memberships = db(db.auth_membership.user_id==auth.user_id)._select(db.auth_membership.group_id) or redirect(URL('index'))
    row = db((db.auth_group.id.belongs(memberships)) & (db.auth_group.id==request.args(0)) & (~db.auth_group.role.contains(['user_', 'admins'], all=False))).select(db.auth_group.ALL, db.stats.ALL, orderby=~db.stats.total, left=db.stats.on(db.auth_group.id==db.stats.group_id)).first() or redirect(URL('index'))
    # start the navigation session on first visit
    if not session.quizNavigator:
        session.quizNavigator = dict(type='random', list=[], group=request.args(0))
    served_content = session.quizNavigator["list"]
    # Build the Set once and reuse it for both the emptiness test and the
    # select — the original constructed and executed the same query twice.
    remaining = db((db.quizpersonal.group_id==request.args(0)) & (~db.quizpersonal.id.belongs(served_content)))
    if remaining.isempty():
        # every question served: reset the navigator and return to the group page
        del session.quizNavigator
        redirect(URL('quiz', 'group', args=request.args(0)))
    question = remaining.select(orderby='<random>').first()
    session.quizNavigator["list"].append(question.id)
    return dict(quiz_id=question.id, quiz_type="personal")
def regional():
    """Serve the next unseen regional-quiz question for the group in request.args(0).

    Mirrors personal(): membership check, session-tracked served list, and a
    redirect to the quiz group page when every question has been served.
    """
    memberships = db(db.auth_membership.user_id==auth.user_id)._select(db.auth_membership.group_id) or redirect(URL('index'))
    row = db((db.auth_group.id.belongs(memberships)) & (db.auth_group.id==request.args(0)) & (~db.auth_group.role.contains(['user_', 'admins'], all=False))).select(db.auth_group.ALL, db.stats.ALL, orderby=~db.stats.total, left=db.stats.on(db.auth_group.id==db.stats.group_id)).first() or redirect(URL('index'))
    # start the navigation session on first visit
    if not session.quizNavigator:
        session.quizNavigator = dict(type='random', list=[], group=request.args(0))
    served_content = session.quizNavigator["list"]
    # Build the Set once and reuse it for both the emptiness test and the
    # select — the original constructed and executed the same query twice.
    remaining = db((db.quizregional.group_id==request.args(0)) & (~db.quizregional.id.belongs(served_content)))
    if remaining.isempty():
        # every question served: reset the navigator and return to the group page
        del session.quizNavigator
        redirect(URL('quiz', 'group', args=request.args(0)))
    question = remaining.select(orderby='<random>').first()
    session.quizNavigator["list"].append(question.id)
    response.view = 'emulator/personal.html'  # reuse the personal-quiz view
    return dict(quiz_id=question.id, quiz_type="regional")
import logging
module_logger = logging.getLogger('SP.controllers.server')
def index():
    """Placeholder server index action; replies with an empty body."""
    return ""
@auth.requires_login()
@service.xmlrpc
def get_version():
    """Return the braintrainer XMLRPC protocol version in use.

    Exposed so a next protocol version can be introduced without breaking
    existing clients.
    """
    return dict(result=settings.XMLRPCVersion)
@auth.requires_login()
@service.xmlrpc
def get_session_id():
    """Return a unique session UUID.

    Clients should call this right after creating their proxy, because each
    client MUST have a unique session_id.
    """
    return dict(result=_get_session_id())
@auth.requires_login()
@service.xmlrpc
def open_and_return_session():
    """Insert the client's info into db.session and return the new session id."""
    return dict(result=_open_and_return_session())
@auth.requires_login()
@service.xmlrpc
def close_session(session_id):
    """Registers the end_time of the session and closes the session.

    Parameters: String session_id - unique client session ID"""
    # Fire-and-forget: intentionally returns nothing to the XMLRPC caller.
    _close_session(session_id)
@auth.requires_login()
@service.xmlrpc
def get_levelupcount(session_id, game_name):
    """Return the number of turns that ends a level (Integer).

    Parameters: String session_id - unique client session ID
                String game_name - game_name of the current client
    """
    return dict(result=_get_levelupcount(session_id, game_name))
@auth.requires_login()
@service.xmlrpc
def get_levels(session_id, game_name):
    """Return the amount of levels (Integer).

    Parameters: String session_id - unique client session ID
                String game_name - game_name of the current client
    """
    return dict(result=_get_levels(session_id, game_name))
@auth.requires_login()
@service.xmlrpc
def get_user(session_id):
    """Return the name of the user (String).

    Parameters: String session_id - unique client session ID
    """
    return dict(result=_get_user(session_id))
@auth.requires_login()
@service.xmlrpc
def get_difficulty(session_id):
    """Return the current level the client should be running (Integer).

    Parameters: String session_id - unique client session ID
    """
    return dict(result=_get_difficulty(session_id))
@auth.requires_login()
@service.xmlrpc
def set_difficulty(session_id, level):
    """Set the difficulty level for the given client session.

    Parameters: String session_id - unique client session ID
                level - level the client should run at (presumably Integer --
                TODO confirm against _set_difficulty)"""
    _set_difficulty(session_id, level)
@auth.requires_login()
@service.xmlrpc
def set_level_end(session_id, game_data):
    #data = {'store_db':False, 'levelup':False, 'score':0}
    """Record the end of a played level; the data decides level up/down.

    Parameters: String session_id - unique client session ID
                Dictionary game_data - dictionary with game_data to decide whether to level up or down
                Format: game_data - {'level':Integer, 'store_db':Boolean, 'levelup':Boolean, 'score':Integer}"""
    _set_level_end(session_id, game_data)
@auth.requires_login()
@service.xmlrpc
def set_start_time(session_id):
    """Sets start_time for the currently played level.

    Parameters: String session_id - unique client session ID"""
    # Fire-and-forget: no value is returned to the XMLRPC caller.
    _set_start_time(session_id)
@auth.requires_login()
@service.xmlrpc
def set_end_time(session_id):
    """Sets end_time for the currently played level.

    Parameters: String session_id - unique client session ID"""
    # Fire-and-forget: no value is returned to the XMLRPC caller.
    _set_end_time(session_id)
@auth.requires_login()
@service.xmlrpc
def calculate_time(session_id):
    """Return the time spent in the given session.

    Parameters: String session_id - unique client session ID
    """
    return dict(result=_calculate_time(session_id))
## DEFAULT WEB2PY CONTROLLER STUFF
def user():
    """
    Expose the web2py auth actions:
    http://..../[app]/default/user/login
    http://..../[app]/default/user/logout
    http://..../[app]/default/user/register
    http://..../[app]/default/user/profile
    http://..../[app]/default/user/retrieve_password
    http://..../[app]/default/user/change_password

    Use @auth.requires_login(), @auth.requires_membership('group name') or
    @auth.requires_permission('read','table name',record_id) to decorate
    functions that need access control.
    """
    form = auth()
    return dict(form=form)
#@auth.requires_login()
def download():
    """
    Stream files previously uploaded into the db.
    http://..../[app]/default/download/[filename]
    """
    return response.download(request, db)
@auth.requires_login()
def call():
    """
    Expose registered services, for example:
    http://..../[app]/default/call/jsonrpc
    Decorate the functions to expose with @service.jsonrpc;
    xml, json, xmlrpc, jsonrpc, amfrpc, rss and csv are supported.
    """
    return service()
@auth.requires_signature()
def data():
    """
    http://..../[app]/default/data/tables
    http://..../[app]/default/data/create/[table]
    http://..../[app]/default/data/read/[table]/[id]
    http://..../[app]/default/data/update/[table]/[id]
    http://..../[app]/default/data/delete/[table]/[id]
    http://..../[app]/default/data/select/[table]
    http://..../[app]/default/data/search/[table]
    but URLs must be signed, i.e. linked with
    A('table',_href=URL('data/tables',user_signature=True))
    or with the signed load operator
    LOAD('default','data.load',args='tables',ajax=True,user_signature=True)
    """
    return dict(form=crud())
| Python |
#error handler
# Module-level error URL, built once per request; appears unused by the
# actions below -- presumably referenced from a view. TODO confirm.
error=URL(r=request,f='error')
def index():
    """
    Start page of the games module.

    Returns the page title and a list of messages prompting the user to
    choose a game ("Kies een spel").
    """
    # The original wrapped request.args(0) in try/except and stored it in an
    # unused local; request.args(0) returns None rather than raising, so both
    # the guard and the variable were dead code and have been removed.
    title = "Formatics games"
    messages = ["Kies een spel"]
    return dict(title=title, messages=messages)
def arkanoid():
    """Serve the arkanoid game page (targeted at Google Chrome)."""
    return dict(title="Let's play arkanoid")
| Python |
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## This is a samples controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
## - call exposes all registered services (none by default)
#########################################################################
def index():
    """
    Control-panel landing page: title, welcome message and menu entries.
    Rendered by views/default/index.html or views/generic.html.
    """
    menuitems = [
        ('', T(''), ''),
        ('64_usermanagement', T('Sessions'), 'sessions'),
        ('64_usermanagement', T('RPC Calls'), 'rpc_calls'),
    ]
    #menuitems.append(('64_groupmanagement',T('Manage groups'),'manageGroups'))
    #menuitems.append(('64_importdata',T('Import data'),'importMenu'))
    #menuitems.append(('64_exportdata',T('Export data'),'exportMenu'))
    return dict(title='Choose a task',
                message=T('Welcome to the game server webbased control panel'),
                menuitems=menuitems)
@auth.requires_membership('admins')
def sessions():
    """Admin grid for browsing and editing db.session records."""
    response.view = 'dashboard/manage.html'
    # deletion disabled; CSV export, row links and URL signing switched off
    grid_opts = dict(
        searchable=True,
        sortable=True,
        paginate=30,
        deletable=False,
        editable=True,
        details=True,
        selectable=None,
        create=True,
        csv=False,
        links_in_grid=None,
        #linked_tables=['Category'],
        user_signature=None,
        #maxtextlengths={'vvb_items.title':30,'vvb_items.summary':75},
        maxtextlength=20,
        onvalidation=None,
        oncreate=None,
        onupdate=None,
        ondelete=None,
        sorter_icons=('[^]', '[v]'),
        #ui = 'jquery-ui',
        #showbuttontext=None,
        _class="web2py_grid",
        formname='web2py_grid',
    )
    return dict(title=T("Manage sessions"),
                grid=SQLFORM.grid(db.session, **grid_opts))
@auth.requires_membership('admins')
def rpc_calls():
    """Admin grid for browsing and editing db.rpc_call records."""
    response.view = 'dashboard/manage.html'
    # deletion disabled; CSV export, row links and URL signing switched off
    grid_opts = dict(
        searchable=True,
        sortable=True,
        paginate=30,
        deletable=False,
        editable=True,
        details=True,
        selectable=None,
        create=True,
        csv=False,
        links_in_grid=None,
        #linked_tables=['Category'],
        user_signature=None,
        #maxtextlengths={'vvb_items.title':30,'vvb_items.summary':75},
        maxtextlength=20,
        onvalidation=None,
        oncreate=None,
        onupdate=None,
        ondelete=None,
        sorter_icons=('[^]', '[v]'),
        #ui = 'jquery-ui',
        #showbuttontext=None,
        _class="web2py_grid",
        formname='web2py_grid',
    )
    return dict(title=T("Manage rpc_calls"),
                grid=SQLFORM.grid(db.rpc_call, **grid_opts))
def user():
    """
    Expose the web2py auth actions:
    http://..../[app]/default/user/login
    http://..../[app]/default/user/logout
    http://..../[app]/default/user/register
    http://..../[app]/default/user/profile
    http://..../[app]/default/user/retrieve_password
    http://..../[app]/default/user/change_password

    Use @auth.requires_login(), @auth.requires_membership('group name') or
    @auth.requires_permission('read','table name',record_id) to decorate
    functions that need access control.
    """
    form = auth()
    return dict(form=form)
#@auth.requires_membership('admins')
@auth.requires_login()
def adminPanel():
    """Admin panel with two forms (add membership / add group) plus listings
    of all existing groups and memberships.

    Each form gets a distinct formname: two SQLFORMs on one page otherwise
    share the default formname, so a POST of one form could be processed as
    the other.
    """
    form1 = SQLFORM(db.auth_membership)
    if form1.process(formname='membership_form').accepted:
        redirect(URL())
    elif form1.errors:
        response.flash = T('form has errors')
    else:
        response.flash = T('please fill the form')
    form2 = SQLFORM(db.auth_group)
    if form2.process(formname='group_form').accepted:
        redirect(URL())
    elif form2.errors:
        response.flash = T('form has errors')
    else:
        response.flash = T('please fill the form')
    allGroups = db(db.auth_group).select()
    allMemberships = db(db.auth_membership).select()
    return dict(form1=form1, form2=form2, allGroups=allGroups, allMemberships=allMemberships)
@auth.requires_membership('admins')
def deleteGroup():
    """Delete the auth_group whose id is request.args(0), then return to the
    admin panel.

    The original passed the whole Row object as the comparison value of the
    delete query (db.auth_group.id==row); compare against the row's id.
    """
    row = db(db.auth_group.id==request.args(0)).select().first()
    remove = db(db.auth_group.id==row.id).delete() if row else 0
    if remove:
        redirect(URL('adminPanel'))
    return dict(remove=remove)
#TODO ENABLE ON PRODUCTION
#@auth.requires_membership('admins')
def deleteMembership():
    """Delete the auth_membership whose id is request.args(0), then return to
    the admin panel.

    The original passed the whole Row object as the comparison value of the
    delete query (db.auth_membership.id==row); compare against the row's id.
    """
    row = db(db.auth_membership.id==request.args(0)).select().first()
    remove = db(db.auth_membership.id==row.id).delete() if row else 0
    if remove:
        redirect(URL('adminPanel'))
    return dict(remove=remove)
@auth.requires_login()
def download():
    """
    Stream files previously uploaded into the db.
    http://..../[app]/default/download/[filename]
    """
    return response.download(request, db)
def call():
    """
    Expose registered services, for example:
    http://..../[app]/default/call/jsonrpc
    Decorate the functions to expose with @service.jsonrpc;
    xml, json, xmlrpc, jsonrpc, amfrpc, rss and csv are supported.
    """
    return service()
@auth.requires_signature()
def data():
    """
    http://..../[app]/default/data/tables
    http://..../[app]/default/data/create/[table]
    http://..../[app]/default/data/read/[table]/[id]
    http://..../[app]/default/data/update/[table]/[id]
    http://..../[app]/default/data/delete/[table]/[id]
    http://..../[app]/default/data/select/[table]
    http://..../[app]/default/data/search/[table]
    but URLs must be signed, i.e. linked with
    A('table',_href=URL('data/tables',user_signature=True))
    or with the signed load operator
    LOAD('default','data.load',args='tables',ajax=True,user_signature=True)
    """
    return dict(form=crud())
| Python |
@auth.requires_login()
def index():
    """Entry point: send logged-in users straight to the quiz index."""
    # redirect() raises HTTP, so nothing after it runs; the bare `return`
    # that followed in the original was unreachable and has been removed.
    redirect(URL('quiz', 'index'))
@auth.requires_login()
def download():
    """Build and stream a password-protected zip export of the group's quiz.

    request.args(0) is an auth_group id the logged-in user must belong to.
    The export folder receives resized question images, a content.sql file
    and an options.rc file; a pre-existing zip is archived under a timestamp
    before the new one is built.

    NOTE(review): the shell commands below are assembled with %-interpolation
    and executed through os.system. The interpolated values come from the db
    and request.folder, not directly from the user, but subprocess.run with
    an argument list would still be safer -- confirm before hardening.
    """
    import os
    import shutil
    import uuid
    import time
    import zipfile
    import contenttype as c
    from PIL import Image
    # membership check: the user must belong to the requested group, and the
    # group must not be a plain user_/admins role
    memberships = db(db.auth_membership.user_id==auth.user_id)._select(db.auth_membership.group_id) or redirect(URL('index'))
    row = db((db.auth_group.id.belongs(memberships)) & (db.auth_group.id==request.args(0)) & (~db.auth_group.role.contains(['user_', 'admins'], all=False))).select(db.auth_group.ALL, db.stats.ALL, orderby=~db.stats.total, left=db.stats.on(db.auth_group.id==db.stats.group_id)).first() or redirect(URL('index'))
    #quiz is either personal or regional (auth_group.quiz is prefixed, e.g. 'quiz' + kind)
    quiz = row.auth_group.quiz[4:]
    group_id = row.auth_group.id
    if quiz == 'personal':
        content=db(db.quizpersonal.group_id == group_id).select()
    elif quiz == 'regional':
        content=db(db.quizregional.group_id == group_id).select()
    folderLocation = '%sexports/%s/quiz_%s' % (request.folder, group_id, quiz)
    zipLocation = '%sexports/%s/quiz_%s.zip' % (request.folder, group_id, quiz)
    # if os.path.exists(folderLocation):
    #     return
    # archive any previous export zip under a timestamped name
    if os.path.isfile(zipLocation):
        zipNewLocation = '%sexports/%s/quiz_%s_%s.zip' % (request.folder, group_id, quiz, time.strftime("%Y-%m-%d_%H-%M-%S"))
        #shutil.move(zipLocation, zipNewLocation)
        systemCMD = "mv %s %s" % (zipLocation, zipNewLocation)
        os.system(systemCMD)
    # enrich each row in-place with export-only fields (quiz, resizedfile,
    # filesize) consumed by the content.sql template
    for i in range(len(content)):
        content[i].quiz = quiz
        if content[i].file:
            fileOrgName = content[i].file
            fileName = 'game_quiz%s_%s_width_390.jpg' % (quiz, content[i].id)
            fileLocation = '%sexports/%s/quiz_%s/Images/%s' % (request.folder, group_id, quiz, fileName)
            resizeImage(fileOrgName, 390, fileLocation, "jpg", "JPEG")
            #ADD fields for SQL file
            content[i].resizedfile = 'static/images/thumbs/390/%s.jpg' % (os.path.splitext(fileOrgName)[0])
            #content[i].file = fileName
            content[i].filesize = os.path.getsize(fileLocation)
            #content[i].filesize = 0
        else:
            content[i].resizedfile = ''
            content[i].filesize = ''
    d = os.path.dirname('%s/' % (folderLocation))
    if not os.path.exists(d):
        os.makedirs(d)
    #os.chdir(folderLocation)
    # render content.sql and options.rc from export templates
    sqlContent = response.render('export/content.html', content=content)
    sqlLocation = '%sexports/%s/quiz_%s/content.sql' % (request.folder, group_id, quiz)
    optionsContent = response.render('export/options.html')
    optionsLocation = '%sexports/%s/quiz_%s/options.rc' % (request.folder, group_id, quiz)
    sqlFile = open(sqlLocation, 'w+')
    sqlFile.write(sqlContent)
    sqlFile.close()
    optionsFile = open(optionsLocation, 'w+')
    optionsFile.write(optionsContent)
    optionsFile.close()
    #os.chdir(request.global_settings.gluon_parent)
    # zip the export folder with a fixed password, then restore the cwd
    os.chdir('%sexports/%s' % (request.folder, group_id))
    systemMOD = 'chmod -R 777 quiz_%s' % (quiz)
    os.system(systemMOD)
    systemCMD = 'zip -rq -P "Stas roelt ook nu als een gek" quiz_%s.zip quiz_%s' % (quiz, quiz)
    os.system(systemCMD)
    systemMOD = 'chmod 777 quiz_%s.zip' % (quiz)
    os.system(systemMOD)
    os.chdir(request.global_settings.gluon_parent)
    # remove the working folder once the zip exists
    if os.path.isfile(zipLocation):
        systemCMD = 'rm -rf %s' % (folderLocation)
        os.system(systemCMD)
    zipName = 'quiz_%s.zip' % (row.auth_group.quiz[4:])
    #zipLocation = '%sexports/%s/quiz_%s.zip' % (request.folder, row.auth_group.id, row.auth_group.quiz[4:])
    response.headers['Content-Type'] = c.contenttype(zipLocation)
    response.headers['Content-Disposition'] = 'attachment; filename=%s' % (zipName) # to force download as attachment
    return response.stream(open(zipLocation,'rb'),chunk_size=4096)
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## This is a samples controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
## - call exposes all registered services (none by default)
#########################################################################
def index():
    """
    Landing page: welcome message plus the list of open todo items.
    Rendered by views/default/index.html or views/generic.html.
    """
    open_items = db(db.todo.isCompleted==False).select()
    return dict(title='BrainTrainerPlus™ Game server v1',
                message=T('Welkom op de backend van de gameserver voor de BrainTrainerPlus™.'),
                todo=open_items)
@auth.requires_membership('admins')
def manageTodo():
    """Admin smartgrid for browsing and editing db.todo records."""
    # deletion disabled; CSV export, row links and URL signing switched off
    grid_opts = dict(
        searchable=True,
        sortable=True,
        paginate=30,
        deletable=False,
        editable=True,
        details=True,
        selectable=None,
        create=True,
        csv=False,
        links_in_grid=None,
        #linked_tables=['Category'],
        user_signature=None,
        #maxtextlengths={'vvb_items.title':30,'vvb_items.summary':75},
        maxtextlength=20,
        onvalidation=None,
        oncreate=None,
        onupdate=None,
        ondelete=None,
        sorter_icons=('[^]', '[v]'),
        #ui = 'jquery-ui',
        #showbuttontext=None,
        _class="web2py_grid",
        formname='web2py_grid',
    )
    return dict(title=T("Manage Todo list"),
                grid=SQLFORM.smartgrid(db.todo, **grid_opts))
def user():
    """
    Expose the web2py auth actions:
    http://..../[app]/default/user/login
    http://..../[app]/default/user/logout
    http://..../[app]/default/user/register
    http://..../[app]/default/user/profile
    http://..../[app]/default/user/retrieve_password
    http://..../[app]/default/user/change_password

    Use @auth.requires_login(), @auth.requires_membership('group name') or
    @auth.requires_permission('read','table name',record_id) to decorate
    functions that need access control.
    """
    form = auth()
    return dict(form=form)
#@auth.requires_membership('admins')
@auth.requires_login()
def adminPanel():
    """Admin panel with two forms (add membership / add group) plus listings
    of all existing groups and memberships.

    Each form gets a distinct formname: two SQLFORMs on one page otherwise
    share the default formname, so a POST of one form could be processed as
    the other.
    """
    form1 = SQLFORM(db.auth_membership)
    if form1.process(formname='membership_form').accepted:
        redirect(URL())
    elif form1.errors:
        response.flash = T('form has errors')
    else:
        response.flash = T('please fill the form')
    form2 = SQLFORM(db.auth_group)
    if form2.process(formname='group_form').accepted:
        redirect(URL())
    elif form2.errors:
        response.flash = T('form has errors')
    else:
        response.flash = T('please fill the form')
    allGroups = db(db.auth_group).select()
    allMemberships = db(db.auth_membership).select()
    return dict(form1=form1, form2=form2, allGroups=allGroups, allMemberships=allMemberships)
@auth.requires_membership('admins')
def deleteGroup():
    """Delete the auth_group whose id is request.args(0), then return to the
    admin panel.

    The original passed the whole Row object as the comparison value of the
    delete query (db.auth_group.id==row); compare against the row's id.
    """
    row = db(db.auth_group.id==request.args(0)).select().first()
    remove = db(db.auth_group.id==row.id).delete() if row else 0
    if remove:
        redirect(URL('adminPanel'))
    return dict(remove=remove)
#TODO ENABLE ON PRODUCTION
#@auth.requires_membership('admins')
def deleteMembership():
    """Delete the auth_membership whose id is request.args(0), then return to
    the admin panel.

    The original passed the whole Row object as the comparison value of the
    delete query (db.auth_membership.id==row); compare against the row's id.
    """
    row = db(db.auth_membership.id==request.args(0)).select().first()
    remove = db(db.auth_membership.id==row.id).delete() if row else 0
    if remove:
        redirect(URL('adminPanel'))
    return dict(remove=remove)
@auth.requires_login()
def download():
    """
    Stream files previously uploaded into the db.
    http://..../[app]/default/download/[filename]
    """
    return response.download(request, db)
def call():
    """
    Expose registered services, for example:
    http://..../[app]/default/call/jsonrpc
    Decorate the functions to expose with @service.jsonrpc;
    xml, json, xmlrpc, jsonrpc, amfrpc, rss and csv are supported.
    """
    return service()
@auth.requires_signature()
def data():
    """
    http://..../[app]/default/data/tables
    http://..../[app]/default/data/create/[table]
    http://..../[app]/default/data/read/[table]/[id]
    http://..../[app]/default/data/update/[table]/[id]
    http://..../[app]/default/data/delete/[table]/[id]
    http://..../[app]/default/data/select/[table]
    http://..../[app]/default/data/search/[table]
    but URLs must be signed, i.e. linked with
    A('table',_href=URL('data/tables',user_signature=True))
    or with the signed load operator
    LOAD('default','data.load',args='tables',ajax=True,user_signature=True)
    """
    return dict(form=crud())
| Python |
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## This is a samples controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
## - call exposes all registered services (none by default)
#########################################################################
#print response
def index():
    """Feature grid demo: live-demo/docs buttons for everyone, plus
    edit/delete buttons for members of the 'admin' group."""
    buttons = [
        ('Live Demo', URL('default','proxy')+'/${link}', '_self',
         'Live demo for ${feature}', 'positive left button', 'book'),
        ('Docs', URL('plugin_PowerGrid','data',args=['read','features'])+'/${id}', '_blank',
         'Docs for ${feature}', 'modal right button', 'rightarrow'),
    ]
    if auth.has_membership('admin'):
        buttons.append(('edit', URL('plugin_PowerGrid','data',args=['update','features'])+'/${id}', '_blank',
                        'Editing Record ${id}', 'refreshmodal middle button', 'pen'))
        buttons.append(('delete', URL('plugin_PowerGrid','data',args=['delete','features'])+'/${id}', '_blank',
                        'Are you sure you want to delete record ${id}', 'confirmationmodal right negative button', 'cross'))
    from plugin_PowerGrid.PowerGrid import PowerGrid
    grid = PowerGrid(
        callback=URL('default','features', extension='json'),
        buttons=buttons,
        addurl=URL('plugin_PowerGrid','data',args=['create','features']),
        hideaddbutton=not auth.has_membership('admin'),
        headers=[['feature','Feature'], ['text','What?']],
        minH=600, minW=800,
        options=dict(colsWidth=[200,400], find='text', rows=10),
    )
    return dict(p=grid)
def proxy():
    """Redirect helper: forwards /proxy/<action> to /default/<action>."""
    redirect(URL('default',request.args(0)))
def features():
    """JSON callback for the features grid: all rows of db.features."""
    from plugin_PowerGrid.CallBack import CallBack
    return CallBack(db.features.id>0)
def blog():
    """Blog-style demo: renders products as posts via a custom jQuery template."""
    response.view = 'default/index.html'
    # passing a custom jquery template by string
    # ${fieldname} evaluates to field value
    # {{html htmlValueField}} parses the html
    # Buttons:
    # - class 'modal' to open in modal (when used, target needs to be '_blank')
    # - class 'button' to decorate button - see CSS3 Buttons documentation
    # - <span class='icon plus'></span> inside <a class='button'> to decorate button with icon (see CSS3 buttons docs)
    # - recommend to always wrap with div, ul, p
    template = """
    <div class='powergrid-post' style='height:500px !important;border:none !important;'>
    <hr />
    <h1 style='background-color:#ccc;line-height:60px;height:60px;width:100%;'> ${name} - Posted on: ${date}</h1>
    <hr />
    <div><a class='modal' target='_blank' href='/PowerGrid/default/download/${picture}'><img height='250' src='/PowerGrid/default/download/${picture}' /></a></div>
    <div class='powergrid_buttons'>
    <a title='Comments for post ${id}' href="/PowerGrid/plugin_PowerGrid/data/select/comments/${id}" class='button modal' target='_blank'>Comments</a>
    <a title='Edit post ${id}' minH='800' href="/PowerGrid/plugin_PowerGrid/data/update/products/${id}" class='button positive refreshmodal' target='_blank'>Edit Post</a>
    </div>
    <br />
    <br />
    <div>${manufacturer} / ${group_id} </div>
    <div style='clear:both;'></div>
    <div><p>{{html text}}</p></div>
    </div>
    """
    from plugin_PowerGrid.PowerGrid import PowerGrid
    # single-column grid rendered entirely through the template above
    p = PowerGrid(
        callback=URL('default','callback_with_virtualfields', extension='json'),
        addurl=URL('plugin_PowerGrid','data',args=['create','products']),
        hideaddbutton=False,
        addLabel="New post",
        headers=[['','']], #comment or define headers here to include sorters
        template=template,
        _style='border:none;',
        options=dict(colsWidth=[None],
                     rows=2,
                     width=760,
                     clickFx=False,
                     hoverFx=False,
                     findsName=[['text','Search on post']],
                     find='text',
                     resultText= 'Showing {from} - {to} to {total} posts',
                     sortOrder='desc',
                     sortName='id')
        )
    # extra header elements injected above the grid
    p.insert(0,CENTER(H1('My Powerful blog post pagination')))
    p.insert(1,CENTER(H2('Customizable html template and css!')))
    p.insert(2,BR())
    p.insert(3,STYLE("""#wrapper {width: 800px !important;}"""))
    return dict(p=p)
def jscallbacks():
    """Demo of the grid's JS lifecycle callbacks (before / success / error).

    request.args(0) selects the callback to demonstrate; for 'error' the
    callback URL is blanked so the load genuinely fails.
    """
    response.view = 'default/index.html'
    from plugin_PowerGrid.PowerGrid import PowerGrid
    p = PowerGrid(
        callback=URL('default','callback', extension='json') if request.args(0) != 'error' else '',
        buttons=[
            ('details',URL('plugin_PowerGrid','data',args=['read','products'])+'/${id}','_blank','Details of Record ${id}','modal positive button','magnifier'),
            ],
        addurl=URL('plugin_PowerGrid','data',args=['create','products']),
        addLabel='Add New Record',
        addTitle='You are adding a new record',
        headers=[['id','Id'], ['name','Name'], ['manufacturer','Manufacturer'],['group_id','Group ID']],
        hidecontrolbuttons=True,
        #hiderefreshbutton=True,
        #hideaddbutton=True,
        #_id="banana",
        #target="melancia",
        #searchBy='equal',
        #templatetype='grid',
        #minW=1600,
        minH=500,
        # the three js[...]js strings below are the callbacks being demoed
        options=dict(colsWidth=[30,200,200,100],
                     #width=700,
                     #buttonBackTitle='Back',
                     #buttonMax=4,
                     #buttonNextTitle='Next',
                     success="""js[function(){alert('Executed on load success');}]js""" if request.args(0) == 'success' else '' ,
                     before="""js[function(){alert('Executed before load');}]js""" if request.args(0) == 'before' else '',
                     error="""js[function(){alert('Executed when load error');}]js""" if request.args(0) == 'error' else '',
                     #buttonOption=False,
                     #buttonsWidth=200
                     #buttonTitle='oi',
                     #clickFx=False,
                     #debug=True,
                     #find='name',search='J',
                     #searchOption=False,
                     #searchButtonLabel='Buscar',
                     #searchButtonTitle='Clique para buscar',
                     #searchFocus=False,
                     #cache=True,
                     #contentType='application/x-www-form-urlencoded; charset=utf-8',
                     #type='get',
                     #dataType='jsonp',
                     #jsonp=True,
                     #jsonpCallback='callback',
                     #findsName=[['name','name']],
                     #hoverFx=False,
                     #loadingOption=True,
                     #loadingText='Carregando...',
                     #messageOption=False,
                     #noResultOption=False,
                     #noResultText='I found nothing ',
                     #page=1,
                     #rows=3,
                     #rowsNumber=[3,25],
                     #params='&blablabla=45',
                     #resize=False,
                     #resultOption=False,
                     #resultText= 'Exibindo {from} - {to} de {total} registros',
                     #scroll=True,height=100,
                     #searchText='Busque aqui',
                     #sortName='name',
                     #sortOrder='asc',
                     #template='template',
                     #templateStyle='blabla',
                     ),
        )
    # demo links that re-enter this action with the arg selecting a callback
    icon = SPAN(_class='clock icon')
    p.insert(0,DIV(A(icon,'On Error',_class='button',_href=URL('default','jscallbacks',args='error')),
                   SPAN(XML('&nbsp;'*4)),
                   A(icon,'Before Load',_class='button',_href=URL('default','jscallbacks',args='before')),
                   SPAN(XML('&nbsp;'*4)),
                   A(icon,'After Successful Load',_class='button',_href=URL('default','jscallbacks',args='success')),
                   _style='margin-bottom:20px;margin-right:20px !important;'))
    return dict(p=p)
def buttons():
    """Demo of per-row action buttons (details/edit/delete/google) with modal
    sizes, placement classes and icons."""
    response.view = 'default/index.html'
    from plugin_PowerGrid.PowerGrid import PowerGrid
    p = PowerGrid(
        callback=URL('default','callback', extension='json'),
        # (label, url, target, title, css classes, icon[, [modal w, h]])
        buttons=[
            ('details',URL('plugin_PowerGrid','data',args=['read','products'])+'/${id}','_blank','Details of Record ${id}','modal positive left button','magnifier',[600,500]),
            ('edit',URL('plugin_PowerGrid','data',args=['update','products'])+'/${id}','_blank','Editing Record ${id}','refreshmodal middle button', 'pen',[600,800]),
            ('delete',URL('plugin_PowerGrid','data',args=['delete','products'])+'/${id}','_blank','Are you sure you want to delete record ${id}','confirmationmodal right negative button', 'cross'),
            ('google','http://www.google.com/#hl=en-US&source=hp&q=${name}','_blank','Googling for ${name}','modal right button', 'pin',[900,600]),
            ],
        addurl=URL('plugin_PowerGrid','data',args=['create','products']),
        addLabel='Add New Record',
        addTitle='You are adding a new record',
        headers=[['id','Id'], ['name','Name'], ['manufacturer','Manufacturer'],['group_id','Group ID']],
        #hidecontrolbuttons=True,
        #hiderefreshbutton=True,
        #hideaddbutton=True,
        #_id="banana",
        #target="melancia",
        #searchBy='equal',
        minH=800,
        options=dict(colsWidth=[30,200,200,100],
                     #width=700,
                     #buttonBackTitle='Back',
                     #buttonMax=4,
                     #buttonNextTitle='Next',
                     #success="""js[function(){alert('Executed on success');}]js""",
                     #before="""js[function(){alert('Executed before');}]js""",
                     #error="""js[function(){alert('Executed on load error');}]js""",
                     #buttonOption=False,
                     #buttonsWidth=200
                     #buttonTitle='oi',
                     #clickFx=False,
                     #debug=True,
                     #find='name',search='J',
                     #searchOption=False,
                     #searchButtonLabel='Buscar',
                     #searchButtonTitle='Clique para buscar',
                     #searchFocus=False,
                     #cache=True,
                     #contentType='application/x-www-form-urlencoded; charset=utf-8',
                     #type='get',
                     #dataType='jsonp',
                     #jsonp=True,
                     #jsonpCallback='callback',
                     #findsName=[['name','name']],
                     #hoverFx=False,
                     #loadingOption=True,
                     #loadingText='Carregando...',
                     #messageOption=False,
                     #noResultOption=False,
                     #noResultText='I found nothing ',
                     #page=1,
                     #rows=3,
                     #rowsNumber=[3,25],
                     #params='&blablabla=45',
                     #resize=False,
                     #resultOption=False,
                     #resultText= 'Exibindo {from} - {to} de {total} registros',
                     #scroll=True,height=100,
                     #searchText='Busque aqui',
                     #sortName='name',
                     #sortOrder='asc',
                     #template='template',
                     #templateStyle='blabla',
                     ),
        )
    return dict(p=p)
def scroll():
    """Demo of scroll mode: pagination buttons off, 100 rows in a fixed-height
    scrollable viewport."""
    response.view = 'default/index.html'
    from plugin_PowerGrid.PowerGrid import PowerGrid
    p = PowerGrid(
        callback=URL('default','callback', extension='json'),
        buttons=[
            ('details',URL('plugin_PowerGrid','data',args=['read','products'])+'/${id}','_blank','Details of Record ${id}','modal positive button','magnifier',[600,500]),
            ],
        addurl=URL('plugin_PowerGrid','data',args=['create','products']),
        addLabel='Add New Record',
        addTitle='You are adding a new record',
        headers=[['id','Id'], ['name','Name'], ['manufacturer','Manufacturer'],['group_id','Group ID']],
        #hidecontrolbuttons=True,
        #hiderefreshbutton=True,
        #hideaddbutton=True,
        #_id="banana",
        #target="melancia",
        #searchBy='equal',
        options=dict(colsWidth=[30,200,200,100],
                     width=900,
                     #buttonBackTitle='Back',
                     #buttonMax=4,
                     #buttonNextTitle='Next',
                     #success="""js[function(){alert('Executed on success');}]js""",
                     #before="""js[function(){alert('Executed before');}]js""",
                     #error="""js[function(){alert('Executed on load error');}]js""",
                     buttonOption=False,
                     #buttonsWidth=200
                     #buttonTitle='oi',
                     #clickFx=False,
                     #debug=True,
                     #find='name',search='J',
                     #searchOption=False,
                     #searchButtonLabel='Buscar',
                     #searchButtonTitle='Clique para buscar',
                     #searchFocus=False,
                     #cache=True,
                     #contentType='application/x-www-form-urlencoded; charset=utf-8',
                     #type='get',
                     #dataType='jsonp',
                     #jsonp=True,
                     #jsonpCallback='callback',
                     #findsName=[['name','name']],
                     #hoverFx=False,
                     #loadingOption=True,
                     #loadingText='Carregando...',
                     #messageOption=False,
                     #noResultOption=False,
                     #noResultText='I found nothing ',
                     #page=1,
                     rows=100,
                     rowsNumber=[100],
                     #params='&blablabla=45',
                     #resize=False,
                     #resultOption=False,
                     #resultText= 'Exibindo {from} - {to} de {total} registros',
                     scroll=True,height=300,
                     #searchText='Busque aqui',
                     #sortName='name',
                     #sortOrder='asc',
                     #template='template',
                     #templateStyle='blabla',
                     ),
        )
    return dict(p=p)
def nocontrol():
    """Demo with the control-button column hidden (hidecontrolbuttons=True)
    while row action buttons remain available."""
    response.view = 'default/index.html'
    from plugin_PowerGrid.PowerGrid import PowerGrid
    p = PowerGrid(
        callback=URL('default','callback', extension='json'),
        buttons=[
            ('details',URL('plugin_PowerGrid','data',args=['read','products'])+'/${id}','_blank','Details of Record ${id}','modal positive left button','magnifier',[600,500]),
            ('edit',URL('plugin_PowerGrid','data',args=['update','products'])+'/${id}','_blank','Editing Record ${id}','refreshmodal middle button', 'pen',[600,800]),
            ('delete',URL('plugin_PowerGrid','data',args=['delete','products'])+'/${id}','_blank','Are you sure you want to delete record ${id}','confirmationmodal right negative button', 'cross'),
            ],
        addurl=URL('plugin_PowerGrid','data',args=['create','products']),
        addLabel='Add New Record',
        addTitle='You are adding a new record',
        headers=[['id','Id'], ['name','Name'], ['manufacturer','Manufacturer'],['group_id','Group ID']],
        hidecontrolbuttons=True,
        #hiderefreshbutton=True,
        #hideaddbutton=True,
        #_id="banana",
        #target="melancia",
        #searchBy='equal',
        options=dict(colsWidth=[30,200,200,100],
                     #width=700,
                     #buttonBackTitle='Back',
                     #buttonMax=4,
                     #buttonNextTitle='Next',
                     #success="""js[function(){alert('Executed on success');}]js""",
                     #before="""js[function(){alert('Executed before');}]js""",
                     #error="""js[function(){alert('Executed on load error');}]js""",
                     #buttonOption=False,
                     #buttonsWidth=200
                     #buttonTitle='oi',
                     #clickFx=False,
                     #debug=True,
                     #find='name',search='J',
                     #searchOption=False,
                     #searchButtonLabel='Buscar',
                     #searchButtonTitle='Clique para buscar',
                     #searchFocus=False,
                     #cache=True,
                     #contentType='application/x-www-form-urlencoded; charset=utf-8',
                     #type='get',
                     #dataType='jsonp',
                     #jsonp=True,
                     #jsonpCallback='callback',
                     #findsName=[['name','name']],
                     #hoverFx=False,
                     #loadingOption=True,
                     #loadingText='Carregando...',
                     #messageOption=False,
                     #noResultOption=False,
                     #noResultText='I found nothing ',
                     #page=1,
                     #rows=3,
                     #rowsNumber=[3,25],
                     #params='&blablabla=45',
                     #resize=False,
                     #resultOption=False,
                     #resultText= 'Exibindo {from} - {to} de {total} registros',
                     #scroll=True,height=100,
                     #searchText='Busque aqui',
                     #sortName='name',
                     #sortOrder='asc',
                     #template='template',
                     #templateStyle='blabla',
                     ),
        )
    return dict(p=p)
def defaultsearch():
    """PowerGrid demo: grid pre-filtered through the `find`/`search` options,
    defaulting to manufacturer == 'Ferrari' when no ?where=/&what= query
    string is supplied."""
    response.view = 'default/index.html'
    from plugin_PowerGrid.PowerGrid import PowerGrid
    grid = PowerGrid(
        callback=URL('default', 'callback', extension='json'),
        buttons=[
            ('details', URL('plugin_PowerGrid', 'data', args=['read', 'products']) + '/${id}', '_blank', 'Details of Record ${id}', 'modal positive left button', 'magnifier', [600, 500]),
            ('edit', URL('plugin_PowerGrid', 'data', args=['update', 'products']) + '/${id}', '_blank', 'Editing Record ${id}', 'refreshmodal middle button', 'pen', [600, 800]),
            ('delete', URL('plugin_PowerGrid', 'data', args=['delete', 'products']) + '/${id}', '_blank', 'Are you sure you want to delete record ${id}', 'confirmationmodal right negative button', 'cross'),
        ],
        addurl=URL('plugin_PowerGrid', 'data', args=['create', 'products']),
        addLabel='Click here to add a new record',
        addTitle='You are adding a new record',
        headers=[['id', 'Id'], ['name', 'Name'], ['manufacturer', 'Manufacturer'], ['group_id', 'Group ID']],
        options=dict(
            colsWidth=[30, 200, 200, 100],
            # Default search: column and value may be overridden via the URL.
            find=request.vars.where or 'manufacturer',
            search=request.vars.what or 'Ferrari',
        ),
    )
    return dict(p=grid)
def noadd():
    """PowerGrid demo: grid with the "add" button hidden (hideaddbutton=True)
    and an extra row button that googles the record's name."""
    response.view = 'default/index.html'
    from plugin_PowerGrid.PowerGrid import PowerGrid
    grid = PowerGrid(
        callback=URL('default', 'callback', extension='json'),
        buttons=[
            ('details', URL('plugin_PowerGrid', 'data', args=['read', 'products']) + '/${id}', '_blank', 'Details of Record ${id}', 'modal positive left button', 'magnifier', [600, 500]),
            ('edit', URL('plugin_PowerGrid', 'data', args=['update', 'products']) + '/${id}', '_blank', 'Editing Record ${id}', 'refreshmodal middle button', 'pen', [600, 800]),
            ('delete', URL('plugin_PowerGrid', 'data', args=['delete', 'products']) + '/${id}', '_blank', 'Are you sure you want to delete record ${id}', 'confirmationmodal right negative button', 'cross'),
            ('google', 'http://www.google.com/#hl=en-US&source=hp&q=${name}', '_blank', 'Googling for ${name}', 'modal right button', 'pin', [900, 600]),
        ],
        addurl=URL('plugin_PowerGrid', 'data', args=['create', 'products']),
        addLabel='Add New Record',
        addTitle='You are adding a new record',
        headers=[['id', 'Id'], ['name', 'Name'], ['manufacturer', 'Manufacturer'], ['group_id', 'Group ID']],
        hideaddbutton=True,
        options=dict(colsWidth=[30, 200, 200, 100]),
    )
    return dict(p=grid)
def nosearch():
    """PowerGrid demo: grid with the search box disabled (searchOption=False)."""
    response.view = 'default/index.html'
    from plugin_PowerGrid.PowerGrid import PowerGrid
    grid = PowerGrid(
        callback=URL('default', 'callback', extension='json'),
        buttons=[
            ('details', URL('plugin_PowerGrid', 'data', args=['read', 'products']) + '/${id}', '_blank', 'Details of Record ${id}', 'modal positive left button', 'magnifier', [600, 500]),
            ('edit', URL('plugin_PowerGrid', 'data', args=['update', 'products']) + '/${id}', '_blank', 'Editing Record ${id}', 'refreshmodal middle button', 'pen', [600, 800]),
            ('delete', URL('plugin_PowerGrid', 'data', args=['delete', 'products']) + '/${id}', '_blank', 'Are you sure you want to delete record ${id}', 'confirmationmodal right negative button', 'cross'),
            ('google', 'http://www.google.com/#hl=en-US&source=hp&q=${name}', '_blank', 'Googling for ${name}', 'modal right button', 'pin', [900, 600]),
        ],
        addurl=URL('plugin_PowerGrid', 'data', args=['create', 'products']),
        addLabel='Add New Record',
        addTitle='You are adding a new record',
        headers=[['id', 'Id'], ['name', 'Name'], ['manufacturer', 'Manufacturer'], ['group_id', 'Group ID']],
        options=dict(
            colsWidth=[30, 200, 200, 100],
            searchOption=False,
        ),
    )
    return dict(p=grid)
def gridinpopup():
    """PowerGrid demo: each row opens ANOTHER grid inside a modal
    (the button targets the `onlydata` action filtered by this row's id);
    minW/minH size the popup."""
    response.view = 'default/index.html'
    from plugin_PowerGrid.PowerGrid import PowerGrid
    grid = PowerGrid(
        callback=URL('default', 'callback', extension='json'),
        buttons=[
            ('Open another grid', URL('default', 'onlydata', args=['layout'], vars=dict(where='id')) + '&what=${id}', '_blank', 'Sub grid of record ${id}', 'refreshmodal positive pill big button', 'uparrow'),
        ],
        addurl=URL('plugin_PowerGrid', 'data', args=['create', 'products']),
        addLabel='Add New Record',
        addTitle='You are adding a new record',
        headers=[['id', 'Id'], ['name', 'Name'], ['manufacturer', 'Manufacturer'], ['group_id', 'Group ID']],
        minW=930,
        minH=300,
        options=dict(
            colsWidth=[30, 200, 200, 100],
            find=request.vars.where or '',
            search=request.vars.what or '',
        ),
    )
    return dict(p=grid)
def toexcel():
    """PowerGrid demo: grid with a single "export to excel" row button."""
    response.view = 'default/index.html'
    from plugin_PowerGrid.PowerGrid import PowerGrid
    grid = PowerGrid(
        callback=URL('default', 'callback', extension='json'),
        buttons=[
            ('export to excel', URL('plugin_PowerGrid', 'data', args=['read', 'products']) + '/${id}', '_blank', 'Details of Record ${id}', 'modal positive big button', 'book'),
        ],
        addurl=URL('plugin_PowerGrid', 'data', args=['create', 'products']),
        addLabel='Add New Record',
        addTitle='You are adding a new record',
        headers=[['id', 'Id'], ['name', 'Name'], ['manufacturer', 'Manufacturer'], ['group_id', 'Group ID']],
        options=dict(colsWidth=[30, 200, 200, 100]),
    )
    return dict(p=grid)
def onlydata():
    """PowerGrid demo: renders the grid as inline HTML (as_html=True).
    Used by `gridinpopup` with args(0)=='layout' to embed the grid in a
    modal via the stripped-down index2.html view."""
    response.view = 'default/index.html' if request.args(0) != 'layout' else 'default/index2.html'
    from plugin_PowerGrid.PowerGrid import PowerGrid
    grid = PowerGrid(
        callback=URL('default', 'callback', extension='json'),
        buttons=[
            ('details', URL('plugin_PowerGrid', 'data', args=['read', 'products']) + '/${id}', '_blank', 'Details of Record ${id}', 'modal positive button', 'magnifier', [600, 500]),
        ],
        addurl=URL('plugin_PowerGrid', 'data', args=['create', 'products']),
        addLabel='Add New Record',
        addTitle='You are adding a new record',
        headers=[['id', 'Id'], ['name', 'Name'], ['manufacturer', 'Manufacturer'], ['group_id', 'Group']],
        as_html=True,
        options=dict(
            colsWidth=[30, 200, 200, 100],
            find=request.vars.where or '',
            search=request.vars.what or '',
        ),
    )
    return dict(p=grid)
def orders():
    """PowerGrid demo: orders grid with virtual columns (date, prods).
    Switches the JSON callback to `order_by_id` when filtering by a
    specific product (?product=...)."""
    response.view = 'default/index.html' if request.args(0) != 'layout' else 'default/index2.html'
    # Pick the data source: all orders, or only orders containing a product.
    callback = 'orderscallback' if (not request.vars.get('product', None)) else 'order_by_id'
    from plugin_PowerGrid.PowerGrid import PowerGrid
    grid = PowerGrid(
        callback=URL('default', callback, extension='json'),
        buttons=[
            ('details', URL('plugin_PowerGrid', 'data', args=['read', 'orders']) + '/${id}', '_blank', 'Details of Record ${id}', 'modal positive left pill button', 'magnifier', [600, 250]),
            ('edit', URL('plugin_PowerGrid', 'data', args=['update', 'orders']) + '/${id}', '_blank', 'Editing Record ${id}', 'refreshmodal right pill button', 'pen', [600, 500]),
        ],
        addurl=URL('plugin_PowerGrid', 'data', args=['create', 'orders']),
        addLabel='Add New Record',
        addTitle='You are adding a new record',
        headers=[['id', 'Id'], ['customer', 'Customer'], ['prods', 'Products'], ['date', 'Date'], ['status', 'status']],
        as_html=True,
        options=dict(
            colsWidth=[50, 150, 200, 100, 100],
            find=request.vars.where or '',
            search=request.vars.what or '',
            # Columns offered in the search drop-down.
            findsName=[['id', 'id'], ['customer', 'Customer Name'], ['products', 'Products'], ['status', 'status']],
        ),
    )
    return dict(p=grid)
def order_by_id():
    """JSON callback: orders whose `products` list contains
    request.vars.product, augmented with virtual 'date'/'prods' columns."""
    from plugin_PowerGrid.CallBack import CallBack

    class Virtual(object):
        def date(self):
            # Render the order date as dd/mm/YYYY for the grid.
            return self.orders.order_date.strftime('%d/%m/%Y')

        def prods(self):
            # Flatten the products list into a single display string.
            return ' / '.join(str(item) for item in self.orders.products)

    return CallBack(db.orders.products.contains(request.vars.product), virtualfields=[Virtual()])
def orderscallback():
    """JSON callback: every order, augmented with virtual 'date'/'prods'
    columns used by the `orders` grid headers."""
    from plugin_PowerGrid.CallBack import CallBack

    class Virtual(object):
        def date(self):
            # Render the order date as dd/mm/YYYY for the grid.
            return self.orders.order_date.strftime('%d/%m/%Y')

        def prods(self):
            # Flatten the products list into a single display string.
            return ' / '.join(str(item) for item in self.orders.products)

    return CallBack(db.orders.id > 0, virtualfields=[Virtual()])
def htmlinside():
    """PowerGrid demo: shows a column ('text') whose cell content is raw
    HTML, rendered inline via as_html=True."""
    response.view = 'default/index.html' if request.args(0) != 'layout' else 'default/index2.html'
    from plugin_PowerGrid.PowerGrid import PowerGrid
    grid = PowerGrid(
        callback=URL('default', 'callback', extension='json'),
        buttons=[
            ('details', URL('plugin_PowerGrid', 'data', args=['read', 'products']) + '/${id}', '_blank', 'Details of Record ${id}', 'modal positive button', 'magnifier', [600, 500]),
        ],
        addurl=URL('plugin_PowerGrid', 'data', args=['create', 'products']),
        addLabel='Add New Record',
        addTitle='You are adding a new record',
        headers=[['id', 'Id'], ['name', 'Name'], ['manufacturer', 'Manufacturer'], ['text', 'Text']],
        as_html=True,
        options=dict(
            colsWidth=[30, 200, 200, 100],
            find=request.vars.where or '',
            search=request.vars.what or '',
        ),
    )
    return dict(p=grid)
def withimages():
    """PowerGrid demo: grid whose 'image' column is produced by the
    `callback_with_virtualfields` JSON callback (thumbnail per row)."""
    response.view = 'default/index.html'
    from plugin_PowerGrid.PowerGrid import PowerGrid
    grid = PowerGrid(
        callback=URL('default', 'callback_with_virtualfields', extension='json'),
        buttons=[
            ('details', URL('plugin_PowerGrid', 'data', args=['read', 'products']) + '/${id}', '_blank', 'Details of Record ${id}', 'modal positive button', 'magnifier', [600, 500]),
        ],
        addurl=URL('plugin_PowerGrid', 'data', args=['create', 'products']),
        addLabel='Add New Record',
        addTitle='You are adding a new record',
        headers=[['id', 'id'], ['name', 'name'], ['manufacturer', 'manufacturer'], ['category', 'category'], ['image', 'image']],
        options=dict(colsWidth=[30, 200, 200, 150, 110]),
    )
    return dict(p=grid)
def callback_with_virtualfields():
    """JSON callback that adds a computed 'image' column: a 100px-wide
    thumbnail linking to the full download, opened in a 900x900 modal."""
    from plugin_PowerGrid.CallBack import CallBack

    class Virtual(object):
        def image(self):
            src = URL('default', 'download', args=self.products.picture)
            link = A(
                IMG(_width=100, _src=src),
                _href=src,
                _class='modal',
                _title='image of record %s' % self.products.id,
                _w=900,
                _h=900,
            )
            return str(link)

    return CallBack(db.products.id > 0, virtualfields=[Virtual()])
def callback():
    """Default JSON callback: every row of db.products."""
    from plugin_PowerGrid.CallBack import CallBack
    all_products = db.products.id > 0
    return CallBack(all_products)
def user():
    """
    Standard web2py auth controller; exposes:
    http://..../[app]/default/user/login
    http://..../[app]/default/user/logout
    http://..../[app]/default/user/register
    http://..../[app]/default/user/profile
    http://..../[app]/default/user/retrieve_password
    http://..../[app]/default/user/change_password
    use @auth.requires_login()
        @auth.requires_membership('group name')
        @auth.requires_permission('read','table name',record_id)
    to decorate functions that need access control
    """
    return dict(form=auth())
def download():
    """
    Streams files previously uploaded through the DAL:
    http://..../[app]/default/download/[filename]
    """
    return response.download(request, db)
def call():
    """
    Exposes registered services, for example:
    http://..../[app]/default/call/jsonrpc
    Decorate the functions to expose with @service.jsonrpc etc.
    Supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv.
    """
    return service()
#@auth.requires_signature()
def data():
    """
    CRUD endpoint backing the grid buttons:
    http://..../[app]/default/data/tables
    http://..../[app]/default/data/create/[table]
    http://..../[app]/default/data/read/[table]/[id]
    http://..../[app]/default/data/update/[table]/[id]
    http://..../[app]/default/data/delete/[table]/[id]
    http://..../[app]/default/data/select/[table]
    http://..../[app]/default/data/search/[table]
    but URLs must be signed, i.e. linked with
    A('table',_href=URL('data/tables',user_signature=True))
    or with the signed load operator
    LOAD('default','data',args='tables',ajax=True,user_signature=True)
    """
    if request.args(0) == 'deleted':
        return dict(message='Deleted')
    crud.settings.controller = 'default'
    crud.settings.download_url = URL('download')

    def updater(form):
        # After an ajax create/update: alert on validation errors, otherwise
        # close the enclosing nyroModal popup.
        if form.errors:
            form.append(SCRIPT('alert("Erro");'))
        else:
            form.append(SCRIPT('parent.$.nmTop().close();'))

    # The callbacks take a single form argument, so pass `updater` directly
    # instead of wrapping it in a redundant lambda.
    crud.settings.update_onaccept = updater
    crud.settings.create_onaccept = updater
    crud.settings.delete_next = URL('data', args='deleted')
    return dict(form=crud())
def docs():
    """Placeholder documentation page."""
    response.view = 'default/index.html'
    message = 'Documentation is in progress of writing, take a look at examples menu and Index page'
    return dict(p=message)
def support():
    """Support page: pulls in the CSS3-buttons and nyroModal assets and
    embeds a Disqus thread whose identifier is derived from the current
    action name."""
    from plugin_PowerGrid.PowerGrid import PowerScript
    response.view = 'default/index.html'
    response.files.append(URL('static', 'plugin_PowerGrid', args=['buttons', 'stylesheets', 'css3buttons.css']))
    response.files.append(URL('static', 'plugin_PowerGrid', args=['modal', 'styles', 'nyroModal.css']))
    response.files.append(URL('static', 'plugin_PowerGrid', args=['modal', 'js', 'jquery.nyroModal.custom.min.js']))
    return dict(p=DIV(
        H1('Doubts, Support, Implementation, Online Training - bruno@blouweb.com'),
        A(SPAN(_class='icon comment'), 'Forum', _title='Blouweb Labs Forum', _class='button big ', _href='https://www.facebook.com/board.php?uid=180466245330580', _target='_blank'),
        HR(),
        DIV(_id="disqus_thread"),
        PowerScript("""
        /* * * CONFIGURATION VARIABLES: EDIT BEFORE PASTING INTO YOUR WEBPAGE * * */
        var disqus_shortname = 'blouweblabs'; // required: replace example with your forum shortname
        // The following are highly recommended additional parameters. Remove the slashes in front to use.
        var disqus_identifier = 'blouweb_powergrid_%s';
        var disqus_url = 'http://labs.blouweb.com/PowerGrid/default/support';
        /* * * DON'T EDIT BELOW THIS LINE * * */
        (function() {
        var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true;
        dsq.src = 'http://' + disqus_shortname + '.disqus.com/embed.js';
        (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq);
        })();
        """ % request.function)  # BUG FIX: was `request.fucntion`, an AttributeError at runtime
    ))
def comments():
    """General comments page."""
    response.view = 'default/index.html'
    message = 'Leave a general message!'
    return dict(p=message)
def donate():
    """Donation information page."""
    response.view = 'default/index.html'
    heading = H1('Donate on Paypal to ', SPAN('bruno@blouweb.com', _style='color:blue;'))
    return dict(p=XML(heading))
def get():
    """Download link page."""
    response.view = 'default/index.html'
    link = A('Download at bitbucket', _href='https://bitbucket.org/rochacbruno/powergrid/downloads', _target='_blank')
    return dict(p=link)
def about():
    """Credits page: lists the projects PowerGrid builds on."""
    # FIX: user-facing typo "poweredby" -> "powered by".
    info = """ <h2>This plugin is powered by:</h2>
    <ul>
    <li><a target="_blank" href='http://www.web2py.com'>web2py framework</a> </li>
    <li><a target="_blank" href='http://cursodepython.com.br'>Python</a></li>
    <li><a target="_blank" href='http://www.wbotelhos.com/gridy/'>wbotelhos Jquery Gridy</a></li>
    <li><a target="_blank" href='http://nyromodal.nyrodev.com/'>Jquery nyroModal</a></li>
    <li><a target="_blank" href='http://css3buttons.michaelhenriksen.dk/'>CSS3 Buttons</a></li>
    <li><a target="_blank" href='http://labs.blouweb.com'>Blouweb Power Tools</a></li>
    <li>*<a target="_blank" href='http://www.python-excel.org/'>python-excel xlwt &amp; xlrd</a></li>
    <li>*<a target="_blank" href='http://wijmo.com/widgets/wijmo-open/'>Wijmo Open</a></li>
    </ul>
    <b>* future implementations under development</b>"""
    return dict(info=XML(info))
| Python |
# Mail-queue worker: endlessly drains pending rows from db.queue, sends each
# one via the web2py `mail` object, and records the outcome on the row.
# Relies on the enclosing application's `db`, `mail` and `settings` objects.
import time
#requires settings.email*
while True:
    # Take at most settings.email_queue_max pending messages per pass.
    rows = db(db.queue.status=='pending').select(db.queue.ALL, limitby=(0, settings.email_queue_max))
    for row in rows:
        # mail.send returns a truthy value on success.
        if mail.send(to=row.email,
                     subject=row.subject,
                     message=row.message,
                     bcc=settings.email_bcc):
            row.update_record(status='sent')
        else:
            row.update_record(status='failed')
    # Persist the status changes before sleeping until the next pass.
    db.commit()
time.sleep(settings.email_queue_interval) | Python |
#!/usr/bin/python
# Django <=1.3-era manage.py entry point (uses the old execute_manager API).
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| Python |
# *-* encoding: utf8 *-*
from django.db import models
class Estatico(models.Model):
    # Static site page: looked up by its `link` slug and rendered as a
    # Django template (see conteudo.views.estatico).
    link = models.CharField(max_length=20)
    label = models.CharField(max_length=50, verbose_name='Titulo')
    conteudo = models.TextField()

    def __unicode__(self):
        return self.label
class Post(models.Model):
    # News/blog entry; the author is fixed and non-editable, and `date`
    # auto-updates on every save (auto_now=True).
    titulo = models.CharField(max_length=20)
    conteudo = models.TextField()
    author = models.TextField(default=u'Organização do Flisol Maringá', editable=False)
    date = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return self.titulo
class Publicidade(models.Model):
    # Sponsor/partner entry shown on every page, grouped by `tipo`
    # (realization / sponsorship / support / links -- see PageData).
    AJUDA_CHOICES = (
        (u'R', u'Realização'),
        (u'P', u'Patrocinio'),
        (u'A', u'Apoio'),
        (u'L', u'Links'),
    )
    nome = models.CharField(max_length=50)
    link = models.URLField(max_length=100)
    logotipo = models.ImageField(upload_to='logo')
    tipo = models.CharField(max_length=2, choices=AJUDA_CHOICES)

    def __unicode__(self):
        return self.nome
| Python |
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # failUnlessEqual is a deprecated unittest alias; assertEqual is the
        # supported spelling.
        self.assertEqual(1 + 1, 2)
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
# -*- coding: utf-8 -*-
from django.contrib import admin
from flisol_system.conteudo.models import Estatico, Post, Publicidade
class PostAdmin(admin.ModelAdmin):
    # Load TinyMCE so Post bodies get a rich-text editor in the admin.
    class Media:
        js = ('/js/tiny_mce/tiny_mce.js', '/js/textareas.js')

# Register the content models with the default admin site.
admin.site.register(Estatico)
admin.site.register(Post, PostAdmin)
admin.site.register(Publicidade)
| Python |
#! -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
from django.template import RequestContext, Template, Context
from models import Estatico
from configuration.models import PageData
from conteudo.models import Post
def estatico(request, pagina):
    """Render a static content page identified by its `link` slug.

    The local variable names matter: they are handed to the template via
    locals(), so they must not be renamed.
    """
    page = "conteudo_estatico.html"
    # SECURITY FIX: `pagina` comes straight from the URL; the original code
    # interpolated it into a string passed to eval(), allowing arbitrary code
    # execution. Query the ORM directly instead.
    obj = Estatico.objects.get(link=pagina)
    t = Template(obj.conteudo)
    data = PageData()
    label = obj.label
    conteudo = t.render(Context())
    return render_to_response('index.html', locals(),
                              context_instance=RequestContext(request))
def post(request, post):
    # Render a single Post by id; `post` ends up as the model instance, or
    # None when no row matches. The locals (data, post, page) are handed to
    # the template via locals(), so their names must not change.
    data = PageData()
    post = Post.objects.filter(id=post)
    if len(post) > 0:
        post = post[0]
    else:
        post = None
    page = 'post.html'
    return render_to_response('index.html', locals(),
                              context_instance=RequestContext(request))
| Python |
#!/usr/bin/python
# Django <=1.3-era manage.py entry point (uses the old execute_manager API).
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| Python |
from django.conf.urls.defaults import *
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
    # Custom admin-side reports must precede the generic admin catch-all.
    (r'^admin/inscricoes/palestra/(?P<palestraId>\d+?)/presenca/$', 'flisol_system.inscricoes.views.listaPresenca'),
    (r'^admin/contato/notificacao/$', 'flisol_system.contato.views.notificacao'),
    (r'^admin/(.*)', admin.site.root),
    # Static media served by Django itself (development setup).
    (r'^media/(.*)$', 'django.views.static.serve',
        {'document_root': settings.MEDIA_ROOT}),
    (r'^(?P<res>\d+)/$', 'flisol_system.views.index'),
    (r'^$', 'flisol_system.views.index'),
    # Content pages and blog posts.
    (r'^conteudo/(?P<pagina>.+?)$', 'flisol_system.conteudo.views.estatico'),
    (r'^post/(?P<post>\d+)/$', 'flisol_system.conteudo.views.post'),
    (r'^contato/$', 'flisol_system.contato.views.contato'),
    # Call for talks and attendee registration.
    (r'^chamada/$', 'flisol_system.inscricoes.views.proposta'),
    (r'^inscricao/$', 'flisol_system.inscricoes.views.inscricao'),
    (r'^historico/palestras/(?P<ano>\d+)/$', 'flisol_system.inscricoes.views.histpalestras'),
    (r'^js/(?P<path>.*)$', 'django.views.static.serve', {'document_root': 'templates/js'}),
)
| Python |
import os
PROJECT_ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# SMTP settings for outgoing mail (Gmail over STARTTLS on port 587).
# NOTE(review): real credentials must not be committed to version control --
# load them from the environment or a local, untracked settings file.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'xxxxxx@gmail.com'
EMAIL_HOST_PASSWORD = 'xxxxxxxx'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        # NOTE(review): ENGINE selects sqlite3, yet USER/PASSWORD/HOST/PORT
        # carry MySQL-style values (and the inline comments mark them as
        # "Not used with sqlite3"). Confirm which backend is intended.
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'flisol',                       # Or path to database file if using sqlite3.
        'USER': 'root',                         # Not used with sqlite3.
        'PASSWORD': '123456',                   # Not used with sqlite3.
        'HOST': '127.0.0.1',                    # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '3306',                         # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Sao_Paulo'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'pt-br'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT_PATH, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/admin_media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'm@5$@30h66*yx!nr3hk)72n%1*k=o-*=l_f4hhrk-5^bv$!!o@'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'flisol_system.urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT_PATH,'templates'),
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'flisol_system.contato',
'flisol_system.inscricoes',
'flisol_system.conteudo',
'flisol_system.configuration',
)
| Python |
# -*- encoding: utf-8 -*-
from django.db import models
from conteudo.models import Publicidade
# XXX fazer com que essa instancia seja criada na instalacao
class FlisolManager(models.Model):
    """Singleton-style site configuration for the FLISOL event site.

    NOTE(review): nothing enforces a single row; getConf() assumes at
    least one instance exists and raises IndexError otherwise — confirm
    one is created at install time (see the XXX comment above).
    """
    # Open/closed state shared by `proposta` and `inscricao`.
    ESTADO_CHOICES = (
        (u'A', u'Aberto'),
        (u'F', u'Fechado'),
    )
    # Message shown while the call for talks is closed.
    proposta_msg = models.TextField(verbose_name="Mensagem da Proposta",
        help_text="Mensagem exibida quando as propostas de trabalhos estiverem fechada")
    proposta = models.CharField(max_length=2, choices=ESTADO_CHOICES,
        help_text="Abre e fecha a chamada de trabalhos")
    # Message shown while registration is closed.
    inscricao_msg = models.TextField(verbose_name="Mensagem da Inscrição",
        help_text="Mensagem exibida quando as inscrições estão fechada")
    inscricao = models.CharField(max_length=2, choices=ESTADO_CHOICES,
        help_text="Abre e fecha as inscrições")
    # Current edition year of the event.
    ano = models.IntegerField(help_text="Ano atual")
    # Address that receives site notifications (contact form, etc.).
    email = models.CharField(max_length=100, verbose_name="E-mail",
        help_text="Email que irá receber as notificações")
    def __unicode__(self):
        return u'Configurações'
    def getConf(self):
        # Returns the first stored configuration row.  Does not read
        # `self`, so the views call it on a fresh unsaved instance.
        conf = FlisolManager.objects.all()
        conf = conf[0]
        return conf
# no futuro colocar:
# header
# css
class MenuSite(models.Model):
    """One entry of the site's navigation menu."""
    # Whether the link opens in a new tab or the current one.
    TIPO_CHOICES = (
        (u'N', u'Nova aba'),
        (u'A', u'Aba atual'),
    )
    menu = models.CharField(max_length=30, help_text='Nome que será exibido no menu')
    link = models.CharField(max_length=200, help_text='Endereço para o qual o menu se refere')
    novaaba = models.CharField(max_length=2, choices=TIPO_CHOICES, verbose_name='Tipo de abertura da página',
        help_text='Assinala se o novo endereço será aberto em uma nova aba ou na aba atual')
class PageData:
    """Querysets shared by every public page: the four kinds of
    Publicidade (sponsors, supporters, organizers, links) plus the
    navigation menu entries, each ordered by id.

    Querysets are lazy, so nothing hits the database until a template
    iterates an attribute.
    """
    def __init__(self):
        ads = Publicidade.objects
        for attr, tipo in (('patrocinios', u'P'),
                           ('apoios', u'A'),
                           ('realizacoes', u'R'),
                           ('links', u'L')):
            setattr(self, attr, ads.filter(tipo=tipo).order_by('id'))
        self.menus = MenuSite.objects.all().order_by('id')
| Python |
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # assertEqual replaces failUnlessEqual, which is a deprecated
        # alias (removed from unittest in later Python versions).
        self.assertEqual(1 + 1, 2)
# Doctest suite discovered by the test runner through the __test__ protocol.
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
from django.contrib import admin
from models import FlisolManager, MenuSite
# Expose the site configuration and menu entries in the Django admin
# with the default ModelAdmin options.
admin.site.register(FlisolManager)
admin.site.register(MenuSite)
| Python |
# Create your views here.
| Python |
#! -*- coding: utf-8 -*-
from django.db import models
from django import forms
from django.contrib.admin import widgets
class NotificacaoForm(forms.Form):
    """Admin form used to send bulk notifications to participants."""
    # Audience selector: everyone / this year's registrants / InstallFest.
    DEST_CHOICES = (
        (u'T', u'Todos'),
        (u'A', u'Apenas usuarios inscritos este ano'),
        (u'I', u'Apenas usuarios que levarão a máquina para o INSTALLFEST'),
    )
    destinatario = forms.ChoiceField(choices=DEST_CHOICES)
    titulo = forms.CharField(max_length=200)
    # `widgets.forms` reaches django.forms through the admin widgets module.
    mensagem = forms.CharField(widget=widgets.forms.Textarea)
    def __unicode__(self):
        # NOTE(review): this form declares no `nome` field, and Django
        # forms do not expose fields as instance attributes, so calling
        # this raises AttributeError; it also shadows the form's default
        # HTML-rendering hook — confirm it is ever invoked.
        return self.nome
class ContatoForm(forms.Form):
    """Public contact form (name, e-mail, optional phone, message)."""
    nome = forms.CharField(max_length=100, help_text='Digite seu nome completo. (campo obrigatório)')
    email = forms.EmailField(help_text='Digite seu e-mail para podermos respondê-lo. (campo obrigatório)')
    telefone = forms.CharField(max_length=20, required=False)
    # `widgets.forms` reaches django.forms through the admin widgets module.
    mensagem = forms.CharField(widget=widgets.forms.Textarea)
    def __unicode__(self):
        # NOTE(review): form fields are not instance attributes, so
        # `self.nome` raises AttributeError if this is ever called; it
        # also shadows the form's default HTML-rendering hook.
        return self.nome
| Python |
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # assertEqual replaces failUnlessEqual, which is a deprecated
        # alias (removed from unittest in later Python versions).
        self.assertEqual(1 + 1, 2)
# Doctest suite discovered by the test runner through the __test__ protocol.
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
# -*- coding: utf-8 -*-
#from django.contrib import admin
#from models import Contato
#
#class ContatoAdmin(admin.ModelAdmin):
# list_display = ('nome','email','telefone')
#
#admin.site.register(Contato, ContatoAdmin)
| Python |
import smtplib
#from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email import Encoders
import os
import getopt
import sys
from email.Header import Header
class EmailMsg:
    """Value object pairing an e-mail subject (`title`) with its body
    text (`msg`), as consumed by Email.send()."""
    def __init__(self, title, msg):
        self.title, self.msg = title, msg
class Email:
    """Minimal SMTP sender authenticating over STARTTLS.

    Defaults target Gmail's submission port; `host`/`port` are keyword
    parameters with the original hard-coded values as defaults, so
    existing two-argument callers keep working.
    """
    def __init__(self, login, passwd, host="smtp.gmail.com", port=587):
        self.login = login
        self.passwd = passwd
        self.host = host
        self.port = port
    def send(self, to, mail):
        """Send `mail` (object with .title/.msg, e.g. EmailMsg) to `to`.

        Raises smtplib.SMTPException subclasses on failure; the
        connection is always terminated.
        """
        body_charset = 'UTF-8'
        msg = MIMEText(mail.msg, 'plain', body_charset)
        msg['From'] = self.login
        msg['To'] = to
        # Header() encodes the subject so non-ASCII titles survive transport.
        msg['Subject'] = Header(mail.title, 'UTF-8')
        mailServer = smtplib.SMTP(self.host, self.port)
        try:
            mailServer.ehlo()
            mailServer.starttls()
            mailServer.ehlo()  # re-identify on the now-encrypted channel
            mailServer.login(self.login, self.passwd)
            mailServer.sendmail(self.login, to, msg.as_string())
        finally:
            # quit() sends SMTP QUIT before closing; the original close()
            # dropped the connection without terminating the session, and
            # leaked it entirely when an earlier call raised.
            mailServer.quit()
| Python |
from django import forms
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.forms import ModelForm
from django.template import RequestContext
from django.core.mail import EmailMessage
from models import ContatoForm
from configuration.models import FlisolManager, PageData
from inscricoes.models import Participante
from datetime import datetime
from string import Template
import time
from django.contrib.admin.views.decorators import staff_member_required
from contato.models import NotificacaoForm
def contato(request):
    """Contact page: renders the contact form and e-mails submissions.

    The template context is passed via locals(), so every local name
    below (data, form, sucesso, erro, page) is template-visible — do
    not rename them.
    """
    data = PageData()
    if request.method=='POST':
        form = ContatoForm(request.POST)
        if form.is_valid():
            # getConf() ignores the fresh instance and fetches the first
            # stored config row (raises IndexError when none exists).
            manager = FlisolManager()
            conf = manager.getConf()
            dest = conf.email
            nome = form.cleaned_data['nome']
            email_contado = form.cleaned_data['email']
            telefone = form.cleaned_data['telefone']
            conteudo = "Nome: " + nome + "\n" \
                + "Email: " + email_contado + "\n" \
                + "Telefone: " + telefone + "\n" \
                + "------------------------------------------------\n" \
                + form.cleaned_data['mensagem']
            subject = nome.title() + " fez contanto pelo site do flisol"
            email = EmailMessage(subject, conteudo, to=[dest])
            email.send()
            # Reset to a blank form after a successful send.
            form = ContatoForm()
            sucesso = True
        else:
            erro = True
    else:
        form = ContatoForm()
    form = form.as_table()
    page = 'contato.html'
    return render_to_response('index.html', locals(),
        context_instance=RequestContext(request))
@staff_member_required
def notificacao(request):
    """Admin-only bulk notification: e-mails registered participants.

    The message body may use string.Template placeholders $nome, $data
    and $palestras, substituted per recipient.  Renders through
    locals(); keep local names stable for the template.
    """
    if request.method == 'POST':
        manager = FlisolManager()
        conf = manager.getConf()
        # NOTE(review): `dest` is assigned but never used in this view.
        dest = conf.email
        form = NotificacaoForm(request.POST)
        if form.is_valid():
            tipo_dest = form.cleaned_data['destinatario']
            titulo = form.cleaned_data['titulo']
            conteudo = form.cleaned_data['mensagem']
            data_hj = datetime.today().strftime('%c')
            # NOTE(review): choice u'A' ("inscritos este ano") from
            # NotificacaoForm is not handled by either branch below.
            if tipo_dest == u'T':
                # Notify every participant.
                for i in Participante.objects.all():
                    palestrasInscritas = ''
                    # Crude throttle between outgoing messages.
                    time.sleep(1)
                    for palestra in i.palestras.order_by('horario'):
                        palestrasInscritas += '\t+ ' + palestra.titulo + '\t\t' + str(palestra.horario) + '\n'
                    # substitute() raises KeyError for any placeholder
                    # other than $nome/$data/$palestras in the message.
                    conteudoCompilado = Template(conteudo).substitute(nome=i.nome, data=data_hj, palestras=palestrasInscritas)
                    email = EmailMessage(titulo, conteudoCompilado, to=[i.email])
                    email.send()
            elif tipo_dest == u'I':
                # Notify only participants bringing a machine (InstallFest).
                for i in Participante.objects.filter(instalar=True):
                    palestrasInscritas = ''
                    time.sleep(1)
                    for palestra in i.palestras.order_by('horario'):
                        palestrasInscritas += '\t+ ' + palestra.titulo + '\t\t' + str(palestra.horario) + '\n'
                    conteudoCompilado = Template(conteudo).substitute(nome=i.nome, data=data_hj, palestras=palestrasInscritas)
                    email = EmailMessage(titulo, conteudoCompilado, to=[i.email])
                    email.send()
            return HttpResponse("participantes notificados")
    else:
        form = NotificacaoForm()
    form = form.as_table()
    return render_to_response(
        'admin/contato/notificacao.html', locals(),
        context_instance=RequestContext(request))
| Python |
from django.shortcuts import render_to_response
from django.template import RequestContext
from inscricoes.models import Palestra, Proposta
from conteudo.models import Post, Publicidade
from configuration.models import MenuSite
from django.http import HttpResponse
from configuration.models import PageData
def index(request, res=None):
    """Front page: paginated news listing, six posts per page.

    `res` is the 1-based page number captured from the URL (None means
    the first page).  All locals are exported to the template via
    locals(), so the (Portuguese) names below are part of the template
    contract and must not be renamed.
    """
    page = 'noticias.html'
    posts = Post.objects.all().order_by('-date')
    numeroDePostsPorPagina = 6
    numeroDePosts = len(posts)
    numeroDaPagina = 1 if res is None else int(res)
    # Slice the current page out of the full listing.
    indexBase = (numeroDaPagina - 1) * numeroDePostsPorPagina
    posts = posts[indexBase: indexBase + numeroDePostsPorPagina]
    proximaPagina = numeroDaPagina + 1
    paginaAnterior = numeroDaPagina - 1
    # Navigation flags for the prev/next links.
    exibirPaginaAnterior = paginaAnterior > 0
    exibirProximaPagina = indexBase + numeroDePostsPorPagina < numeroDePosts
    data = PageData()
    return render_to_response('index.html', locals(),
        context_instance=RequestContext(request))
| Python |
#! -*- coding: utf-8 -*-
from django.db import models
class Proposta(models.Model):
    """A talk/workshop proposal submitted through the public site."""
    # Technical level of the proposed talk.
    NIVEL_CHOICES = (
        (u'B', u'Básico'),
        (u'M', u'Médio'),
        (u'A', u'Avançado'),
    )
    # Kind of activity being proposed.
    CATEGORIA_CHOICES = (
        (u'P', u'Palestra'),
        (u'O', u'Oficina/Palestra'),
        (u'L', u'Lightning Talk'),
    )
    nome = models.CharField(max_length=100, help_text='Digite seu nome completo. (campo obrigatório)')
    email = models.EmailField(help_text='(campo obrigatório)')
    telefone = models.CharField(max_length=20, help_text='Digite um telefone para contato')
    endereco = models.CharField(max_length=100, help_text='Endereço completo. tipo: rua, numero, bairro. (campo obrigatório)')
    categoria = models.CharField(help_text='Qual a categoria da sua proposta?', max_length=2, choices=CATEGORIA_CHOICES)
    titulo = models.CharField(max_length=200, help_text='Indique o título de sua palestra. (campo obrigatório)')
    tema = models.CharField(max_length=100, help_text='Indique um tema para a sua palestra. (campo obrigatório)')
    nivel = models.CharField(help_text='Qual o nível técnico da palestra?', max_length=2, choices=NIVEL_CHOICES)
    proposta = models.TextField(help_text='Descreva em poucas linhas sua proposta de palestra. (campo obrigatório)')
    bio = models.TextField(help_text='Descreva em poucas linhas um pouco de você e sua experiência.')
    def __unicode__(self):
        return self.nome
class Palestra(models.Model):
    """A talk actually scheduled in some year's programme."""
    # Shown to registered attendees.
    horario = models.TimeField()
    titulo = models.CharField(max_length=200)
    palestrante = models.CharField(max_length=100)
    resumo = models.TextField()
    # Caps the number of registrations accepted for this talk.
    vagas = models.IntegerField(help_text='Quantidade de vagas disponíveis para essa palestra')
    # Used by the history pages to group talks by edition.
    ano = models.IntegerField(help_text='Ano em que a palestra será realizada')
    apresentacao = models.FileField(upload_to='apresentacoes', blank=True,
        help_text='Slides utilizados durante a apresentação')
    # Embed markup for the slides on the history page.
    presswidget = models.TextField(blank=True, help_text='Link para embutir a apresentação na página de histórico')
    def __unicode__(self):
        return self.titulo
class Participante(models.Model):
    """An attendee registered for the event, linked to chosen talks."""
    nome = models.CharField(max_length=100, help_text='Digite seu nome completo. (campo obrigatório)')
    email = models.EmailField(help_text='(campo obrigatório)')
    palestras = models.ManyToManyField(Palestra)
    # True when the attendee brings a machine to the InstallFest.
    instalar = models.BooleanField(help_text='Você vai levar seu equipamento para instalações?', verbose_name='InstallFest')
    def __unicode__(self):
        return self.nome
| Python |
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # assertEqual replaces failUnlessEqual, which is a deprecated
        # alias (removed from unittest in later Python versions).
        self.assertEqual(1 + 1, 2)
# Doctest suite discovered by the test runner through the __test__ protocol.
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
# -*- coding: utf-8 -*-
from django.contrib import admin
from models import Palestra, Proposta, Participante
class ParticipanteAdmin(admin.ModelAdmin):
    # Columns shown on the participant changelist.
    list_display = ('nome','email','instalar')
class PalestraAdmin(admin.ModelAdmin):
    # Columns shown on the talk changelist.
    list_display = ('id','horario','titulo','palestrante', 'vagas')
class PropostaAdmin(admin.ModelAdmin):
    # Columns shown on the proposal changelist.
    list_display = ('nome','email', 'tema', 'titulo')
# Hook the inscricoes models into the Django admin using the customized
# ModelAdmin classes defined in this module.
admin.site.register(Participante, ParticipanteAdmin)
admin.site.register(Palestra, PalestraAdmin)
admin.site.register(Proposta, PropostaAdmin)
| Python |
#! -*- coding: utf-8 -*-
from django import forms
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.forms import ModelForm
from django.template import RequestContext
from flisol_system.configuration.models import FlisolManager, PageData
from django.http import Http404
from models import Participante, Proposta, Palestra
from string import Template
from django.forms.models import BaseModelFormSet
from django.forms.models import modelformset_factory
class InscricaoModelForm(ModelForm):
    '''ModelForm used for Participante registration.

    The commented-out __init__ below was an attempt to customize the
    ManyToManyField widget (checkboxes restricted to talks that still
    have free seats) and its help text; kept for reference.
    '''
    # def __init__(self, *args, **kwargs):
    #     disponiveis = []
    #     for palestra in Palestra.objects.all():
    #         quantidade = palestra.vagas - Participante.objects.filter(palestras__id=palestra.id).count()
    #         if quantidade > 0:
    #             disponiveis.append((palestra.id, palestra.titulo))
    #     #self.base_fields['palestras'].widget = forms.CheckboxSelectMultiple(choices=disponiveis)
    #     self.base_fields['palestras'].widget = forms.CheckboxSelectMultiple(queryset=Palestra.objects.all())
    #     self.base_fields['palestras'].help_text = 'Marque as palestras que você quer participar. \
    #     #Obs: Verifique os horários conflitantes antes de escolher.<br>'
    #     super(InscricaoModelForm, self).__init__(*args, **kwargs)
    #
    class Meta:
        model = Participante
        fields = ('nome','email','palestras','instalar')
def inscricao(request):
    """Registration page: validates and saves a Participante.

    Rejects registrations that pick two talks in the same time slot or a
    talk whose seats are all taken.  Renders through locals(); the local
    names below are template context — do not rename them.
    """
    data = PageData()
    erro = False
    if request.method=='POST':
        form = InscricaoModelForm(request.POST)
        erro_repetidos = []
        erro_acabou = []
        try:
            # Raw POST selection; KeyError when no talk was ticked at
            # all (field validation reports that case).  The original
            # bare `except` is narrowed to the exception that can occur.
            escolhas = dict(form.data)['palestras']
        except KeyError:
            pass
        else:
            palestras = Palestra.objects.filter(id__in=escolhas)
            # Two chosen talks in the same time slot are a conflict.
            for i in palestras:
                for j in palestras:
                    if i.id != j.id and i.horario == j.horario:
                        erro_repetidos.append(i.titulo)
            # Talks with no remaining seats.
            for i in palestras:
                if i.vagas - Participante.objects.filter(palestras__id=i.id).count() <= 0 :
                    erro_acabou.append(i.titulo[0:20])
        if form.is_valid() and not erro_repetidos and not erro_acabou:
            form.save()
            sucesso = True
            form = InscricaoModelForm()
        else:
            erro = True
    else:
        # Plain GET gets a blank form.  (Removed leftover debug code that
        # printed 'passei' and rendered a model formset hard-bound to
        # Participante id=5 instead of the blank form the commented-out
        # line above it intended.)
        form = InscricaoModelForm()
    # Talks whose seats are already gone, for display.
    esgotadas = []
    for i in Palestra.objects.all():
        quantidade = i.vagas - Participante.objects.filter(palestras__id=i.id).count()
        if quantidade <= 0:
            esgotadas.append(i.titulo)
    form = form.as_table()
    manager = FlisolManager()
    config = manager.getConf()
    closed_msg = config.inscricao_msg
    fechada = config.inscricao == 'F'
    page = 'inscricao.html'
    return render_to_response('index.html', locals(),
        context_instance=RequestContext(request))
class PropostaModelForm(ModelForm):
    """ModelForm for talk proposals.

    NOTE(review): Meta declares neither `fields` nor `exclude`, so every
    Proposta field is exposed on the public form.
    """
    class Meta:
        model = Proposta
def proposta(request):
    """Call-for-talks page: saves a Proposta submission.

    Renders through locals(); keep local names stable for the template.
    NOTE(review): closed_msg/fechada are only set on GET, so a POST
    render never carries the "closed" state — confirm the template
    tolerates their absence.
    """
    if request.method=='POST':
        form = PropostaModelForm(request.POST)
        if form.is_valid():
            form.save()
            sucesso = True
            # Reset to a blank form after a successful save.
            form = PropostaModelForm()
        else:
            erro = True
    else:
        manager = FlisolManager()
        conf = manager.getConf()
        closed_msg = conf.proposta_msg
        fechada = conf.proposta == 'F'
        form = PropostaModelForm()
    data = PageData()
    form = form.as_table()
    page = 'proposta.html'
    return render_to_response('index.html', locals(),
        context_instance=RequestContext(request))
def histpalestras(request, ano):
    """History page: talks given in a given year.

    Valid years run from 2005 up to the current year stored in the
    configuration; anything else raises Http404.  Renders through
    locals(); keep local names stable for the template.
    """
    page = 'histpalestras.html'
    # NOTE(review): reads the config row directly instead of using
    # FlisolManager.getConf(); raises IndexError when no row exists.
    conf = FlisolManager.objects.all()
    conf = conf[0]
    ano_vigente = conf.ano
    ano = int(ano)
    if ano > ano_vigente or ano < 2005:
        raise Http404
    palestras = Palestra.objects.filter(ano=ano)
    # Previous/next navigation, clamped to [2005, ano_vigente].
    ano_anterior = ano - 1
    ano_proximo = ano + 1
    proximo = ano_proximo <= ano_vigente
    anterior = ano_anterior > 2004
    data = PageData()
    return render_to_response('index.html', locals(),
        context_instance=RequestContext(request))
def listaPresenca(request, palestraId):
    """Printable attendance list for one talk.

    Uses get_object_or_404 (already imported in this module) so an
    unknown id yields a 404 page instead of an unhandled
    Palestra.DoesNotExist (HTTP 500).  Renders through locals().
    """
    participantes = Participante.objects.filter(palestras__id=palestraId)
    palestra = get_object_or_404(Palestra, id=palestraId)
    return render_to_response('lista_presenca.html', locals(),
        context_instance=RequestContext(request))
| Python |
#!/usr/bin/env python
##########################################################
##########################################################
## ##
## Flexible Registry Parser ##
## Author: Paul-Henri Huckel ##
## Contact: @FrOzPolow ##
## ##
##########################################################
##########################################################
#TODO: Manage user list
#TODO: MRUList formating
import hivex
import sys
import getopt
import re
import os
import codecs
import binascii
import datetime
from xml.etree import ElementTree
from optparse import OptionParser, Option, OptionValueError
VERBOSITY = 0
###
### Return path of the hive file
###
###
def get_hive_location(category, item, OS):
    """Resolve which hive file (under the global hive_dir) holds the data
    for <category>/<item> on the given OS, per the XML conf file
    (global conf_file).  Returns the file path, or None when the conf or
    hive file cannot be read.
    """
    # NOTE(review): leftover debug output — prints every item looked up.
    print item
    try:
        f = open(conf_file)
        tree = ElementTree.parse(f)
    except:
        print " Something is wrong with the conf file path given"
        return
    for node in tree.findall('./' + category + '/' + item + '/key'):
        key = node.attrib.get('OS')
        if key == OS:
            string = node.attrib.get('Hive')
            # Probe that the hive file can actually be opened before
            # returning its path.  NOTE(review): `string` is None when
            # the <key> node lacks a Hive attribute — confirm the conf
            # always provides it.
            try:
                f2 = open(hive_dir +"/" + string)
            except:
                print " Something is wrong with the hive file, please check the conf file at line", OS, category, item
                return
            return hive_dir +"/" + string
    return
###
### print & Format output according to the data type
### val: list - val[0]=type, val[1]=value
### return: string - value in val[1]
###
def print_fct ( val ):
    """Format a registry value for display.

    val: (type_id, raw_value) pair as returned by hivex, or a false
    value when the key/value was absent.  Returns a display string.
    Type ids follow the registry: 1=REG_SZ, 2=REG_EXPAND_SZ,
    3=REG_BINARY, 4=REG_DWORD (LE), 5=REG_DWORD_BIG_ENDIAN,
    7=REG_MULTI_SZ.
    """
    if not val:
        # Missing key/value.  (In the original the two failure messages
        # were swapped: an unsupported type returned this message and a
        # missing value fell through to "VALUE TYPE NOT SUPPORTED".)
        return " This key is not defined within the hive file"
    if val[0] in (1, 2, 7):
        # Plain, expandable and long strings are printable as-is.
        return val[1]
    if val[0] in (3, 4):
        # Binary data / little-endian DWORD: dump as hex.
        return ''.join(val[1]).encode('hex')
    if val[0] == 5:
        # Big-endian DWORD: reverse the bytes before hex-dumping.
        return ''.join(reversed(val[1])).encode('hex')
    return "VALUE TYPE NOT SUPPORTED"
###
### Looks in the XML file and return a list of each subkey leading to the node requested
### category: String
### item: string
### OS: string
### return: list
###
def get_key_list (category, item, OS, username = ""):
    """Return the registry path for <category>/<item> on `OS` as a list
    of subkey names, read from the XML conf file (global conf_file).

    The "CurrentControlSet" placeholder is resolved to the control set
    currently in use.  When no conf entry matches the OS, [''] is
    returned (split of the empty string).
    """
    string = ""
    f = open(conf_file)
    tree = ElementTree.parse(f)
    for node in tree.findall('./' + category + '/' + item + '/key'):
        if node.attrib.get('OS') == OS:
            string = node.attrib.get('path')
            break
    key_path = string.split('\\', -1)
    # enumerate() replaces the original manually-maintained index
    # counter, removing any risk of the counter drifting from the loop.
    for i, key in enumerate(key_path):
        if key == "CurrentControlSet":
            key_path[i] = "ControlSet00" + get_currentcontrolset()
    return key_path
###
### Builds the list of values requested in the conf file
### category: String
### item: string
### OS: string
### return: list
###
def get_values_list ( category, item, OS ):
    """Return the value names requested in the conf file (global
    conf_file) for <category>/<item> under `OS`; [] when none listed."""
    f = open(conf_file)
    tree = ElementTree.parse(f)
    spec = ""
    for node in tree.findall('./' + category + '/' + item + '/key'):
        if node.attrib.get('OS') == OS:
            spec = node.attrib.get('values')
            break
    if not spec:
        return []
    return spec.split(',', -1)
###
### Look which controlset is the one currently in use
### return: string - controlset
###
def get_currentcontrolset ():
    """Return the number of the control set in use (e.g. "1"), read from
    the system hive's Select\\Current value; None when no system hive is
    found under the global hive_dir."""
    global hive_dir
    # Probe both common filename casings of the system hive.
    try:
        h = hivex.Hivex(hive_dir+"/system")
        hive = "/system"
    except:
        try:
            h = hivex.Hivex(hive_dir+"/SYSTEM")
            hive="/SYSTEM"
        except:
            print "Could not find the current controlset... Did you provide the System hive?"
            return
    system_hive = hive_dir+hive
    # Re-open by full path (the handle above is discarded).
    h = hivex.Hivex(system_hive)
    key = h.root()
    key = h.node_get_child(key, "Select")
    val = h.node_get_value(key, "Current")
    val = h.value_value(val)
    # Decode the little-endian DWORD into a decimal string.
    # NOTE(review): callers prepend "ControlSet00", so a value >= 10
    # would produce a wrong key name — confirm acceptable.
    return str(int(''.join(reversed(val[1])).encode('hex'), 16))
###
### Return modified time of a key
###
def get_modified_time (hive, path):
    """Return the last-written time of the key at `path` in `hive`,
    formatted as 'YYYY-MM-DD HH:MM:SS' in local time.

    hivex timestamps are Windows FILETIME: 100-ns ticks since
    1601-01-01; convert to seconds and rebase to the Unix epoch
    (11644473600 s offset).
    """
    h = hivex.Hivex(hive)
    key = get_node(hive, path)
    ts = h.node_timestamp(key)
    # Floor division keeps integer semantics on both Python 2 and 3
    # (the original '/' becomes float division under Python 3).
    secs = ts // 10000000 - 11644473600
    return datetime.datetime.fromtimestamp(secs).strftime('%Y-%m-%d %H:%M:%S')
###
### Print single value found of a single key found in path
### adds the last written time & path depending on the verbosity set by the user
###
def get_value (hive, path, value_list, timestamp):
    """Print the requested values of the key at `path` in `hive`.

    value_list: names of values to print; a single "*" entry expands to
    every value on the key.  timestamp=1 forces printing the key's
    last-written time (VERBOSITY >= 2 does too); VERBOSITY 1 or 3
    appends the key path to each line.
    """
    global VERBOSITY
    h = hivex.Hivex(hive)
    key = get_node(hive, path)
    i = 0
    write_path = ""
    if not key:
        # NOTE(review): only warns — execution continues with key=None
        # and the hivex calls below will fail; confirm intended.
        print "Key not defined in the Hive file Or no values found"
    if value_list and value_list[0] == "*":
        # Wildcard: replace the list with every value name on the key.
        value_list = h.node_values(key)
        while i < len(value_list):
            value_list[i] = h.value_key(value_list[i])
            i = i + 1
    if (timestamp == 1) or (int(VERBOSITY) >= 2):
        print " Last Written Time: " + get_modified_time(hive, path)
    if (int(VERBOSITY) == 1) or (int(VERBOSITY) == 3):
        write_path = hive + "/" + '/'.join(path)
    for value in value_list:
        try:
            val = h.node_get_value(key, value)
            val = h.value_value(val)
            print " ** ", value, ": ", print_fct(val), " ", write_path
        except:
            print " ** ", value,": not defined in the Hive file OR no values found"
    if value_list:
        # NOTE(review): prints whenever values WERE listed, which looks
        # inverted for a "no further value" message — confirm intent.
        print " -- No further value listed/requested"
###
### Return list of subkey of a given node (called for type=list)
###
def get_list_child (hive, path):
    """Return the names of the child subkeys of the key at `path`
    (used for conf entries of type=list); [] when the path is invalid."""
    result = []
    h = hivex.Hivex(hive)
    key = get_node(hive, path)
    try:
        for element in h.node_children(key):
            result.append(h.node_name(element))
    except:
        # get_node already printed on failure; key is then None and
        # node_children raises.  Any names appended before the failure
        # are still returned.
        print " That path doesn't exist in the provided Hive file"
    return result
###
### Drill down to a specific node and return it
###
def get_node (hive, path):
h = hivex.Hivex(hive)
key = h.root()
i = 0
try:
for iter in path:
key = h.node_get_child(key, iter)
except:
print " That path doesn't exist in the provided Hive file"
return None
return key
###
### Boolean function indicating if it is necessary to explore children of the current node
###
def recursive_search (category, item, OS):
    """Return the conf file's 'visit_children' depth for
    <category>/<item> on `OS` (0 when absent), i.e. whether and how deep
    subkeys must be walked.

    Fix: `string` is now initialized before the loop — the original
    raised UnboundLocalError whenever no <key> entry matched the OS
    (the sibling get_values_list already initializes it).
    """
    string = ""
    f = open(conf_file)
    tree = ElementTree.parse(f)
    for node in tree.findall('./' + category + '/' + item + '/key'):
        if node.attrib.get('OS') == OS:
            string = node.attrib.get('visit_children')
            break
    if string:
        return int(string)
    return 0
###
### Boolean function indicating if it is necessary to print the last written time of the node
###
def timestamp_on(category, item, OS):
    """Return the conf file's 'timestamp' flag for <category>/<item> on
    `OS` (0 when absent): whether to print the key's last-written time.

    Fix: `string` is now initialized before the loop — the original
    raised UnboundLocalError whenever no <key> entry matched the OS.
    """
    string = ""
    f = open(conf_file)
    tree = ElementTree.parse(f)
    for node in tree.findall('./' + category + '/' + item + '/key'):
        if node.attrib.get('OS') == OS:
            string = node.attrib.get('timestamp')
            break
    if string:
        return int(string)
    return 0
###
### goes through all children from a given level
###
def recursive_children(level, hive, path, value_list, timestamp):
h = hivex.Hivex(hive)
key = get_node(hive, path)
if key is None:
print " That item does not exist OR the conf doesn't define it for that OS"
return
if level == 0:
print " **", h.node_name(key)
if len(value_list) is not 0:
get_value(hive, path, value_list, timestamp)
print
return
if level == 1:
print " **", h.node_name(key)
for element in h.node_children(key):
tmp = list(path)
tmp.append(h.node_name(element))
recursive_children(level - 1, hive , tmp, value_list, timestamp)
###
### Seek & print the information
###
def print_information (category, item, OS):
    """Resolve <category>/<item> for `OS` via the conf file and print
    its registry data, recursing into subkeys when the conf requests it.
    """
    hive = get_hive_location(category, item, OS)
    if not hive:
        print " That item does not exist OR the conf doesn't define it for that OS"
        print
        return
    values_list = get_values_list (category, item, OS)
    path = get_key_list(category, item, OS)
    recursive = recursive_search(category, item, OS)
    timestamp = timestamp_on(category, item, OS)
    ## Getting and Printing information from each subcategory
    if recursive:
        recursive_children(recursive, hive, path, values_list, timestamp)
    else:
        get_value(hive, path, values_list, timestamp)
    print
###
### If the OS has not been given in the CL, it checks in software hive for it
###
def autodetect_os(hive_dir):
    """Detect the Windows version from the software hive's ProductName.

    Probes "software" then "SOFTWARE" under hive_dir.  The value read is
    raw UTF-16-LE bytes, hence the interleaved NUL patterns below.
    Returns one of "WinXP"/"Vista"/"Win7"/"Win8", or None when
    undetectable.
    """
    try:
        h = hivex.Hivex(hive_dir+"/software")
        hive = "/software"
    except:
        try:
            h = hivex.Hivex(hive_dir+"/SOFTWARE")
            hive="/SOFTWARE"
        except:
            print "No OS specified and unable to automatically find it... Please check the Hive provided"
            return
    key = get_node(hive_dir+hive, ["Microsoft", "Windows NT","CurrentVersion"])
    val = h.node_get_value(key,"ProductName")
    val = h.value_value(val)
    print
    # Check order matters: a bare "7"/"8" digit would also match inside
    # other product names, so the more specific patterns come first.
    if "X\x00P" in val[1]:
        print "OS detected: WinXP"
        return "WinXP"
    if "V\x00i\x00s\x00t\x00a" in val[1]:
        print "OS detected: Vista"
        return "Vista"
    if "\x007\x00" in val[1]:
        print "OS detected: Win7"
        return "Win7"
    if "\x008\x00" in val[1]:
        print "OS detected: Win8"
        return "Win8"
###
### Check the path given in arguments are valid before launching anything.
###
def argument_check(categories, conf_file, hive_dir, system_type):
    """Validate the CLI inputs before processing: the conf file is
    readable, the hive directory exists, every requested category is
    present in the XML, and an OS was identified.

    Returns 1 when everything checks out, else 0 (int return kept for
    caller compatibility).  Fixes vs. original: the conf file is opened
    once and closed (it was opened twice and never closed) and the bare
    `except` is narrowed to IOError.  The print calls are single-string
    and parenthesized, producing byte-identical output on Python 2.
    """
    try:
        f = open(conf_file)
    except IOError:
        print("Could not find a conf file at the location %s" % conf_file)
        return 0
    if not os.path.isdir(hive_dir):
        print("Could not find the directory %s" % hive_dir)
        return 0
    tree = ElementTree.parse(f)
    f.close()
    if categories:
        for category in categories:
            if not tree.findall('./' + category):
                print('That category " %s " is not defined in the XML file' % category)
                return 0
    if system_type is None:
        print("You did not provide the OS name and FRP was not able to identify it automatically")
        return 0
    return 1
###
### If no categories are given in arguments, go over the XML file and add each category to the list.
###
def build_list_category(categories):
    """Return `categories` unchanged when provided; otherwise collect
    every top-level category tag from the XML conf file (global
    conf_file)."""
    global conf_file
    if categories:
        return categories
    f = open(conf_file)
    tree = ElementTree.parse(f)
    return [child.tag for child in tree.getroot()]
###
### Build the list of children node within the category from the XML file
###
def build_item_list(category):
    """List the tag names of every child node under `category` in the
    XML conf file (global conf_file)."""
    global conf_file
    f = open(conf_file)
    tree = ElementTree.parse(f)
    return [item.tag for item in tree.find(category)]
###
### Return all usernames found in the SAM file as a list of string
###
def build_user_list(hive_dir):
    """Return every account name found under
    SAM\\Domains\\Account\\Users\\Names in the SAM hive located in
    `hive_dir`; [] when no SAM hive is present.

    Fix: when neither "SAM" nor "sam" can be opened, return the empty
    list — the original only printed a warning and then crashed with
    NameError on the undefined `hive`/`h` names.
    """
    user_list = []
    try:
        h = hivex.Hivex(hive_dir+"/SAM")
        hive = "/SAM"
    except:
        try:
            h = hivex.Hivex(hive_dir+"/sam")
            hive = "/sam"
        except:
            print("Did not find a SAM hive file therefore cannot build the user_list")
            return user_list
    key = get_node(hive_dir+hive, ["SAM", "Domains", "Account","Users", "Names"])
    for element in h.node_children(key):
        user_list.append(h.node_name(element))
    return user_list
class MyOption(Option):
    """optparse Option subclass adding an "extend" action: the option's
    value is split on commas and accumulated across repeated uses of the
    flag (e.g. -c a,b -c c  ->  ['a', 'b', 'c'])."""
    ACTIONS = Option.ACTIONS + ("extend",)
    STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
    TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
    ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
    def take_action(self, action, dest, opt, value, values, parser):
        if action != "extend":
            # Every standard action defers to the base implementation.
            Option.take_action(self, action, dest, opt, value, values, parser)
            return
        values.ensure_value(dest, []).extend(value.split(","))
VERSION='0.2'
def main ():
    """Entry point: parse the command line, locate the conf file and
    hive directory, then print every requested category's registry data.
    """
    PROG = os.path.basename(os.path.splitext(__file__)[0])
    description = """Flexible Registry Parser"""
    ## Command Line Options
    parser = OptionParser(option_class=MyOption, usage='usage: %prog -O OS [OPTIONS]', version='%s %s' % (PROG, VERSION), description=description)
    parser.add_option('-c', '--categories', action="extend", type="string", dest='categories', metavar='CATEGORIES', help='comma separated list of categories')
    parser.add_option('-d', '--directory', type="string", action="store", dest="directory", metavar='PATH', help="Directory where the hive files are located. Default is CWD")
    # parser.add_option('-l', '--list-category', dest="list", metavar='LIST', action="store_true", help="Prints all category lookable in the conf file")
    parser.add_option('-u', '--users', action="extend", type="string", dest='users', metavar='USERS', help='List of user to inspect')
    parser.add_option('-f', '--file-conf', type="string", dest="conf_file", metavar='PATH', help="XML file path. Default is CWD/conf.xml")
    parser.add_option('-O', '--OS', type="choice", dest="OS", choices=["WinXP", "Vista", "Win7", "Win8"], help='Type of OS the hive file has been collected from')
    parser.add_option('-v', '--verbose', type="choice", dest="VERB", choices=["0","1","2","3",], help='define the level of verbosity.\n0 = default\n1 = adds path to the key\n2 = adds \"Last modified time\"\n3 = adds both')
    OPTIONS, args = parser.parse_args()
    # No arguments at all: show the help text.
    if len(sys.argv) == 1:
        parser.parse_args(['--help'])
    # The helper functions read these as module globals.
    global hive_dir
    global VERBOSITY
    global conf_file
    system_type = OPTIONS.OS
    categories = OPTIONS.categories
    users = OPTIONS.users
    conf_file = OPTIONS.conf_file
    hive_dir = OPTIONS.directory
    VERBOSITY = OPTIONS.VERB
    # Defaults: conf.xml and hive files in the current working directory.
    if conf_file is None:
        conf_file = os.getcwd()+"/conf.xml"
    if hive_dir is None:
        hive_dir = os.getcwd()
    if VERBOSITY is None:
        VERBOSITY = 0
    if system_type is None:
        system_type = autodetect_os(hive_dir)
    if not argument_check(categories, conf_file, hive_dir, system_type):
        print "Exiting...."
        return 0
    categories = build_list_category(categories)
    # NOTE(review): user_list is built but never used (see the TODO in
    # the file header); the `users` option is likewise unused.
    user_list = build_user_list(hive_dir)
    #print user_list
    for category in categories:
        print
        print " **************************************"
        print " ", category
        print " **************************************"
        print
        item_list = build_item_list(category)
        for item in item_list:
            print_information(category, item, system_type)
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
##########################################################
##########################################################
## ##
## Flexible Registry Parser ##
## Author: Paul-Henri Huckel ##
## Contact: @FrOzPolow ##
## ##
##########################################################
##########################################################
#TODO: Manage user list
#TODO: MRUList formating
import hivex
import sys
import getopt
import re
import os
import codecs
import binascii
import datetime
from xml.etree import ElementTree
from optparse import OptionParser, Option, OptionValueError
VERBOSITY = 0
###
### Return path of the hive file
###
###
def get_hive_location(category, item, OS):
    """Resolve which hive file (under the global hive_dir) holds the data
    for <category>/<item> on the given OS, per the XML conf file
    (global conf_file).  Returns the file path, or None when the conf or
    hive file cannot be read.
    """
    # NOTE(review): leftover debug output — prints every item looked up.
    print item
    try:
        f = open(conf_file)
        tree = ElementTree.parse(f)
    except:
        print " Something is wrong with the conf file path given"
        return
    for node in tree.findall('./' + category + '/' + item + '/key'):
        key = node.attrib.get('OS')
        if key == OS:
            string = node.attrib.get('Hive')
            # Probe that the hive file can actually be opened before
            # returning its path.  NOTE(review): `string` is None when
            # the <key> node lacks a Hive attribute — confirm the conf
            # always provides it.
            try:
                f2 = open(hive_dir +"/" + string)
            except:
                print " Something is wrong with the hive file, please check the conf file at line", OS, category, item
                return
            return hive_dir +"/" + string
    return
###
### print & Format output according to the data type
### val: list - val[0]=type, val[1]=value
### return: string - value in val[1]
###
def print_fct ( val ):
    """Format a registry value for display.

    val: (type_id, raw_value) pair as returned by hivex, or a false
    value when the key/value was absent.  Returns a display string.
    Type ids follow the registry: 1=REG_SZ, 2=REG_EXPAND_SZ,
    3=REG_BINARY, 4=REG_DWORD (LE), 5=REG_DWORD_BIG_ENDIAN,
    7=REG_MULTI_SZ.
    """
    if not val:
        # Missing key/value.  (In the original the two failure messages
        # were swapped: an unsupported type returned this message and a
        # missing value fell through to "VALUE TYPE NOT SUPPORTED".)
        return " This key is not defined within the hive file"
    if val[0] in (1, 2, 7):
        # Plain, expandable and long strings are printable as-is.
        return val[1]
    if val[0] in (3, 4):
        # Binary data / little-endian DWORD: dump as hex.
        return ''.join(val[1]).encode('hex')
    if val[0] == 5:
        # Big-endian DWORD: reverse the bytes before hex-dumping.
        return ''.join(reversed(val[1])).encode('hex')
    return "VALUE TYPE NOT SUPPORTED"
###
### Looks in the XML file and return a list of each subkey leading to the node requested
### category: String
### item: string
### OS: string
### return: list
###
def get_key_list (category, item, OS, username = ""):
    """Return the registry path for *category*/*item* under *OS* as a list
    of subkey names, resolving "CurrentControlSet" to the control set that
    is actually in use."""
    raw_path = ""
    conf = open(conf_file)
    tree = ElementTree.parse(conf)
    for node in tree.findall('./' + category + '/' + item + '/key'):
        if node.attrib.get('OS') == OS:
            raw_path = node.attrib.get('path')
            break
    key_path = raw_path.split('\\', -1)
    for idx, key in enumerate(key_path):
        if key == "CurrentControlSet":
            # Substitute the control set number read from SYSTEM\Select.
            key_path[idx] = "ControlSet00" + get_currentcontrolset()
    return key_path
###
### Builds the list of values requested in the conf file
### category: String
### item: string
### OS: string
### return: list
###
def get_values_list ( category, item, OS ):
    """Return the comma-separated 'values' attribute for *category*/*item*
    under *OS* from the conf file, as a list (empty when absent)."""
    attr = ""
    conf = open(conf_file)
    tree = ElementTree.parse(conf)
    for node in tree.findall('./' + category + '/' + item + '/key'):
        if node.attrib.get('OS') == OS:
            attr = node.attrib.get('values')
            break
    if not attr:
        return []
    return attr.split(',', -1)
###
### Look which controlset is the one currently in use
### return: string - controlset
###
def get_currentcontrolset ():
global hive_dir
try:
h = hivex.Hivex(hive_dir+"/system")
hive = "/system"
except:
try:
h = hivex.Hivex(hive_dir+"/SYSTEM")
hive="/SYSTEM"
except:
print "Could not find the current controlset... Did you provide the System hive?"
return
system_hive = hive_dir+hive
h = hivex.Hivex(system_hive)
key = h.root()
key = h.node_get_child(key, "Select")
val = h.node_get_value(key, "Current")
val = h.value_value(val)
return str(int(''.join(reversed(val[1])).encode('hex'), 16))
###
### Return modified time of a key
###
def get_modified_time (hive, path):
    """Return the last-written timestamp of the key at *path* in *hive*,
    formatted as 'YYYY-MM-DD HH:MM:SS' local time."""
    reg = hivex.Hivex(hive)
    node = get_node(hive, path)
    raw = reg.node_timestamp(node)
    # Registry timestamps are 100ns ticks since 1601-01-01; convert to Unix.
    unix_ts = int((raw / 10000000) - 11644473600)
    return datetime.datetime.fromtimestamp(unix_ts).strftime('%Y-%m-%d %H:%M:%S')
###
### Print single value found of a single key found in path
### adds the last written time & path depending on the verbosity set by the user
###
def get_value (hive, path, value_list, timestamp):
global VERBOSITY
h = hivex.Hivex(hive)
key = get_node(hive, path)
i = 0
write_path = ""
if not key:
print "Key not defined in the Hive file Or no values found"
if value_list and value_list[0] == "*":
value_list = h.node_values(key)
while i < len(value_list):
value_list[i] = h.value_key(value_list[i])
i = i + 1
if (timestamp == 1) or (int(VERBOSITY) >= 2):
print " Last Written Time: " + get_modified_time(hive, path)
if (int(VERBOSITY) == 1) or (int(VERBOSITY) == 3):
write_path = hive + "/" + '/'.join(path)
for value in value_list:
try:
val = h.node_get_value(key, value)
val = h.value_value(val)
print " ** ", value, ": ", print_fct(val), " ", write_path
except:
print " ** ", value,": not defined in the Hive file OR no values found"
if value_list:
print " -- No further value listed/requested"
###
### Return list of subkey of a given node (called for type=list)
###
def get_list_child (hive, path):
result = []
h = hivex.Hivex(hive)
key = get_node(hive, path)
try:
for element in h.node_children(key):
result.append(h.node_name(element))
except:
print " That path doesn't exist in the provided Hive file"
return result
###
### Drill down to a specific node and return it
###
def get_node (hive, path):
h = hivex.Hivex(hive)
key = h.root()
i = 0
try:
for iter in path:
key = h.node_get_child(key, iter)
except:
print " That path doesn't exist in the provided Hive file"
return None
return key
###
### Boolean function indicating if it is necessary to explore children of the current node
###
def recursive_search (category, item, OS):
    """Return the integer 'visit_children' flag for *category*/*item* under
    *OS* from the conf file (0 when absent), i.e. whether subkeys of the
    node must be walked.
    """
    # Fix: 'string' used to be unbound when no node matched this OS,
    # raising NameError instead of returning the default.
    string = ""
    f = open(conf_file)
    tree = ElementTree.parse(f)
    for node in tree.findall('./' + category + '/' + item + '/key'):
        key = node.attrib.get('OS')
        if key == OS:
            string = node.attrib.get('visit_children')
            break
    if string:
        return int(string)
    return 0
###
### Boolean function indicating if it is necessary to print the last written time of the node
###
def timestamp_on(category, item, OS):
    """Return the integer 'timestamp' flag for *category*/*item* under *OS*
    from the conf file (0 when absent), i.e. whether the key's last-written
    time must be printed.
    """
    # Fix: 'string' used to be unbound when no node matched this OS,
    # raising NameError instead of returning the default.
    string = ""
    f = open(conf_file)
    tree = ElementTree.parse(f)
    for node in tree.findall('./' + category + '/' + item + '/key'):
        key = node.attrib.get('OS')
        if key == OS:
            string = node.attrib.get('timestamp')
            break
    if string:
        return int(string)
    return 0
###
### goes through all children from a given level
###
def recursive_children(level, hive, path, value_list, timestamp):
h = hivex.Hivex(hive)
key = get_node(hive, path)
if key is None:
print " That item does not exist OR the conf doesn't define it for that OS"
return
if level == 0:
print " **", h.node_name(key)
if len(value_list) is not 0:
get_value(hive, path, value_list, timestamp)
print
return
if level == 1:
print " **", h.node_name(key)
for element in h.node_children(key):
tmp = list(path)
tmp.append(h.node_name(element))
recursive_children(level - 1, hive , tmp, value_list, timestamp)
###
### Seek & print the information
###
def print_information (category, item, OS):
hive = get_hive_location(category, item, OS)
if not hive:
print " That item does not exist OR the conf doesn't define it for that OS"
print
return
values_list = get_values_list (category, item, OS)
path = get_key_list(category, item, OS)
recursive = recursive_search(category, item, OS)
timestamp = timestamp_on(category, item, OS)
## Getting and Printing information from each subcategory
if recursive:
recursive_children(recursive, hive, path, values_list, timestamp)
else:
get_value(hive, path, values_list, timestamp)
print
###
### If the OS has not been given in the CL, it checks in software hive for it
###
def autodetect_os(hive_dir):
    # Detect the Windows version from the SOFTWARE hive's ProductName value.
    # Returns one of "WinXP"/"Vista"/"Win7"/"Win8", or None when the hive is
    # missing or the product string matches none of the patterns below.
    try:
        h = hivex.Hivex(hive_dir+"/software")
        hive = "/software"
    except:
        try:
            h = hivex.Hivex(hive_dir+"/SOFTWARE")
            hive="/SOFTWARE"
        except:
            print "No OS specified and unable to automatically find it... Please check the Hive provided"
            return
    key = get_node(hive_dir+hive, ["Microsoft", "Windows NT","CurrentVersion"])
    val = h.node_get_value(key,"ProductName")
    val = h.value_value(val)
    print
    # The raw value data is NUL-interleaved (UTF-16LE text, presumably),
    # hence the \x00 bytes in the substring tests.  Order matters: the
    # first matching pattern wins.
    if "X\x00P" in val[1]:
        print "OS detected: WinXP"
        return "WinXP"
    if "V\x00i\x00s\x00t\x00a" in val[1]:
        print "OS detected: Vista"
        return "Vista"
    if "\x007\x00" in val[1]:
        print "OS detected: Win7"
        return "Win7"
    if "\x008\x00" in val[1]:
        print "OS detected: Win8"
        return "Win8"
###
### Check the path given in arguments are valid before launching anything.
###
def argument_check(categories, conf_file, hive_dir, system_type):
try:
f = open(conf_file)
except:
print "Could not find a conf file at the location", conf_file
return 0
if not os.path.isdir(hive_dir):
print "Could not find the directory", hive_dir
return 0
f = open(conf_file)
tree = ElementTree.parse(f)
if categories:
for category in categories:
node = tree.findall('./' + category)
if not node:
print "That category \"", category, "\" is not defined in the XML file"
return 0
if system_type is None:
print "You did not provide the OS name and FRP was not able to identify it automatically"
return 0
return 1
###
### If no categories are given in arguments, go over the XML file and add each category to the list.
###
def build_list_category(categories):
    """Return *categories* unchanged when provided; otherwise return every
    top-level category tag found in the conf file."""
    global conf_file
    if categories:
        return categories
    tree = ElementTree.parse(open(conf_file))
    return [child.tag for child in tree.getroot()]
###
### Build the list of children node within the category from the XML file
###
def build_item_list(category):
    """Return the tag names of every item nested under *category* in the
    conf file."""
    global conf_file
    tree = ElementTree.parse(open(conf_file))
    return [item.tag for item in tree.find(category)]
###
### Return all usernames found in the SAM file as a list of string
###
def build_user_list(hive_dir):
user_list = []
try:
h = hivex.Hivex(hive_dir+"/SAM")
hive = "/SAM"
except:
try:
h = hivex.Hivex(hive_dir+"/sam")
hive="/sam"
except:
print "Did not find a SAM hive file therefore cannot build the user_list"
key = get_node(hive_dir+hive, ["SAM", "Domains", "Account","Users", "Names"])
for element in h.node_children(key):
user_list.append(h.node_name(element))
return user_list
class MyOption(Option):
    # optparse Option subclass adding an "extend" action: the option value
    # is split on commas and the parts are appended to the destination
    # list, so "-c a,b -c c" yields ['a', 'b', 'c'].
    ACTIONS = Option.ACTIONS + ("extend",)
    STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
    TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
    ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
    def take_action(self, action, dest, opt, value, values, parser):
        # Handle the custom action; defer everything else to optparse.
        if action == "extend":
            value = value.split(",")
            values.ensure_value(dest, []).extend(value)
        else:
            Option.take_action(self, action, dest, opt, value, values, parser)
VERSION='0.2'
def main ():
    # Entry point: parse the command line, resolve defaults (CWD conf.xml,
    # CWD hive directory, OS autodetection), validate, then print every
    # requested category.
    PROG = os.path.basename(os.path.splitext(__file__)[0])
    description = """Flexible Registry Parser"""
    ## Command Line Options
    parser = OptionParser(option_class=MyOption, usage='usage: %prog -O OS [OPTIONS]', version='%s %s' % (PROG, VERSION), description=description)
    parser.add_option('-c', '--categories', action="extend", type="string", dest='categories', metavar='CATEGORIES', help='comma separated list of categories')
    parser.add_option('-d', '--directory', type="string", action="store", dest="directory", metavar='PATH', help="Directory where the hive files are located. Default is CWD")
    # parser.add_option('-l', '--list-category', dest="list", metavar='LIST', action="store_true", help="Prints all category lookable in the conf file")
    parser.add_option('-u', '--users', action="extend", type="string", dest='users', metavar='USERS', help='List of user to inspect')
    parser.add_option('-f', '--file-conf', type="string", dest="conf_file", metavar='PATH', help="XML file path. Default is CWD/conf.xml")
    parser.add_option('-O', '--OS', type="choice", dest="OS", choices=["WinXP", "Vista", "Win7", "Win8"], help='Type of OS the hive file has been collected from')
    parser.add_option('-v', '--verbose', type="choice", dest="VERB", choices=["0","1","2","3",], help='define the level of verbosity.\n0 = default\n1 = adds path to the key\n2 = adds \"Last modified time\"\n3 = adds both')
    OPTIONS, args = parser.parse_args()
    if len(sys.argv) == 1:
        # No arguments at all: show the help text (parse_args exits).
        parser.parse_args(['--help'])
    global hive_dir
    global VERBOSITY
    global conf_file
    system_type = OPTIONS.OS
    categories = OPTIONS.categories
    users = OPTIONS.users
    conf_file = OPTIONS.conf_file
    hive_dir = OPTIONS.directory
    VERBOSITY = OPTIONS.VERB
    # Fall back to defaults for anything not given on the command line.
    if conf_file is None:
        conf_file = os.getcwd()+"/conf.xml"
    if hive_dir is None:
        hive_dir = os.getcwd()
    if VERBOSITY is None:
        VERBOSITY = 0
    if system_type is None:
        system_type = autodetect_os(hive_dir)
    if not argument_check(categories, conf_file, hive_dir, system_type):
        print "Exiting...."
        return 0
    categories = build_list_category(categories)
    user_list = build_user_list(hive_dir)
    #print user_list
    for category in categories:
        print
        print " **************************************"
        print " ", category
        print " **************************************"
        print
        item_list = build_item_list(category)
        for item in item_list:
            print_information(category, item, system_type)
if __name__ == '__main__':
    main()
| Python |
import kf_lib_v2_2 as lib
import math
headings = "N,NNE,NE,ENE,E,ESE,SE,SSE,S,SSW,SW,WSW,W,WNW,NW,NNW".split(",")
def deg_to_bearing(deg):
    """Map a bearing in degrees to its 16-point compass heading."""
    sector = 360.0 / len(headings)
    # Offset by half a sector so each heading is centred on its degree.
    return headings[int(((deg + sector / 2) % 360) / sector)]
if __name__ == "__main__":
    # Smoke test: print the heading for every degree, incl. wrap past 360.
    for deg in range(365): print deg, deg_to_bearing(deg)
import kf_lib_v2_2 as lib
import sqlite3
import weather
class Flightplanner:
    # Loads the AOPA airport database into memory and attaches a weather
    # source.  'route' is accepted but not used anywhere in this block.
    def __init__(self, route):
        #help(sqlite3.connect)
        self.aopa = sqlite3.connect("Source Data\\aopa.sqlite")
        self.c=self.aopa.cursor()
        #help(self.aopa.cursor)
        #help(self.c.execute)
        self.res = self.c.execute("select * from airports")
        colnames = [col[0] for col in self.c.description]
        # Index airports by the second column of each row (presumably the
        # airport identifier -- TODO confirm against the schema), each
        # mapped to a dict of column name -> value.
        self.airports = dict([(k[1], dict(zip(colnames, k))) for k in self.res])
        self.wx = weather.weather()
fp = Flightplanner("KRFD DEN")
# The lines up to and including sys.stderr should always come first
# Then any errors that occur later get reported to the console
# If you'd prefer to report errors to a file, you can do that instead here.
import sys
from Npp import *
# Set the stderr to the normal console as early as possible, in case of early errors
# (the `console` object is injected into the namespace by `from Npp import *`).
sys.stderr = console
# Define a class for writing to the console in red
class ConsoleError:
    """File-like adapter that routes writes to the console's error (red)
    channel; intended to be installed as sys.stderr."""
    def __init__(self):
        global console
        self._console = console
    def write(self, text):
        self._console.writeError(text)
    def flush(self):
        # Nothing is buffered; present only to satisfy the file protocol.
        pass
# Set the stderr to write errors in red
sys.stderr = ConsoleError()
# This imports the "normal" functions, including "help"
import site
# This sets the stdout to be the currently active document, so print "hello world",
# will insert "hello world" at the current cursor position of the current document
# (the `editor` object is supplied by the Npp import above).
sys.stdout = editor
| Python |
import datetime
# Just in case, we'll clear all the existing callbacks for FILEBEFORESAVE
notepad.clearCallbacks([NOTIFICATION.FILEBEFORESAVE])
# Define the function to call just before the file is saved
def addSaveStamp(args):
    # Only stamp *.log files; args["bufferID"] identifies the buffer being saved.
    if notepad.getBufferFilename(args["bufferID"])[-4:] == '.log':
        if MESSAGEBOXFLAGS.RESULTYES == notepad.messageBox('Hello from Python Script... Would you like to append the date to this log file?', 'Python Script', MESSAGEBOXFLAGS.YESNO):
            # The buffer being saved is not necessarily active; switch first
            # so editor.appendText targets the right document.
            notepad.activateBufferID(args["bufferID"])
            editor.appendText("File saved on %s\r\n" % datetime.date.today())
        if MESSAGEBOXFLAGS.RESULTYES == notepad.messageBox("Would you like to cancel this callback, so you don't get asked again?", "Python Script", MESSAGEBOXFLAGS.YESNO):
            notepad.clearCallbacks([NOTIFICATION.FILEBEFORESAVE])
# ... and register the callback
notepad.callback(addSaveStamp, [NOTIFICATION.FILEBEFORESAVE])
# As this is a sample, we'll inform the user
notepad.messageBox("FILEBEFORESAVE notification registered.\n*.log files will now automatically be modified before saving.\nCtrl-Click the script to edit.\n", "Python Script Demo", 0)
# Disable the virtual space options for both Scintilla views, apart from for rectangular selection (the default in Notepad++)
# For more information, see the Scintilla documentation on virtual space and the SCI_SETVIRTUALSPACEOPTIONS message.
# NOTE(review): 1 is presumably SCVS_RECTANGULARSELECTION -- confirm against Scintilla.h.
editor1.setVirtualSpaceOptions(1)
editor2.setVirtualSpaceOptions(1)
| Python |
# Enable the virtual space options for both Scintilla views
# For more information, see the Scintilla documentation on virtual space and the SCI_SETVIRTUALSPACEOPTIONS message.
# NOTE(review): 3 is presumably rectangular-selection | user-accessible virtual space -- confirm against Scintilla.h.
editor1.setVirtualSpaceOptions(3)
editor2.setVirtualSpaceOptions(3)
| Python |
# First we'll start an undo action, then Ctrl-Z will undo the actions of the whole script
editor.beginUndoAction()
# Do a Python regular expression replace, full support of Python regular expressions
# This replaces any three uppercase letters that are repeated,
# so ABDABD, DEFDEF or DGBDGB etc. the first 3 characters,
# so ABD, DEF and DGB in these cases.
# (pyreplace/pymlreplace are Python Script plugin helpers on the editor object.)
editor.pyreplace(r"([A-Z]{3})\1", r"\1")
# Do a multi-line Python regular expression replace.
# This example replaces any <br/> that is followed by another on the next line (with optional spaces in between), with a single one
editor.pymlreplace(r"<br/>\s*\r\n\s*<br/>", "<br/>\r\n")
# End the undo action, so Ctrl-Z will undo the above two actions
editor.endUndoAction()
def testContents(contents, lineNumber, totalLines):
    """forEachLine callback: clean up the document one line at a time.

    The return value tells forEachLine how many lines to advance
    (returning nothing means the default of 1).
    """
    if contents.strip() == "rubbish":
        editor.deleteLine(lineNumber)
        # As we've deleted the line, the "next" line to process
        # is actually the current line, so we return 0 to advance zero lines
        # and hence stay on the same line
        return 0
    elif contents.strip() == "something old":
        editor.replaceLine(lineNumber, "something new")
    elif contents.strip() == "little something":
        # Fix: this call was missing its closing parenthesis, which made
        # the whole script a SyntaxError.
        editor.replaceLine(lineNumber, "BIG\nSOMETHING")
        # Here we return 2, as we've inserted a newline,
        # and we don't want to test the "SOMETHING" line again
        return 2
    # if you wanted, you could optionally return 1 here, to move the next line
    # but that's the default, so you don't need to bother.
editor.forEachLine(testContents)
# Inform the user that we've done it
notepad.messageBox("Replacements and removals completed", "Python Script Demo", 0)
"""Python part of the warnings subsystem."""
# Note: function level imports should *not* be used
# in this module as it may cause import lock deadlock.
# See bug 683658.
import linecache
import sys
import types
__all__ = ["warn", "showwarning", "formatwarning", "filterwarnings",
"resetwarnings", "catch_warnings"]
def warnpy3k(message, category=None, stacklevel=1):
    """Issue a deprecation warning for Python 3.x related changes.
    Warnings are omitted unless Python is started with the -3 option.
    """
    # sys.py3kwarning is True only when the interpreter was run with -3.
    if sys.py3kwarning:
        if category is None:
            category = DeprecationWarning
        # +1 so the warning is attributed to warnpy3k's caller, not here.
        warn(message, category, stacklevel+1)
def _show_warning(message, category, filename, lineno, file=None, line=None):
    """Hook to write a warning to a file; replace if you like."""
    if file is None:
        file = sys.stderr
    try:
        file.write(formatwarning(message, category, filename, lineno, line))
    except IOError:
        pass # the file (probably stderr) is invalid - this warning gets lost.
# Keep a working version around in case the deprecation of the old API is
# triggered.  warn_explicit() dispatches through this module-level name.
showwarning = _show_warning
def formatwarning(message, category, filename, lineno, line=None):
    """Function to format a warning the standard way."""
    result = "%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)
    if line is None:
        # Fetch the offending source line for context, if available.
        line = linecache.getline(filename, lineno)
    if line:
        result += "  %s\n" % line.strip()
    return result
def filterwarnings(action, message="", category=Warning, module="", lineno=0,
                   append=0):
    """Insert an entry into the list of warnings filters (at the front).
    'action' -- one of "error", "ignore", "always", "default", "module",
                or "once"
    'message' -- a regex that the warning message must match
    'category' -- a class that the warning must be a subclass of
    'module' -- a regex that the module name must match
    'lineno' -- an integer line number, 0 matches all warnings
    'append' -- if true, append to the list of filters
    """
    import re
    assert action in ("error", "ignore", "always", "default", "module",
                      "once"), "invalid action: %r" % (action,)
    assert isinstance(message, basestring), "message must be a string"
    assert isinstance(category, (type, types.ClassType)), \
           "category must be a class"
    assert issubclass(category, Warning), "category must be a Warning subclass"
    assert isinstance(module, basestring), "module must be a string"
    assert isinstance(lineno, int) and lineno >= 0, \
           "lineno must be an int >= 0"
    # The message regex is matched case-insensitively; the module regex is not.
    item = (action, re.compile(message, re.I), category,
            re.compile(module), lineno)
    if append:
        filters.append(item)
    else:
        # Filters are searched in order, so inserting at the front makes
        # the newest filter take precedence.
        filters.insert(0, item)
def simplefilter(action, category=Warning, lineno=0, append=0):
    """Insert a simple entry into the list of warnings filters (at the front).
    A simple filter matches all modules and messages.
    'action' -- one of "error", "ignore", "always", "default", "module",
                or "once"
    'category' -- a class that the warning must be a subclass of
    'lineno' -- an integer line number, 0 matches all warnings
    'append' -- if true, append to the list of filters
    """
    assert action in ("error", "ignore", "always", "default", "module",
                      "once"), "invalid action: %r" % (action,)
    assert isinstance(lineno, int) and lineno >= 0, \
           "lineno must be an int >= 0"
    # None for the message/module regex slots means "match anything".
    new_filter = (action, None, category, None, lineno)
    if append:
        filters.append(new_filter)
    else:
        filters.insert(0, new_filter)
def resetwarnings():
    """Clear the list of warning filters, so that no filters are active."""
    # Mutate the shared list in place so other references stay valid.
    del filters[:]
class _OptionError(Exception):
    """Exception used by option processing helpers."""
    # Raised by _setoption/_getaction/_getcategory for malformed -W
    # arguments; caught and reported (non-fatally) in _processoptions.
    pass
# Helper to process -W options passed via sys.warnoptions
def _processoptions(args):
    # Each arg is one -W command-line specification; a malformed one is
    # reported on stderr but never aborts interpreter startup.
    for arg in args:
        try:
            _setoption(arg)
        except _OptionError, msg:
            print >>sys.stderr, "Invalid -W option ignored:", msg
# Helper for _processoptions()
def _setoption(arg):
    # Parse one -W spec of the form action:message:category:module:lineno
    # (all fields optional) and install the corresponding filter.
    import re
    parts = arg.split(':')
    if len(parts) > 5:
        raise _OptionError("too many fields (max 5): %r" % (arg,))
    while len(parts) < 5:
        parts.append('')
    action, message, category, module, lineno = [s.strip()
                                                 for s in parts]
    action = _getaction(action)
    # message/module are matched literally, so escape regex metacharacters.
    message = re.escape(message)
    category = _getcategory(category)
    module = re.escape(module)
    if module:
        # Anchor so the module spec must match the full module name.
        module = module + '$'
    if lineno:
        try:
            lineno = int(lineno)
            if lineno < 0:
                raise ValueError
        except (ValueError, OverflowError):
            raise _OptionError("invalid lineno %r" % (lineno,))
    else:
        lineno = 0
    filterwarnings(action, message, category, module, lineno)
# Helper for _setoption()
def _getaction(action):
if not action:
return "default"
if action == "all": return "always" # Alias
for a in ('default', 'always', 'ignore', 'module', 'once', 'error'):
if a.startswith(action):
return a
raise _OptionError("invalid action: %r" % (action,))
# Helper for _setoption()
def _getcategory(category):
    # Resolve the category field of a -W option to a Warning subclass.
    import re
    if not category:
        return Warning
    if re.match("^[a-zA-Z0-9_]+$", category):
        # Bare name: look it up among the builtin warning categories.
        # NOTE: eval() here is only fed from sys.warnoptions (the local
        # command line), not from remote/untrusted input.
        try:
            cat = eval(category)
        except NameError:
            raise _OptionError("unknown warning category: %r" % (category,))
    else:
        # Dotted name: import the module and fetch the class from it.
        i = category.rfind(".")
        module = category[:i]
        klass = category[i+1:]
        try:
            m = __import__(module, None, None, [klass])
        except ImportError:
            raise _OptionError("invalid module name: %r" % (module,))
        try:
            cat = getattr(m, klass)
        except AttributeError:
            raise _OptionError("unknown warning category: %r" % (category,))
    if not issubclass(cat, Warning):
        raise _OptionError("invalid warning category: %r" % (category,))
    return cat
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1):
    """Issue a warning, or maybe ignore it or raise an exception."""
    # Check if message is already a Warning object
    if isinstance(message, Warning):
        category = message.__class__
    # Check category argument
    if category is None:
        category = UserWarning
    assert issubclass(category, Warning)
    # Get context information
    try:
        caller = sys._getframe(stacklevel)
    except ValueError:
        # Stack not deep enough: attribute the warning to sys itself.
        globals = sys.__dict__
        lineno = 1
    else:
        globals = caller.f_globals
        lineno = caller.f_lineno
    if '__name__' in globals:
        module = globals['__name__']
    else:
        module = "<string>"
    filename = globals.get('__file__')
    if filename:
        fnl = filename.lower()
        if fnl.endswith((".pyc", ".pyo")):
            # Point at the .py source rather than the compiled file.
            filename = filename[:-1]
    else:
        if module == "__main__":
            try:
                filename = sys.argv[0]
            except AttributeError:
                # embedded interpreters don't have sys.argv, see bug #839151
                filename = '__main__'
        if not filename:
            filename = module
    # Per-module registry of already-issued warnings (used for dedup).
    registry = globals.setdefault("__warningregistry__", {})
    warn_explicit(message, category, filename, lineno, module, registry,
                  globals)
def warn_explicit(message, category, filename, lineno,
                  module=None, registry=None, module_globals=None):
    # Core dispatch: match the warning against the filters list, honour the
    # per-module / once registries, then raise it or hand it to showwarning.
    lineno = int(lineno)
    if module is None:
        module = filename or "<unknown>"
        if module[-3:].lower() == ".py":
            module = module[:-3] # XXX What about leading pathname?
    if registry is None:
        registry = {}
    if isinstance(message, Warning):
        text = str(message)
        category = message.__class__
    else:
        text = message
        message = category(message)
    key = (text, category, lineno)
    # Quick test for common case
    if registry.get(key):
        return
    # Search the filters
    for item in filters:
        action, msg, cat, mod, ln = item
        if ((msg is None or msg.match(text)) and
            issubclass(category, cat) and
            (mod is None or mod.match(module)) and
            (ln == 0 or lineno == ln)):
            break
    else:
        action = defaultaction
    # Early exit actions
    if action == "ignore":
        registry[key] = 1
        return
    # Prime the linecache for formatting, in case the
    # "file" is actually in a zipfile or something.
    linecache.getlines(filename, module_globals)
    if action == "error":
        raise message
    # Other actions
    if action == "once":
        # Suppress repeats of this warning everywhere, via onceregistry.
        registry[key] = 1
        oncekey = (text, category)
        if onceregistry.get(oncekey):
            return
        onceregistry[oncekey] = 1
    elif action == "always":
        pass
    elif action == "module":
        # Suppress repeats within this module regardless of line number.
        registry[key] = 1
        altkey = (text, category, 0)
        if registry.get(altkey):
            return
        registry[altkey] = 1
    elif action == "default":
        registry[key] = 1
    else:
        # Unrecognized actions are errors
        raise RuntimeError(
              "Unrecognized action (%r) in warnings.filters:\n %s" %
              (action, item))
    # Print message and context
    showwarning(message, category, filename, lineno)
class WarningMessage(object):
    """Holds the result of a single showwarning() call."""
    _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
                        "line")
    def __init__(self, message, category, filename, lineno, file=None,
                 line=None):
        # Store each argument under its parameter name (the original used a
        # locals() loop; explicit assignments behave identically).
        self.message = message
        self.category = category
        self.filename = filename
        self.lineno = lineno
        self.file = file
        self.line = line
        self._category_name = category.__name__ if category else None
    def __str__(self):
        return ("{message : %r, category : %r, filename : %r, lineno : %s, "
                "line : %r}" % (self.message, self._category_name,
                                self.filename, self.lineno, self.line))
class catch_warnings(object):
    """A context manager that copies and restores the warnings filter upon
    exiting the context.
    The 'record' argument specifies whether warnings should be captured by a
    custom implementation of warnings.showwarning() and be appended to a list
    returned by the context manager. Otherwise None is returned by the context
    manager. The objects appended to the list are arguments whose attributes
    mirror the arguments to showwarning().
    The 'module' argument is to specify an alternative module to the module
    named 'warnings' and imported under that name. This argument is only useful
    when testing the warnings module itself.
    """
    def __init__(self, record=False, module=None):
        """Specify whether to record warnings and if an alternative module
        should be used other than sys.modules['warnings'].
        For compatibility with Python 3.0, please consider all arguments to be
        keyword-only.
        """
        self._record = record
        self._module = sys.modules['warnings'] if module is None else module
        self._entered = False
    def __repr__(self):
        args = []
        if self._record:
            args.append("record=True")
        if self._module is not sys.modules['warnings']:
            args.append("module=%r" % self._module)
        name = type(self).__name__
        return "%s(%s)" % (name, ", ".join(args))
    def __enter__(self):
        # Not reentrant: each instance may only be entered once.
        if self._entered:
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # Save the filter list and give the module a copy to mutate, so
        # __exit__ can restore the original object untouched.
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._showwarning = self._module.showwarning
        if self._record:
            log = []
            def showwarning(*args, **kwargs):
                log.append(WarningMessage(*args, **kwargs))
            self._module.showwarning = showwarning
            return log
        else:
            return None
    def __exit__(self, *exc_info):
        if not self._entered:
            raise RuntimeError("Cannot exit %r without entering first" % self)
        # Restore the saved filter list and showwarning hook.
        self._module.filters = self._filters
        self._module.showwarning = self._showwarning
# filters contains a sequence of filter 5-tuples
# The components of the 5-tuple are:
# - an action: error, ignore, always, default, module, or once
# - a compiled regex that must match the warning message
# - a class representing the warning category
# - a compiled regex that must match the module that is being warned
# - a line number for the line being warning, or 0 to mean any line
# If either of the compiled regexes is None, match anything.
_warnings_defaults = False
try:
    # Prefer the C implementation when available; it supplies the shared
    # filter state and the fast warn/warn_explicit entry points.
    from _warnings import (filters, default_action, once_registry,
                           warn, warn_explicit)
    defaultaction = default_action
    onceregistry = once_registry
    _warnings_defaults = True
except ImportError:
    filters = []
    defaultaction = "default"
    onceregistry = {}
# Module initialization
_processoptions(sys.warnoptions)
if not _warnings_defaults:
    # Pure-Python fallback: install the default filter set by hand.
    silence = [ImportWarning, PendingDeprecationWarning]
    # Don't silence DeprecationWarning if -3 or -Q was used.
    if not sys.py3kwarning and not sys.flags.division_warning:
        silence.append(DeprecationWarning)
    for cls in silence:
        simplefilter("ignore", category=cls)
    bytes_warning = sys.flags.bytes_warning
    if bytes_warning > 1:
        bytes_action = "error"
    elif bytes_warning:
        bytes_action = "default"
    else:
        bytes_action = "ignore"
    simplefilter(bytes_action, category=BytesWarning, append=1)
del _warnings_defaults
"""General floating point formatting functions.
Functions:
fix(x, digits_behind)
sci(x, digits_behind)
Each takes a number or a string and a number of digits as arguments.
Parameters:
x: number to be formatted; or a string resembling a number
digits_behind: number of digits behind the decimal point
"""
from warnings import warnpy3k
warnpy3k("the fpformat module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import re
__all__ = ["fix","sci","NotANumber"]
# Compiled regular expression to "decode" a number
decoder = re.compile(r'^([-+]?)0*(\d*)((?:\.\d*)?)(([eE][-+]?\d+)?)$')
# \0 the whole thing
# \1 leading sign or empty
# \2 digits left of decimal point
# \3 fraction (empty or begins with point)
# \4 exponent part (empty or begins with 'e' or 'E')
try:
    class NotANumber(ValueError):
        pass
except TypeError:
    # Very old interpreters could not subclass built-in exceptions;
    # fall back to a string exception for compatibility.
    NotANumber = 'fpformat.NotANumber'
def extract(s):
    """Return (sign, intpart, fraction, expo) or raise an exception:
    sign is '+' or '-'
    intpart is 0 or more digits beginning with a nonzero
    fraction is 0 or more digits
    expo is an integer"""
    res = decoder.match(s)
    if res is None: raise NotANumber, s
    sign, intpart, fraction, exppart = res.group(1,2,3,4)
    # Normalize: drop an explicit '+' sign, strip the leading '.' from the
    # fraction group and the leading 'e'/'E' from the exponent group.
    if sign == '+': sign = ''
    if fraction: fraction = fraction[1:]
    if exppart: expo = int(exppart[1:])
    else: expo = 0
    return sign, intpart, fraction, expo
def unexpo(intpart, fraction, expo):
    """Fold the exponent into the digit strings, returning a new
    (intpart, fraction) pair with *expo* applied."""
    if expo > 0:
        # Shift digits from the fraction into the integer part,
        # zero-padding on the right when the fraction runs out.
        shortfall = expo - len(fraction)
        intpart = intpart + fraction[:expo]
        fraction = fraction[expo:]
        if shortfall > 0:
            intpart += '0' * shortfall
    elif expo < 0:
        # Shift digits from the integer part into the fraction,
        # zero-padding on the left when the integer part runs out.
        shortfall = -expo - len(intpart)
        fraction = intpart[expo:] + fraction
        intpart = intpart[:expo]
        if shortfall > 0:
            fraction = '0' * shortfall + fraction
    return intpart, fraction
def roundfrac(intpart, fraction, digs):
    """Round or extend the fraction to size digs."""
    flen = len(fraction)
    if flen <= digs:
        # Nothing to round away; just right-pad with zeros.
        return intpart, fraction + '0' * (digs - flen)
    ilen = len(intpart)
    if ilen + digs < 0:
        # The rounding position lies left of every digit we have.
        return '0' * -digs, ''
    digits = intpart + fraction
    if digits[ilen + digs] >= '5':
        # Round up: walk left over trailing nines to find the digit to bump.
        pos = ilen + digs - 1
        while pos >= 0 and digits[pos] == '9':
            pos -= 1
        if pos < 0:
            # All nines: the number grows a digit (e.g. 99 -> 100).
            digits = '0' + digits
            ilen += 1
            pos = 0
        digits = digits[:pos] + chr(ord(digits[pos]) + 1) + '0' * (len(digits) - pos - 1)
    intpart, fraction = digits[:ilen], digits[ilen:]
    if digs >= 0:
        return intpart, fraction[:digs]
    return intpart[:digs] + '0' * -digs, ''
def fix(x, digs):
    """Format x as [-]ddd.ddd with 'digs' digits after the point
    and at least one digit before.
    If digs <= 0, the point is suppressed."""
    if type(x) != type(''): x = repr(x)
    try:
        sign, intpart, fraction, expo = extract(x)
    except NotANumber:
        # Not parseable as a number: hand the input back untouched.
        return x
    intpart, fraction = unexpo(intpart, fraction, expo)
    intpart, fraction = roundfrac(intpart, fraction, digs)
    # Strip leading zeros but keep at least one integer digit.
    intpart = intpart.lstrip('0') or '0'
    if digs > 0:
        return sign + intpart + '.' + fraction
    return sign + intpart
def sci(x, digs):
    """Format x as [-]d.dddE[+-]ddd with 'digs' digits after the point
    and exactly one digit before.
    If digs is <= 0, one digit is kept and the point is suppressed."""
    if type(x) != type(''): x = repr(x)
    sign, intpart, fraction, expo = extract(x)
    if not intpart:
        # No integer digits: shift the first significant fraction digit
        # into the integer position, adjusting the exponent as we go.
        while fraction and fraction[0] == '0':
            fraction = fraction[1:]
            expo = expo - 1
        if fraction:
            intpart, fraction = fraction[0], fraction[1:]
            expo = expo - 1
        else:
            intpart = '0'
    else:
        # Keep a single leading digit; fold the rest into the fraction.
        expo = expo + len(intpart) - 1
        intpart, fraction = intpart[0], intpart[1:] + fraction
    digs = max(0, digs)
    intpart, fraction = roundfrac(intpart, fraction, digs)
    if len(intpart) > 1:
        # Rounding produced a carry (e.g. 9.99 -> 10.0); renormalize.
        intpart, fraction, expo = \
            intpart[0], intpart[1:] + fraction[:-1], \
            expo + len(intpart) - 1
    s = sign + intpart
    if digs > 0: s = s + '.' + fraction
    # The exponent is always signed and at least three digits wide.
    e = repr(abs(expo))
    e = '0'*(3-len(e)) + e
    if expo < 0: e = '-' + e
    else: e = '+' + e
    return s + 'e' + e
def test():
    """Interactive test run."""
    # Reads "(x, digs)" tuples via input() until EOF or Ctrl-C.
    try:
        while 1:
            x, digs = input('Enter (x, digs): ')
            print x, fix(x, digs), sci(x, digs)
    except (EOFError, KeyboardInterrupt):
        pass
| Python |
r"""Utilities to compile possibly incomplete Python source code.
This module provides two interfaces, broadly similar to the builtin
function compile(), which take program text, a filename and a 'mode'
and:
- Return code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
Approach:
First, check if the source consists entirely of blank lines and
comments; if so, replace it with 'pass', because the built-in
parser doesn't always do the right thing for these.
Compile three times: as is, with \n, and with \n\n appended. If it
compiles as is, it's complete. If it compiles with one \n appended,
we expect more. If it doesn't compile either way, we compare the
error we get when compiling with \n or \n\n appended. If the errors
are the same, the code is broken. But if the errors are different, we
expect more. Not intuitive; not even guaranteed to hold in future
releases; but this matches the compiler's behavior from Python 1.4
through 2.2, at least.
Caveat:
It is possible (but not likely) that the parser stops parsing with a
successful outcome before reaching the end of the source; in this
case, trailing symbols may be ignored instead of causing an error.
For example, a backslash followed by two newlines may be followed by
arbitrary garbage. This will be fixed once the API for the parser is
better.
The two interfaces are:
compile_command(source, filename, symbol):
Compiles a single command in the manner described above.
CommandCompiler():
Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force.
The module also provides another class:
Compile():
Instances of this class act like the built-in function compile,
but with 'memory' in the sense described above.
"""
import __future__
_features = [getattr(__future__, fname)
for fname in __future__.all_feature_names]
__all__ = ["compile_command", "Compile", "CommandCompiler"]
PyCF_DONT_IMPLY_DEDENT = 0x200 # Matches pythonrun.h
def _maybe_compile(compiler, source, filename, symbol):
# Check for source consisting of only blank lines and comments
for line in source.split("\n"):
line = line.strip()
if line and line[0] != '#':
break # Leave it alone
else:
if symbol != "eval":
source = "pass" # Replace it with a 'pass' statement
err = err1 = err2 = None
code = code1 = code2 = None
try:
code = compiler(source, filename, symbol)
except SyntaxError, err:
pass
try:
code1 = compiler(source + "\n", filename, symbol)
except SyntaxError, err1:
pass
try:
code2 = compiler(source + "\n\n", filename, symbol)
except SyntaxError, err2:
pass
if code:
return code
if not code1 and repr(err1) == repr(err2):
raise SyntaxError, err1
def _compile(source, filename, symbol):
    """compile() wrapper used as the stateless compiler for
    compile_command(); keeps the implicit trailing-NEWLINE dedent off so
    unfinished blocks raise instead of compiling."""
    flags = PyCF_DONT_IMPLY_DEDENT
    return compile(source, filename, symbol, flags)
def compile_command(source, filename="<input>", symbol="single"):
    r"""Compile a command and report whether it is incomplete.

    Arguments:
    source -- the source string; may contain \n characters
    filename -- optional filename from which source was read; default
                "<input>"
    symbol -- optional grammar start symbol; "single" (default) or "eval"

    Return value / exceptions raised:
    - Return a code object if the command is complete and valid
    - Return None if the command is incomplete
    - Raise SyntaxError, ValueError or OverflowError if the command is a
      syntax error (OverflowError and ValueError can be produced by
      malformed literals).
    """
    result = _maybe_compile(_compile, source, filename, symbol)
    return result
class Compile:
    """A compile()-workalike with a memory for __future__ statements.
    Whenever compiled text turns on a future feature, the matching
    compiler flag is recorded and applied to every later compilation."""
    def __init__(self):
        # Start from the flag codeop always wants (see _compile above).
        self.flags = PyCF_DONT_IMPLY_DEDENT
    def __call__(self, source, filename, symbol):
        code_object = compile(source, filename, symbol, self.flags, 1)
        # Remember any future-feature flags the compiled code enabled.
        for feature in _features:
            flag = feature.compiler_flag
            if code_object.co_flags & flag:
                self.flags |= flag
        return code_object
class CommandCompiler:
    """Callable with the compile_command() signature that, once it has
    compiled text containing a __future__ statement, keeps compiling all
    subsequent program texts with that statement in force."""
    def __init__(self):
        # The stateful compiler that accumulates future-feature flags.
        self.compiler = Compile()
    def __call__(self, source, filename="<input>", symbol="single"):
        r"""Compile a command and report whether it is incomplete.

        Arguments:
        source -- the source string; may contain \n characters
        filename -- optional filename from which source was read;
                    default "<input>"
        symbol -- optional grammar start symbol; "single" (default) or
                  "eval"

        Return value / exceptions raised:
        - Return a code object if the command is complete and valid
        - Return None if the command is incomplete
        - Raise SyntaxError, ValueError or OverflowError if the command is a
          syntax error (OverflowError and ValueError can be produced by
          malformed literals).
        """
        return _maybe_compile(self.compiler, source, filename, symbol)
| Python |
"""Provide a (g)dbm-compatible interface to bsddb.hashopen."""
import sys
import warnings
warnings.warnpy3k("in 3.x, the dbhash module has been removed", stacklevel=2)
try:
import bsddb
except ImportError:
# prevent a second import of this module from spuriously succeeding
del sys.modules[__name__]
raise
__all__ = ["error","open"]
error = bsddb.error # Exported for anydbm
def open(file, flag = 'r', mode=0666):
    """Open 'file' as a bsddb hash-format database and return it.
    Mimics the (g)dbm open() signature: flag defaults to 'r'; mode is an
    octal int -- presumably the file-creation permission bits, verify
    against bsddb.hashopen.  Both are passed straight through.
    """
    return bsddb.hashopen(file, flag, mode)
| Python |
"""Create portable serialized representations of Python objects.
See module cPickle for a (much) faster implementation.
See module copy_reg for a mechanism for registering custom picklers.
See module pickletools source for extensive comments.
Classes:
Pickler
Unpickler
Functions:
dump(object, file)
dumps(object) -> string
load(file) -> object
loads(string) -> object
Misc variables:
__version__
format_version
compatible_formats
"""
__version__ = "$Revision$" # Code version
from types import *
from copy_reg import dispatch_table
from copy_reg import _extension_registry, _inverted_registry, _extension_cache
import marshal
import sys
import struct
import re
__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler",
"Unpickler", "dump", "dumps", "load", "loads"]
# These are purely informational; no code uses these.
format_version = "2.0" # File format version we write
compatible_formats = ["1.0", # Original protocol 0
"1.1", # Protocol 0 with INST added
"1.2", # Original protocol 1
"1.3", # Protocol 1 with BINFLOAT added
"2.0", # Protocol 2
] # Old format versions we can read
# Keep in synch with cPickle. This is the highest protocol number we
# know how to read.
HIGHEST_PROTOCOL = 2
# Why use struct.pack() for pickling but marshal.loads() for
# unpickling? struct.pack() is 40% faster than marshal.dumps(), but
# marshal.loads() is twice as fast as struct.unpack()!
mloads = marshal.loads
class PickleError(Exception):
    """A common base class for the other pickling exceptions."""
    # Root of this module's exception hierarchy: catching PickleError
    # handles both PicklingError and UnpicklingError.
    pass
class PicklingError(PickleError):
    """This exception is raised when an unpicklable object is passed to the
    dump() method.
    """
    # Raised on the writing (Pickler) side only.
    pass
class UnpicklingError(PickleError):
    """This exception is raised when there is a problem unpickling an object,
    such as a security violation.
    Note that other exceptions may also be raised during unpickling, including
    (but not necessarily limited to) AttributeError, EOFError, ImportError,
    and IndexError.
    """
    # Raised on the reading (Unpickler) side only.
    pass
# An instance of _Stop is raised by Unpickler.load_stop() in response to
# the STOP opcode, passing the object that is the result of unpickling.
class _Stop(Exception):
    """Internal flow-control exception: raised by Unpickler.load_stop()
    when the STOP opcode is seen, carrying the finished object."""
    def __init__(self, value):
        # value is the fully unpickled root object.
        self.value = value
# Jython has PyStringMap; it's a dict subclass with string keys
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
# UnicodeType may or may not be exported (normally imported from types)
try:
UnicodeType
except NameError:
UnicodeType = None
# Pickle opcodes. See pickletools.py for extensive docs. The listing
# here is in kind-of alphabetical order of 1-character pickle code.
# pickletools groups them by purpose.
MARK = '(' # push special markobject on stack
STOP = '.' # every pickle ends with STOP
POP = '0' # discard topmost stack item
POP_MARK = '1' # discard stack top through topmost markobject
DUP = '2' # duplicate top stack item
FLOAT = 'F' # push float object; decimal string argument
INT = 'I' # push integer or bool; decimal string argument
BININT = 'J' # push four-byte signed int
BININT1 = 'K' # push 1-byte unsigned int
LONG = 'L' # push long; decimal string argument
BININT2 = 'M' # push 2-byte unsigned int
NONE = 'N' # push None
PERSID = 'P' # push persistent object; id is taken from string arg
BINPERSID = 'Q' # " " " ; " " " " stack
REDUCE = 'R' # apply callable to argtuple, both on stack
STRING = 'S' # push string; NL-terminated string argument
BINSTRING = 'T' # push string; counted binary string argument
SHORT_BINSTRING = 'U' # " " ; " " " " < 256 bytes
UNICODE = 'V' # push Unicode string; raw-unicode-escaped'd argument
BINUNICODE = 'X' # " " " ; counted UTF-8 string argument
APPEND = 'a' # append stack top to list below it
BUILD = 'b' # call __setstate__ or __dict__.update()
GLOBAL = 'c' # push self.find_class(modname, name); 2 string args
DICT = 'd' # build a dict from stack items
EMPTY_DICT = '}' # push empty dict
APPENDS = 'e' # extend list on stack by topmost stack slice
GET = 'g' # push item from memo on stack; index is string arg
BINGET = 'h' # " " " " " " ; " " 1-byte arg
INST = 'i' # build & push class instance
LONG_BINGET = 'j' # push item from memo on stack; index is 4-byte arg
LIST = 'l' # build list from topmost stack items
EMPTY_LIST = ']' # push empty list
OBJ = 'o' # build & push class instance
PUT = 'p' # store stack top in memo; index is string arg
BINPUT = 'q' # " " " " " ; " " 1-byte arg
LONG_BINPUT = 'r' # " " " " " ; " " 4-byte arg
SETITEM = 's' # add key+value pair to dict
TUPLE = 't' # build tuple from topmost stack items
EMPTY_TUPLE = ')' # push empty tuple
SETITEMS = 'u' # modify dict by adding topmost key+value pairs
BINFLOAT = 'G' # push float; arg is 8-byte float encoding
TRUE = 'I01\n' # not an opcode; see INT docs in pickletools.py
FALSE = 'I00\n' # not an opcode; see INT docs in pickletools.py
# Protocol 2
PROTO = '\x80' # identify pickle protocol
NEWOBJ = '\x81' # build object by applying cls.__new__ to argtuple
EXT1 = '\x82' # push object from extension registry; 1-byte index
EXT2 = '\x83' # ditto, but 2-byte index
EXT4 = '\x84' # ditto, but 4-byte index
TUPLE1 = '\x85' # build 1-tuple from stack top
TUPLE2 = '\x86' # build 2-tuple from two topmost stack items
TUPLE3 = '\x87' # build 3-tuple from three topmost stack items
NEWTRUE = '\x88' # push True
NEWFALSE = '\x89' # push False
LONG1 = '\x8a' # push long from < 256 bytes
LONG4 = '\x8b' # push really big long
_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
__all__.extend([x for x in dir() if re.match("[A-Z][A-Z0-9_]+$",x)])
del x
# Pickling machinery
class Pickler:
    """Write a pickled representation of Python objects to a file-like
    object.  One save_<type>() method per supported type, selected via
    the class-level 'dispatch' table keyed on type."""
    def __init__(self, file, protocol=None):
        """This takes a file-like object for writing a pickle data stream.
        The optional protocol argument tells the pickler to use the
        given protocol; supported protocols are 0, 1, 2. The default
        protocol is 0, to be backwards compatible. (Protocol 0 is the
        only protocol that can be written to a file opened in text
        mode and read back successfully. When using a protocol higher
        than 0, make sure the file is opened in binary mode, both when
        pickling and unpickling.)
        Protocol 1 is more efficient than protocol 0; protocol 2 is
        more efficient than protocol 1.
        Specifying a negative protocol version selects the highest
        protocol version supported. The higher the protocol used, the
        more recent the version of Python needed to read the pickle
        produced.
        The file parameter must have a write() method that accepts a single
        string argument. It can thus be an open file object, a StringIO
        object, or any other custom object that meets this interface.
        """
        if protocol is None:
            protocol = 0
        if protocol < 0:
            # Negative protocol means "newest protocol we know how to write".
            protocol = HIGHEST_PROTOCOL
        elif not 0 <= protocol <= HIGHEST_PROTOCOL:
            raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL)
        self.write = file.write
        # memo maps id(obj) -> (memo key, obj); see memoize() below.
        self.memo = {}
        self.proto = int(protocol)
        # bin selects the binary opcode forms (protocols 1 and 2).
        self.bin = protocol >= 1
        # When fast is true, memoize() is a no-op (no PUT bookkeeping).
        self.fast = 0
    def clear_memo(self):
        """Clears the pickler's "memo".
        The memo is the data structure that remembers which objects the
        pickler has already seen, so that shared or recursive objects are
        pickled by reference and not by value. This method is useful when
        re-using picklers.
        """
        self.memo.clear()
    def dump(self, obj):
        """Write a pickled representation of obj to the open file."""
        # Protocol 2 streams start with a PROTO opcode naming the version.
        if self.proto >= 2:
            self.write(PROTO + chr(self.proto))
        self.save(obj)
        self.write(STOP)
    def memoize(self, obj):
        """Store an object in the memo."""
        # The Pickler memo is a dictionary mapping object ids to 2-tuples
        # that contain the Unpickler memo key and the object being memoized.
        # The memo key is written to the pickle and will become
        # the key in the Unpickler's memo.  The object is stored in the
        # Pickler memo so that transient objects are kept alive during
        # pickling.
        # The use of the Unpickler memo length as the memo key is just a
        # convention.  The only requirement is that the memo values be unique.
        # But there appears no advantage to any other scheme, and this
        # scheme allows the Unpickler memo to be implemented as a plain (but
        # growable) array, indexed by memo key.
        if self.fast:
            return
        assert id(obj) not in self.memo
        memo_len = len(self.memo)
        self.write(self.put(memo_len))
        self.memo[id(obj)] = memo_len, obj
    # Return a PUT (BINPUT, LONG_BINPUT) opcode string, with argument i.
    def put(self, i, pack=struct.pack):
        """Return the opcode string that stores stack top in memo slot i."""
        if self.bin:
            if i < 256:
                return BINPUT + chr(i)
            else:
                return LONG_BINPUT + pack("<i", i)
        return PUT + repr(i) + '\n'
    # Return a GET (BINGET, LONG_BINGET) opcode string, with argument i.
    def get(self, i, pack=struct.pack):
        """Return the opcode string that pushes memo slot i on the stack."""
        if self.bin:
            if i < 256:
                return BINGET + chr(i)
            else:
                return LONG_BINGET + pack("<i", i)
        return GET + repr(i) + '\n'
    def save(self, obj):
        """Write the pickled form of obj, dispatching on its type.
        Tries, in order: persistent id, the memo, the per-type dispatch
        table, class objects, and finally the copy_reg/__reduce__
        protocol."""
        # Check for persistent id (defined by a subclass)
        pid = self.persistent_id(obj)
        if pid:
            self.save_pers(pid)
            return
        # Check the memo
        x = self.memo.get(id(obj))
        if x:
            # Already pickled once: emit a GET reference instead.
            self.write(self.get(x[0]))
            return
        # Check the type dispatch table
        t = type(obj)
        f = self.dispatch.get(t)
        if f:
            f(self, obj)  # Call unbound method with explicit self
            return
        # Check for a class with a custom metaclass; treat as regular class
        try:
            issc = issubclass(t, TypeType)
        except TypeError:  # t is not a class (old Boost; see SF #502085)
            issc = 0
        if issc:
            self.save_global(obj)
            return
        # Check copy_reg.dispatch_table
        reduce = dispatch_table.get(t)
        if reduce:
            rv = reduce(obj)
        else:
            # Check for a __reduce_ex__ method, fall back to __reduce__
            reduce = getattr(obj, "__reduce_ex__", None)
            if reduce:
                rv = reduce(self.proto)
            else:
                reduce = getattr(obj, "__reduce__", None)
                if reduce:
                    rv = reduce()
                else:
                    raise PicklingError("Can't pickle %r object: %r" %
                                        (t.__name__, obj))
        # Check for string returned by reduce(), meaning "save as global"
        if type(rv) is StringType:
            self.save_global(obj, rv)
            return
        # Assert that reduce() returned a tuple
        if type(rv) is not TupleType:
            raise PicklingError("%s must return string or tuple" % reduce)
        # Assert that it returned an appropriately sized tuple
        l = len(rv)
        if not (2 <= l <= 5):
            raise PicklingError("Tuple returned by %s must have "
                                "two to five elements" % reduce)
        # Save the reduce() output and finally memoize the object
        self.save_reduce(obj=obj, *rv)
    def persistent_id(self, obj):
        # This exists so a subclass can override it; returning a non-false
        # value routes obj through save_pers() instead of normal pickling.
        return None
    def save_pers(self, pid):
        # Save a persistent id reference
        if self.bin:
            self.save(pid)
            self.write(BINPERSID)
        else:
            self.write(PERSID + str(pid) + '\n')
    def save_reduce(self, func, args, state=None,
                    listitems=None, dictitems=None, obj=None):
        """Pickle the 2-to-5 tuple returned by a __reduce__-style call."""
        # This API is called by some subclasses
        # Assert that args is a tuple or None
        if not isinstance(args, TupleType):
            raise PicklingError("args from reduce() should be a tuple")
        # Assert that func is callable
        if not hasattr(func, '__call__'):
            raise PicklingError("func from reduce should be callable")
        save = self.save
        write = self.write
        # Protocol 2 special case: if func's name is __newobj__, use NEWOBJ
        if self.proto >= 2 and getattr(func, "__name__", "") == "__newobj__":
            # A __reduce__ implementation can direct protocol 2 to
            # use the more efficient NEWOBJ opcode, while still
            # allowing protocol 0 and 1 to work normally.  For this to
            # work, the function returned by __reduce__ should be
            # called __newobj__, and its first argument should be a
            # new-style class.  The implementation for __newobj__
            # should be as follows, although pickle has no way to
            # verify this:
            #
            # def __newobj__(cls, *args):
            #     return cls.__new__(cls, *args)
            #
            # Protocols 0 and 1 will pickle a reference to __newobj__,
            # while protocol 2 (and above) will pickle a reference to
            # cls, the remaining args tuple, and the NEWOBJ code,
            # which calls cls.__new__(cls, *args) at unpickling time
            # (see load_newobj below).  If __reduce__ returns a
            # three-tuple, the state from the third tuple item will be
            # pickled regardless of the protocol, calling __setstate__
            # at unpickling time (see load_build below).
            #
            # Note that no standard __newobj__ implementation exists;
            # you have to provide your own.  This is to enforce
            # compatibility with Python 2.2 (pickles written using
            # protocol 0 or 1 in Python 2.3 should be unpicklable by
            # Python 2.2).
            cls = args[0]
            if not hasattr(cls, "__new__"):
                raise PicklingError(
                    "args[0] from __newobj__ args has no __new__")
            if obj is not None and cls is not obj.__class__:
                raise PicklingError(
                    "args[0] from __newobj__ args has the wrong class")
            args = args[1:]
            save(cls)
            save(args)
            write(NEWOBJ)
        else:
            save(func)
            save(args)
            write(REDUCE)
        if obj is not None:
            self.memoize(obj)
        # More new special cases (that work with older protocols as
        # well): when __reduce__ returns a tuple with 4 or 5 items,
        # the 4th and 5th item should be iterators that provide list
        # items and dict items (as (key, value) tuples), or None.
        if listitems is not None:
            self._batch_appends(listitems)
        if dictitems is not None:
            self._batch_setitems(dictitems)
        if state is not None:
            save(state)
            write(BUILD)
    # Methods below this point are dispatched through the dispatch table
    dispatch = {}
    def save_none(self, obj):
        self.write(NONE)
    dispatch[NoneType] = save_none
    def save_bool(self, obj):
        # NEWTRUE/NEWFALSE exist only in protocol 2; older protocols spell
        # booleans as the special INT strings TRUE/FALSE.
        if self.proto >= 2:
            self.write(obj and NEWTRUE or NEWFALSE)
        else:
            self.write(obj and TRUE or FALSE)
    dispatch[bool] = save_bool
    def save_int(self, obj, pack=struct.pack):
        if self.bin:
            # If the int is small enough to fit in a signed 4-byte 2's-comp
            # format, we can store it more efficiently than the general
            # case.
            # First one- and two-byte unsigned ints:
            if obj >= 0:
                if obj <= 0xff:
                    self.write(BININT1 + chr(obj))
                    return
                if obj <= 0xffff:
                    self.write("%c%c%c" % (BININT2, obj&0xff, obj>>8))
                    return
            # Next check for 4-byte signed ints:
            high_bits = obj >> 31  # note that Python shift sign-extends
            if high_bits == 0 or high_bits == -1:
                # All high bits are copies of bit 2**31, so the value
                # fits in a 4-byte signed int.
                self.write(BININT + pack("<i", obj))
                return
        # Text pickle, or int too big to fit in signed 4-byte format.
        self.write(INT + repr(obj) + '\n')
    dispatch[IntType] = save_int
    def save_long(self, obj, pack=struct.pack):
        if self.proto >= 2:
            # Protocol 2 stores longs as little-endian 2's-complement bytes.
            bytes = encode_long(obj)
            n = len(bytes)
            if n < 256:
                self.write(LONG1 + chr(n) + bytes)
            else:
                self.write(LONG4 + pack("<i", n) + bytes)
            return
        self.write(LONG + repr(obj) + '\n')
    dispatch[LongType] = save_long
    def save_float(self, obj, pack=struct.pack):
        # Binary form is an 8-byte big-endian IEEE double (BINFLOAT).
        if self.bin:
            self.write(BINFLOAT + pack('>d', obj))
        else:
            self.write(FLOAT + repr(obj) + '\n')
    dispatch[FloatType] = save_float
    def save_string(self, obj, pack=struct.pack):
        if self.bin:
            n = len(obj)
            if n < 256:
                self.write(SHORT_BINSTRING + chr(n) + obj)
            else:
                self.write(BINSTRING + pack("<i", n) + obj)
        else:
            self.write(STRING + repr(obj) + '\n')
        self.memoize(obj)
    dispatch[StringType] = save_string
    def save_unicode(self, obj, pack=struct.pack):
        if self.bin:
            encoding = obj.encode('utf-8')
            n = len(encoding)
            self.write(BINUNICODE + pack("<i", n) + encoding)
        else:
            # Text mode cannot hold raw backslashes or newlines; escape them
            # before the raw-unicode-escape encoding.
            obj = obj.replace("\\", "\\u005c")
            obj = obj.replace("\n", "\\u000a")
            self.write(UNICODE + obj.encode('raw-unicode-escape') + '\n')
        self.memoize(obj)
    dispatch[UnicodeType] = save_unicode
    if StringType is UnicodeType:
        # This is true for Jython
        def save_string(self, obj, pack=struct.pack):
            unicode = obj.isunicode()
            if self.bin:
                if unicode:
                    obj = obj.encode("utf-8")
                l = len(obj)
                if l < 256 and not unicode:
                    self.write(SHORT_BINSTRING + chr(l) + obj)
                else:
                    s = pack("<i", l)
                    if unicode:
                        self.write(BINUNICODE + s + obj)
                    else:
                        self.write(BINSTRING + s + obj)
            else:
                if unicode:
                    obj = obj.replace("\\", "\\u005c")
                    obj = obj.replace("\n", "\\u000a")
                    obj = obj.encode('raw-unicode-escape')
                    self.write(UNICODE + obj + '\n')
                else:
                    self.write(STRING + repr(obj) + '\n')
            self.memoize(obj)
        dispatch[StringType] = save_string
    def save_tuple(self, obj):
        write = self.write
        proto = self.proto
        n = len(obj)
        if n == 0:
            if proto:
                write(EMPTY_TUPLE)
            else:
                write(MARK + TUPLE)
            return
        save = self.save
        memo = self.memo
        if n <= 3 and proto >= 2:
            # Protocol 2 has dedicated opcodes for 1/2/3-element tuples.
            for element in obj:
                save(element)
            # Subtle.  Same as in the big comment below.
            if id(obj) in memo:
                get = self.get(memo[id(obj)][0])
                write(POP * n + get)
            else:
                write(_tuplesize2code[n])
                self.memoize(obj)
            return
        # proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
        # has more than 3 elements.
        write(MARK)
        for element in obj:
            save(element)
        if id(obj) in memo:
            # Subtle.  d was not in memo when we entered save_tuple(), so
            # the process of saving the tuple's elements must have saved
            # the tuple itself:  the tuple is recursive.  The proper action
            # now is to throw away everything we put on the stack, and
            # simply GET the tuple (it's already constructed).  This check
            # could have been done in the "for element" loop instead, but
            # recursive tuples are a rare thing.
            get = self.get(memo[id(obj)][0])
            if proto:
                write(POP_MARK + get)
            else:   # proto 0 -- POP_MARK not available
                write(POP * (n+1) + get)
            return
        # No recursion.
        self.write(TUPLE)
        self.memoize(obj)
    dispatch[TupleType] = save_tuple
    # save_empty_tuple() isn't used by anything in Python 2.3.  However, I
    # found a Pickler subclass in Zope3 that calls it, so it's not harmless
    # to remove it.
    def save_empty_tuple(self, obj):
        self.write(EMPTY_TUPLE)
    def save_list(self, obj):
        write = self.write
        if self.bin:
            write(EMPTY_LIST)
        else:   # proto 0 -- can't use EMPTY_LIST
            write(MARK + LIST)
        # Memoize before saving elements so recursive lists resolve via GET.
        self.memoize(obj)
        self._batch_appends(iter(obj))
    dispatch[ListType] = save_list
    # Keep in synch with cPickle's BATCHSIZE.  Nothing will break if it gets
    # out of synch, though.
    _BATCHSIZE = 1000
    def _batch_appends(self, items):
        # Helper to batch up APPENDS sequences
        save = self.save
        write = self.write
        if not self.bin:
            # Proto 0 has no APPENDS: emit one APPEND per element.
            for x in items:
                save(x)
                write(APPEND)
            return
        r = xrange(self._BATCHSIZE)
        while items is not None:
            tmp = []
            for i in r:
                try:
                    x = items.next()
                    tmp.append(x)
                except StopIteration:
                    items = None
                    break
            n = len(tmp)
            if n > 1:
                write(MARK)
                for x in tmp:
                    save(x)
                write(APPENDS)
            elif n:
                save(tmp[0])
                write(APPEND)
            # else tmp is empty, and we're done
    def save_dict(self, obj):
        write = self.write
        if self.bin:
            write(EMPTY_DICT)
        else:   # proto 0 -- can't use EMPTY_DICT
            write(MARK + DICT)
        # Memoize before saving items so recursive dicts resolve via GET.
        self.memoize(obj)
        self._batch_setitems(obj.iteritems())
    dispatch[DictionaryType] = save_dict
    if not PyStringMap is None:
        dispatch[PyStringMap] = save_dict
    def _batch_setitems(self, items):
        # Helper to batch up SETITEMS sequences; proto >= 1 only
        save = self.save
        write = self.write
        if not self.bin:
            for k, v in items:
                save(k)
                save(v)
                write(SETITEM)
            return
        r = xrange(self._BATCHSIZE)
        while items is not None:
            tmp = []
            for i in r:
                try:
                    tmp.append(items.next())
                except StopIteration:
                    items = None
                    break
            n = len(tmp)
            if n > 1:
                write(MARK)
                for k, v in tmp:
                    save(k)
                    save(v)
                write(SETITEMS)
            elif n:
                k, v = tmp[0]
                save(k)
                save(v)
                write(SETITEM)
            # else tmp is empty, and we're done
    def save_inst(self, obj):
        """Pickle an old-style (classic) class instance with INST/OBJ."""
        cls = obj.__class__
        memo = self.memo
        write = self.write
        save = self.save
        if hasattr(obj, '__getinitargs__'):
            args = obj.__getinitargs__()
            len(args)   # XXX Assert it's a sequence
            _keep_alive(args, memo)
        else:
            args = ()
        write(MARK)
        if self.bin:
            save(cls)
            for arg in args:
                save(arg)
            write(OBJ)
        else:
            for arg in args:
                save(arg)
            write(INST + cls.__module__ + '\n' + cls.__name__ + '\n')
        self.memoize(obj)
        # State comes from __getstate__ when defined, else the instance dict.
        try:
            getstate = obj.__getstate__
        except AttributeError:
            stuff = obj.__dict__
        else:
            stuff = getstate()
            _keep_alive(stuff, memo)
        save(stuff)
        write(BUILD)
    dispatch[InstanceType] = save_inst
    def save_global(self, obj, name=None, pack=struct.pack):
        """Pickle a class/function by reference (GLOBAL or EXTn opcode),
        verifying it can be re-imported as module.name."""
        write = self.write
        memo = self.memo
        if name is None:
            name = obj.__name__
        module = getattr(obj, "__module__", None)
        if module is None:
            module = whichmodule(obj, name)
        try:
            __import__(module)
            mod = sys.modules[module]
            klass = getattr(mod, name)
        except (ImportError, KeyError, AttributeError):
            raise PicklingError(
                "Can't pickle %r: it's not found as %s.%s" %
                (obj, module, name))
        else:
            if klass is not obj:
                raise PicklingError(
                    "Can't pickle %r: it's not the same object as %s.%s" %
                    (obj, module, name))
        if self.proto >= 2:
            # Prefer the compact extension-registry encoding when available.
            code = _extension_registry.get((module, name))
            if code:
                assert code > 0
                if code <= 0xff:
                    write(EXT1 + chr(code))
                elif code <= 0xffff:
                    write("%c%c%c" % (EXT2, code&0xff, code>>8))
                else:
                    write(EXT4 + pack("<i", code))
                return
        write(GLOBAL + module + '\n' + name + '\n')
        self.memoize(obj)
    dispatch[ClassType] = save_global
    dispatch[FunctionType] = save_global
    dispatch[BuiltinFunctionType] = save_global
    dispatch[TypeType] = save_global
# Pickling helpers
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
to assure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
# Cache mapping an object with no __module__ to the module name in which
# it was found by scanning sys.modules.
classmap = {} # called classmap for backwards compatibility
def whichmodule(func, funcname):
    """Figure out the module in which a function occurs.
    Search sys.modules for the module.
    Cache in classmap.
    Return a module name.
    If the function cannot be found, return "__main__".
    """
    # Python functions should always get an __module__ from their globals.
    mod = getattr(func, "__module__", None)
    if mod is not None:
        return mod
    try:
        return classmap[func]
    except KeyError:
        pass
    # Fall back to scanning every loaded module for the attribute.
    found = '__main__'
    for modname, module in sys.modules.items():
        if module is None or modname == '__main__':
            continue    # skip dummy package entries and the main script
        if getattr(module, funcname, None) is func:
            found = modname
            break
    classmap[func] = found
    return found
# Unpickling machinery
class Unpickler:
def __init__(self, file):
"""This takes a file-like object for reading a pickle data stream.
The protocol version of the pickle is detected automatically, so no
proto argument is needed.
The file-like object must have two methods, a read() method that
takes an integer argument, and a readline() method that requires no
arguments. Both methods should return a string. Thus file-like
object can be a file object opened for reading, a StringIO object,
or any other custom object that meets this interface.
"""
self.readline = file.readline
self.read = file.read
self.memo = {}
def load(self):
"""Read a pickled object representation from the open file.
Return the reconstituted object hierarchy specified in the file.
"""
self.mark = object() # any new unique object
self.stack = []
self.append = self.stack.append
read = self.read
dispatch = self.dispatch
try:
while 1:
key = read(1)
dispatch[key](self)
except _Stop, stopinst:
return stopinst.value
# Return largest index k such that self.stack[k] is self.mark.
# If the stack doesn't contain a mark, eventually raises IndexError.
# This could be sped by maintaining another stack, of indices at which
# the mark appears. For that matter, the latter stack would suffice,
# and we wouldn't need to push mark objects on self.stack at all.
# Doing so is probably a good thing, though, since if the pickle is
# corrupt (or hostile) we may get a clue from finding self.mark embedded
# in unpickled objects.
def marker(self):
stack = self.stack
mark = self.mark
k = len(stack)-1
while stack[k] is not mark: k = k-1
return k
dispatch = {}
def load_eof(self):
raise EOFError
dispatch[''] = load_eof
def load_proto(self):
proto = ord(self.read(1))
if not 0 <= proto <= 2:
raise ValueError, "unsupported pickle protocol: %d" % proto
dispatch[PROTO] = load_proto
def load_persid(self):
pid = self.readline()[:-1]
self.append(self.persistent_load(pid))
dispatch[PERSID] = load_persid
def load_binpersid(self):
pid = self.stack.pop()
self.append(self.persistent_load(pid))
dispatch[BINPERSID] = load_binpersid
def load_none(self):
self.append(None)
dispatch[NONE] = load_none
def load_false(self):
self.append(False)
dispatch[NEWFALSE] = load_false
def load_true(self):
self.append(True)
dispatch[NEWTRUE] = load_true
def load_int(self):
data = self.readline()
if data == FALSE[1:]:
val = False
elif data == TRUE[1:]:
val = True
else:
try:
val = int(data)
except ValueError:
val = long(data)
self.append(val)
dispatch[INT] = load_int
def load_binint(self):
self.append(mloads('i' + self.read(4)))
dispatch[BININT] = load_binint
def load_binint1(self):
self.append(ord(self.read(1)))
dispatch[BININT1] = load_binint1
def load_binint2(self):
self.append(mloads('i' + self.read(2) + '\000\000'))
dispatch[BININT2] = load_binint2
def load_long(self):
self.append(long(self.readline()[:-1], 0))
dispatch[LONG] = load_long
def load_long1(self):
n = ord(self.read(1))
bytes = self.read(n)
self.append(decode_long(bytes))
dispatch[LONG1] = load_long1
def load_long4(self):
n = mloads('i' + self.read(4))
bytes = self.read(n)
self.append(decode_long(bytes))
dispatch[LONG4] = load_long4
def load_float(self):
self.append(float(self.readline()[:-1]))
dispatch[FLOAT] = load_float
def load_binfloat(self, unpack=struct.unpack):
self.append(unpack('>d', self.read(8))[0])
dispatch[BINFLOAT] = load_binfloat
def load_string(self):
rep = self.readline()[:-1]
for q in "\"'": # double or single quote
if rep.startswith(q):
if not rep.endswith(q):
raise ValueError, "insecure string pickle"
rep = rep[len(q):-len(q)]
break
else:
raise ValueError, "insecure string pickle"
self.append(rep.decode("string-escape"))
dispatch[STRING] = load_string
def load_binstring(self):
len = mloads('i' + self.read(4))
self.append(self.read(len))
dispatch[BINSTRING] = load_binstring
def load_unicode(self):
self.append(unicode(self.readline()[:-1],'raw-unicode-escape'))
dispatch[UNICODE] = load_unicode
def load_binunicode(self):
len = mloads('i' + self.read(4))
self.append(unicode(self.read(len),'utf-8'))
dispatch[BINUNICODE] = load_binunicode
def load_short_binstring(self):
len = ord(self.read(1))
self.append(self.read(len))
dispatch[SHORT_BINSTRING] = load_short_binstring
def load_tuple(self):
k = self.marker()
self.stack[k:] = [tuple(self.stack[k+1:])]
dispatch[TUPLE] = load_tuple
def load_empty_tuple(self):
self.stack.append(())
dispatch[EMPTY_TUPLE] = load_empty_tuple
def load_tuple1(self):
self.stack[-1] = (self.stack[-1],)
dispatch[TUPLE1] = load_tuple1
def load_tuple2(self):
self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
dispatch[TUPLE2] = load_tuple2
def load_tuple3(self):
self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
dispatch[TUPLE3] = load_tuple3
def load_empty_list(self):
    # EMPTY_LIST: push a fresh empty list.
    self.stack.append([])
dispatch[EMPTY_LIST] = load_empty_list

def load_empty_dictionary(self):
    # EMPTY_DICT: push a fresh empty dict.
    self.stack.append({})
dispatch[EMPTY_DICT] = load_empty_dictionary
def load_list(self):
    # LIST: collapse everything above the topmost mark into one list.
    mark = self.marker()
    self.stack[mark:] = [self.stack[mark + 1:]]
dispatch[LIST] = load_list

def load_dict(self):
    # DICT: items above the mark alternate key, value; later duplicates win,
    # matching the original explicit loop.
    mark = self.marker()
    flat = self.stack[mark + 1:]
    mapping = dict(zip(flat[0::2], flat[1::2]))
    self.stack[mark:] = [mapping]
dispatch[DICT] = load_dict
# INST and OBJ differ only in how they get a class object.  It's not
# only sensible to do the rest in a common routine, the two routines
# previously diverged and grew different bugs.
#
# klass is the class to instantiate, and k points to the topmost mark
# object, following which are the arguments for klass.__init__.
def _instantiate(self, klass, k):
    # Build an instance of `klass` from the stack items above mark `k`
    # and push it.
    args = tuple(self.stack[k+1:])
    del self.stack[k:]
    instantiated = 0
    # Old-style classes taking no arguments and lacking __getinitargs__
    # can be created without running __init__, mirroring cPickle.
    if (not args and
        type(klass) is ClassType and
        not hasattr(klass, "__getinitargs__")):
        try:
            value = _EmptyClass()
            value.__class__ = klass
            instantiated = 1
        except RuntimeError:
            # In restricted execution, assignment to inst.__class__ is
            # prohibited
            pass
    if not instantiated:
        try:
            value = klass(*args)
        except TypeError, err:
            # Re-raise with the class name for context, preserving the
            # original traceback (3-argument raise).
            raise TypeError, "in constructor for %s: %s" % (
                klass.__name__, str(err)), sys.exc_info()[2]
    self.append(value)
def load_inst(self):
    # INST: module and class name arrive on two text lines; the
    # constructor arguments sit above the topmost mark.
    module = self.readline()[:-1]
    name = self.readline()[:-1]
    self._instantiate(self.find_class(module, name), self.marker())
dispatch[INST] = load_inst

def load_obj(self):
    # OBJ: like INST, but the class object itself sits just above the mark.
    mark = self.marker()
    klass = self.stack.pop(mark + 1)
    self._instantiate(klass, mark)
dispatch[OBJ] = load_obj

def load_newobj(self):
    # NEWOBJ (protocol 2): build via cls.__new__(cls, *args).
    args = self.stack.pop()
    cls = self.stack[-1]
    self.stack[-1] = cls.__new__(cls, *args)
dispatch[NEWOBJ] = load_newobj
def load_global(self):
    # GLOBAL: push the object named by two newline-terminated strings
    # (module name, attribute name).
    module = self.readline()[:-1]
    name = self.readline()[:-1]
    self.append(self.find_class(module, name))
dispatch[GLOBAL] = load_global
def load_ext1(self):
    # EXT1/EXT2/EXT4: extension-registry codes of 1, 2 and 4 bytes
    # (little-endian).
    self.get_extension(ord(self.read(1)))
dispatch[EXT1] = load_ext1

def load_ext2(self):
    self.get_extension(mloads('i' + self.read(2) + '\000\000'))
dispatch[EXT2] = load_ext2

def load_ext4(self):
    self.get_extension(mloads('i' + self.read(4)))
dispatch[EXT4] = load_ext4

def get_extension(self, code):
    # Resolve an extension code: serve from the cache when possible,
    # otherwise look the (module, name) pair up in the inverted registry
    # and memoize the resolved object.
    sentinel = []
    obj = _extension_cache.get(code, sentinel)
    if obj is not sentinel:
        self.append(obj)
        return
    key = _inverted_registry.get(code)
    if not key:
        raise ValueError("unregistered extension code %d" % code)
    obj = self.find_class(*key)
    _extension_cache[code] = obj
    self.append(obj)
def find_class(self, module, name):
    # Resolve `module.name` to an object.  Subclasses may override this
    # to restrict which globals a pickle is allowed to reference.
    __import__(module)
    return getattr(sys.modules[module], name)
def load_reduce(self):
    # REDUCE: apply the callable just below the top of stack to the
    # argument tuple on top, replacing both with the result.
    stack = self.stack
    args = stack.pop()
    stack[-1] = stack[-1](*args)
dispatch[REDUCE] = load_reduce
def load_pop(self):
    # POP: discard the top of stack.
    del self.stack[-1]
dispatch[POP] = load_pop

def load_pop_mark(self):
    # POP_MARK: discard everything down to and including the topmost mark.
    mark = self.marker()
    del self.stack[mark:]
dispatch[POP_MARK] = load_pop_mark

def load_dup(self):
    # DUP: duplicate the top of stack.
    self.append(self.stack[-1])
dispatch[DUP] = load_dup
def load_get(self):
    # GET: memo key is the decimal text on the rest of the line.
    key = self.readline()[:-1]
    self.append(self.memo[key])
dispatch[GET] = load_get

def load_binget(self):
    # BINGET: 1-byte memo index; memo keys are stored as repr() strings.
    index = ord(self.read(1))
    self.append(self.memo[repr(index)])
dispatch[BINGET] = load_binget

def load_long_binget(self):
    # LONG_BINGET: 4-byte little-endian memo index.
    index = mloads('i' + self.read(4))
    self.append(self.memo[repr(index)])
dispatch[LONG_BINGET] = load_long_binget

def load_put(self):
    # PUT: store the top of stack under the textual key that follows.
    key = self.readline()[:-1]
    self.memo[key] = self.stack[-1]
dispatch[PUT] = load_put

def load_binput(self):
    # BINPUT: 1-byte memo index.
    index = ord(self.read(1))
    self.memo[repr(index)] = self.stack[-1]
dispatch[BINPUT] = load_binput

def load_long_binput(self):
    # LONG_BINPUT: 4-byte little-endian memo index.
    index = mloads('i' + self.read(4))
    self.memo[repr(index)] = self.stack[-1]
dispatch[LONG_BINPUT] = load_long_binput
def load_append(self):
    # APPEND: pop a value and append it to the list below it.
    # Fix: the target local was named ``list``, shadowing the builtin.
    stack = self.stack
    value = stack.pop()
    target = stack[-1]
    target.append(value)
dispatch[APPEND] = load_append

def load_appends(self):
    # APPENDS: extend the list below the mark with everything above it,
    # then discard the mark and the appended items.
    stack = self.stack
    mark = self.marker()
    target = stack[mark - 1]
    target.extend(stack[mark + 1:])
    del stack[mark:]
dispatch[APPENDS] = load_appends
def load_setitem(self):
    # SETITEM: pop value then key, store into the dict below them.
    # Fix: the target local was named ``dict``, shadowing the builtin.
    stack = self.stack
    value = stack.pop()
    key = stack.pop()
    target = stack[-1]
    target[key] = value
dispatch[SETITEM] = load_setitem

def load_setitems(self):
    # SETITEMS: key/value pairs above the mark go into the dict below it.
    stack = self.stack
    mark = self.marker()
    target = stack[mark - 1]
    for i in range(mark + 1, len(stack), 2):
        target[stack[i]] = stack[i + 1]
    del stack[mark:]
dispatch[SETITEMS] = load_setitems
def load_build(self):
    # BUILD: apply pickled state to the object below it on the stack.
    # If the object defines __setstate__ that takes precedence; otherwise
    # state is a dict of attributes, optionally paired with a second dict
    # for __slots__ values.
    stack = self.stack
    state = stack.pop()
    inst = stack[-1]
    setstate = getattr(inst, "__setstate__", None)
    if setstate:
        setstate(state)
        return
    slotstate = None
    if isinstance(state, tuple) and len(state) == 2:
        state, slotstate = state
    if state:
        try:
            d = inst.__dict__
            try:
                # intern() the keys so repeated attribute names share
                # one string object across many unpickled instances.
                for k, v in state.iteritems():
                    d[intern(k)] = v
            # keys in state don't have to be strings
            # don't blow up, but don't go out of our way
            except TypeError:
                d.update(state)
        except RuntimeError:
            # XXX In restricted execution, the instance's __dict__
            # is not accessible.  Use the old way of unpickling
            # the instance variables.  This is a semantic
            # difference when unpickling in restricted
            # vs. unrestricted modes.
            # Note, however, that cPickle has never tried to do the
            # .update() business, and always uses
            #     PyObject_SetItem(inst.__dict__, key, value) in a
            # loop over state.items().
            for k, v in state.items():
                setattr(inst, k, v)
    if slotstate:
        # Slot values must go through setattr; they live outside __dict__.
        for k, v in slotstate.items():
            setattr(inst, k, v)
dispatch[BUILD] = load_build
def load_mark(self):
    # MARK: push the mark sentinel object.
    self.append(self.mark)
dispatch[MARK] = load_mark

def load_stop(self):
    # STOP: the value on top of the stack is the final unpickled object;
    # signal completion to load() by raising _Stop.
    result = self.stack.pop()
    raise _Stop(result)
dispatch[STOP] = load_stop
# Helper class for load_inst/load_obj: an empty old-style class used as a
# shell whose __class__ is then repointed at the real target class, so the
# target's __init__ never runs.
class _EmptyClass:
    pass
# Encode/decode longs in linear time.

import binascii as _binascii

def encode_long(x):
    r"""Encode a long to a two's complement little-endian binary string.
    Note that 0L is a special case, returning an empty string, to save a
    byte in the LONG1 pickling context.

    >>> encode_long(0L)
    ''
    >>> encode_long(255L)
    '\xff\x00'
    >>> encode_long(32767L)
    '\xff\x7f'
    >>> encode_long(-256L)
    '\x00\xff'
    >>> encode_long(-32768L)
    '\x00\x80'
    >>> encode_long(-128L)
    '\x80'
    >>> encode_long(127L)
    '\x7f'
    >>>
    """
    if x == 0:
        return ''
    if x > 0:
        ashex = hex(x)
        assert ashex.startswith("0x")
        # hex() output is "0x" + nibbles, possibly with a trailing "L";
        # neither prefix nor suffix is part of the number.
        njunkchars = 2 + ashex.endswith('L')
        nibbles = len(ashex) - njunkchars
        if nibbles & 1:
            # need an even # of nibbles for unhexlify
            ashex = "0x0" + ashex[2:]
        elif int(ashex[2], 16) >= 8:
            # "looks negative", so need a byte of sign bits
            ashex = "0x00" + ashex[2:]
    else:
        # Build the 256's-complement:  (1L << nbytes) + x.  The trick is
        # to find the number of bytes in linear time (although that should
        # really be a constant-time task).
        ashex = hex(-x)
        assert ashex.startswith("0x")
        njunkchars = 2 + ashex.endswith('L')
        nibbles = len(ashex) - njunkchars
        if nibbles & 1:
            # Extend to a full byte.
            nibbles += 1
        nbits = nibbles * 4
        x += 1L << nbits
        assert x > 0
        ashex = hex(x)
        njunkchars = 2 + ashex.endswith('L')
        newnibbles = len(ashex) - njunkchars
        if newnibbles < nibbles:
            # Adding the complement shrank the hex form; left-pad with
            # zeros so the byte count is preserved.
            ashex = "0x" + "0" * (nibbles - newnibbles) + ashex[2:]
        if int(ashex[2], 16) < 8:
            # "looks positive", so need a byte of sign bits
            ashex = "0xff" + ashex[2:]
    if ashex.endswith('L'):
        ashex = ashex[2:-1]
    else:
        ashex = ashex[2:]
    assert len(ashex) & 1 == 0, (x, ashex)
    binary = _binascii.unhexlify(ashex)
    # unhexlify yields big-endian bytes; the pickle format is little-endian.
    return binary[::-1]
def decode_long(data):
    r"""Decode a long from a two's complement little-endian binary string.

    >>> decode_long('')
    0L
    >>> decode_long("\xff\x00")
    255L
    >>> decode_long("\xff\x7f")
    32767L
    >>> decode_long("\x00\xff")
    -256L
    >>> decode_long("\x00\x80")
    -32768L
    >>> decode_long("\x80")
    -128L
    >>> decode_long("\x7f")
    127L
    """
    nbytes = len(data)
    if nbytes == 0:
        # Empty string encodes 0 (see encode_long).
        return 0L
    # Reverse to big-endian, hexlify, and parse in one shot.
    ashex = _binascii.hexlify(data[::-1])
    n = long(ashex, 16) # quadratic time before Python 2.3; linear now
    # If the most significant bit of the last byte is set, the value is
    # negative: subtract the 256's-complement bias.
    if data[-1] >= '\x80':
        n -= 1L << (nbytes * 8)
    return n
# Shorthands
try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO

def dump(obj, file, protocol=None):
    """Pickle obj to the open file object, using the given protocol."""
    Pickler(file, protocol).dump(obj)

def dumps(obj, protocol=None):
    """Return the pickled representation of obj as a string."""
    buf = StringIO()
    Pickler(buf, protocol).dump(obj)
    return buf.getvalue()

def load(file):
    """Read and return one pickled object from the open file object."""
    return Unpickler(file).load()

def loads(str):
    """Unpickle and return one object from the given string."""
    return Unpickler(StringIO(str)).load()
# Doctest
def _test():
    """Run this module's doctests; returns (failure_count, test_count)."""
    import doctest
    return doctest.testmod()

if __name__ == "__main__":
    _test()
| Python |
"""Manage shelves of pickled objects.
A "shelf" is a persistent, dictionary-like object. The difference
with dbm databases is that the values (not the keys!) in a shelf can
be essentially arbitrary Python objects -- anything that the "pickle"
module can handle. This includes most class instances, recursive data
types, and objects containing lots of shared sub-objects. The keys
are ordinary strings.
To summarize the interface (key is a string, data is an arbitrary
object):
import shelve
d = shelve.open(filename) # open, with (g)dbm filename -- no suffix
d[key] = data # store data at key (overwrites old data if
# using an existing key)
data = d[key] # retrieve a COPY of the data at key (raise
# KeyError if no such key) -- NOTE that this
# access returns a *copy* of the entry!
del d[key] # delete data stored at key (raises KeyError
# if no such key)
flag = d.has_key(key) # true if the key exists; same as "key in d"
list = d.keys() # a list of all existing keys (slow!)
d.close() # close it
Dependent on the implementation, closing a persistent dictionary may
or may not be necessary to flush changes to disk.
Normally, d[key] returns a COPY of the entry. This needs care when
mutable entries are mutated: for example, if d[key] is a list,
d[key].append(anitem)
does NOT modify the entry d[key] itself, as stored in the persistent
mapping -- it only modifies the copy, which is then immediately
discarded, so that the append has NO effect whatsoever. To append an
item to d[key] in a way that will affect the persistent mapping, use:
data = d[key]
data.append(anitem)
d[key] = data
To avoid the problem with mutable entries, you may pass the keyword
argument writeback=True in the call to shelve.open. When you use:
d = shelve.open(filename, writeback=True)
then d keeps a cache of all entries you access, and writes them all back
to the persistent mapping when you call d.close(). This ensures that
such usage as d[key].append(anitem) works as intended.
However, using keyword argument writeback=True may consume vast amount
of memory for the cache, and it may make d.close() very slow, if you
access many of d's entries after opening it in this way: d has no way to
check which of the entries you access are mutable and/or which ones you
actually mutate, so it must cache, and write back at close, all of the
entries that you access. You can call d.sync() to write back all the
entries in the cache, and empty the cache (d.sync() also synchronizes
the persistent dictionary on disk, if feasible).
"""
# Try using cPickle and cStringIO if available.
try:
from cPickle import Pickler, Unpickler
except ImportError:
from pickle import Pickler, Unpickler
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import UserDict
__all__ = ["Shelf", "BsdDbShelf", "DbfilenameShelf", "open"]

class _ClosedDict(UserDict.DictMixin):
    'Marker for a closed dict. Access attempts raise a ValueError.'

    def closed(self, *args):
        # Every mapping operation on a closed shelf funnels through here.
        raise ValueError('invalid operation on closed shelf')

    __getitem__ = __setitem__ = __delitem__ = keys = closed

    def __repr__(self):
        return '<Closed Dictionary>'
class Shelf(UserDict.DictMixin):
    """Base class for shelf implementations.

    This is initialized with a dictionary-like object.
    See the module's __doc__ string for an overview of the interface.
    """

    def __init__(self, dict, protocol=None, writeback=False):
        # dict: the underlying (string -> pickled-string) mapping.
        # protocol: pickle protocol for stored values (default 0).
        # writeback: when true, accessed entries are cached and re-pickled
        # on sync()/close() so in-place mutations stick.
        self.dict = dict
        if protocol is None:
            protocol = 0
        self._protocol = protocol
        self.writeback = writeback
        self.cache = {}

    def keys(self):
        return self.dict.keys()

    def __len__(self):
        return len(self.dict)

    def has_key(self, key):
        return key in self.dict

    def __contains__(self, key):
        return key in self.dict

    def get(self, key, default=None):
        # Like dict.get, but unpickles the stored value on a hit.
        if key in self.dict:
            return self[key]
        return default

    def __getitem__(self, key):
        # Serve from the writeback cache when possible; otherwise
        # unpickle a fresh COPY of the stored value.
        try:
            value = self.cache[key]
        except KeyError:
            f = StringIO(self.dict[key])
            value = Unpickler(f).load()
            if self.writeback:
                self.cache[key] = value
        return value

    def __setitem__(self, key, value):
        if self.writeback:
            self.cache[key] = value
        # Store the pickled representation, never the live object.
        f = StringIO()
        p = Pickler(f, self._protocol)
        p.dump(value)
        self.dict[key] = f.getvalue()

    def __delitem__(self, key):
        del self.dict[key]
        try:
            del self.cache[key]
        except KeyError:
            # Entry was never cached; nothing more to do.
            pass

    def close(self):
        # Flush cached entries, close the backing store, then poison
        # self.dict so later access fails loudly.
        self.sync()
        try:
            self.dict.close()
        except AttributeError:
            pass
        # Catch errors that may happen when close is called from __del__
        # because CPython is in interpreter shutdown.
        try:
            self.dict = _ClosedDict()
        except (NameError, TypeError):
            self.dict = None

    def __del__(self):
        if not hasattr(self, 'writeback'):
            # __init__ didn't succeed, so don't bother closing
            return
        self.close()

    def sync(self):
        # Write every cached entry back through __setitem__.  writeback is
        # temporarily disabled so those writes don't repopulate the cache.
        if self.writeback and self.cache:
            self.writeback = False
            for key, entry in self.cache.iteritems():
                self[key] = entry
            self.writeback = True
            self.cache = {}
        if hasattr(self.dict, 'sync'):
            self.dict.sync()
class BsdDbShelf(Shelf):
    """Shelf implementation using the "BSD" db interface.

    This adds methods first(), next(), previous(), last() and
    set_location() that have no counterpart in [g]dbm databases.

    The actual database must be opened using one of the "bsddb"
    modules "open" routines (i.e. bsddb.hashopen, bsddb.btopen or
    bsddb.rnopen) and passed to the constructor.

    See the module's __doc__ string for an overview of the interface.
    """

    def __init__(self, dict, protocol=None, writeback=False):
        Shelf.__init__(self, dict, protocol, writeback)

    def _decoded(self, pair):
        # All bsddb cursor methods return (key, pickled_value); unpickle
        # the value half before handing the pair back to the caller.
        (key, value) = pair
        return (key, Unpickler(StringIO(value)).load())

    def set_location(self, key):
        return self._decoded(self.dict.set_location(key))

    def next(self):
        return self._decoded(self.dict.next())

    def previous(self):
        return self._decoded(self.dict.previous())

    def first(self):
        return self._decoded(self.dict.first())

    def last(self):
        return self._decoded(self.dict.last())
class DbfilenameShelf(Shelf):
    """Shelf implementation using the "anydbm" generic dbm interface.

    This is initialized with the filename for the dbm database.
    See the module's __doc__ string for an overview of the interface.
    """

    def __init__(self, filename, flag='c', protocol=None, writeback=False):
        # Imported lazily so merely importing shelve never drags in a
        # dbm implementation.
        import anydbm
        db = anydbm.open(filename, flag)
        Shelf.__init__(self, db, protocol, writeback)
def open(filename, flag='c', protocol=None, writeback=False):
    """Open a persistent dictionary for reading and writing.

    The filename parameter is the base filename for the underlying
    database.  As a side-effect, an extension may be added to the
    filename and more than one file may be created.  The optional flag
    parameter has the same interpretation as the flag parameter of
    anydbm.open().  The optional protocol parameter specifies the
    version of the pickle protocol (0, 1, or 2).

    See the module's __doc__ string for an overview of the interface.
    """
    shelf = DbfilenameShelf(filename, flag, protocol, writeback)
    return shelf
| Python |
# $Id$
#
#  Copyright (C) 2005   Gregory P. Smith (greg@krypto.org)
#  Licensed to PSF under a Contributor Agreement.

# Backwards-compatibility shim: the old "sha" module is now a thin
# wrapper over hashlib's sha1, with a deprecation warning on import.
import warnings
warnings.warn("the sha module is deprecated; use the hashlib module instead",
                DeprecationWarning, 2)

from hashlib import sha1 as sha
new = sha

blocksize = 1        # legacy value (wrong in any useful sense)
digest_size = 20     # SHA-1 yields a 160-bit (20-byte) digest
digestsize = 20      # historical alias of digest_size
| Python |
# $Id$
#
# Copyright (C) 2005 Gregory P. Smith (greg@krypto.org)
# Licensed to PSF under a Contributor Agreement.
#
__doc__ = """hashlib module - A common interface to many hash functions.
new(name, string='') - returns a new hash object implementing the
given hash function; initializing the hash
using the given string data.
Named constructor functions are also available, these are much faster
than using new():
md5(), sha1(), sha224(), sha256(), sha384(), and sha512()
More algorithms may be available on your platform but the above are
guaranteed to exist.
NOTE: If you want the adler32 or crc32 hash functions they are available in
the zlib module.
Choose your hash function wisely. Some have known collision weaknesses.
sha384 and sha512 will be slow on 32 bit platforms.
Hash objects have these methods:
- update(arg): Update the hash object with the string arg. Repeated calls
are equivalent to a single call with the concatenation of all
the arguments.
- digest(): Return the digest of the strings passed to the update() method
so far. This may contain non-ASCII characters, including
NUL bytes.
- hexdigest(): Like digest() except the digest is returned as a string of
double length, containing only hexadecimal digits.
- copy(): Return a copy (clone) of the hash object. This can be used to
efficiently compute the digests of strings that share a common
initial substring.
For example, to obtain the digest of the string 'Nobody inspects the
spammish repetition':
>>> import hashlib
>>> m = hashlib.md5()
>>> m.update("Nobody inspects")
>>> m.update(" the spammish repetition")
>>> m.digest()
'\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9'
More condensed:
>>> hashlib.sha224("Nobody inspects the spammish repetition").hexdigest()
'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2'
"""
# This tuple and __get_builtin_constructor() must be modified if a new
# always available algorithm is added.
__always_supported = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')

# Public, documented name for the guaranteed-algorithm list.
algorithms = __always_supported

__all__ = __always_supported + ('new', 'algorithms')
def __get_builtin_constructor(name):
    """Return the bundled (non-OpenSSL) constructor for a hash name.

    Raises ValueError for names with no builtin implementation.
    """
    if name in ('SHA1', 'sha1'):
        import _sha
        return _sha.new
    if name in ('MD5', 'md5'):
        import _md5
        return _md5.new
    if name in ('SHA256', 'sha256', 'SHA224', 'sha224'):
        import _sha256
        # The digest size is encoded in the last characters of the name.
        return _sha256.sha224 if name[3:] == '224' else _sha256.sha256
    if name in ('SHA512', 'sha512', 'SHA384', 'sha384'):
        import _sha512
        return _sha512.sha384 if name[3:] == '384' else _sha512.sha512
    raise ValueError('unsupported hash type %s' % name)
def __get_openssl_constructor(name):
    """Prefer the OpenSSL-backed constructor; fall back to the builtin one."""
    try:
        f = getattr(_hashlib, 'openssl_' + name)
        # Allow the C module to raise ValueError: the function may exist
        # even though this OpenSSL build doesn't actually provide the hash.
        f()
        # Use the C function directly (very fast).
        return f
    except (AttributeError, ValueError):
        return __get_builtin_constructor(name)
def __py_new(name, string=''):
    """new(name, string='') - Return a new hashing object using the named algorithm;
    optionally initialized with a string.
    """
    constructor = __get_builtin_constructor(name)
    return constructor(string)

def __hash_new(name, string=''):
    """new(name, string='') - Return a new hashing object using the named algorithm;
    optionally initialized with a string.
    """
    try:
        return _hashlib.new(name, string)
    except ValueError:
        # OpenSSL may lack the named hash (e.g. SHA224/256 and SHA384/512
        # before OpenSSL 0.9.8); fall back to the builtin implementations.
        return __get_builtin_constructor(name)(string)
# Prefer OpenSSL-backed hashing whenever the _hashlib accelerator is present.
try:
    import _hashlib
    new = __hash_new
    __get_hash = __get_openssl_constructor
except ImportError:
    new = __py_new
    __get_hash = __get_builtin_constructor

# Publish a module-level constructor for every guaranteed algorithm.
for __func_name in __always_supported:
    # try them all, some may not work due to the OpenSSL
    # version not supporting that algorithm.
    try:
        globals()[__func_name] = __get_hash(__func_name)
    except ValueError:
        import logging
        logging.exception('code for hash %s was not found.', __func_name)

# Cleanup locals()
del __always_supported, __func_name, __get_hash
del __py_new, __hash_new, __get_openssl_constructor
| Python |
"""Find modules used by a script, using introspection."""
# This module should be kept compatible with Python 2.2, see PEP 291.
from __future__ import generators
import dis
import imp
import marshal
import os
import sys
import types
import struct
# Open sources with universal newlines where the runtime supports them.
if hasattr(sys.__stdout__, "newlines"):
    READ_MODE = "U"  # universal line endings
else:
    # remain compatible with Python  < 2.3
    READ_MODE = "r"

# Single-byte opcode constants used by the bytecode scanners below.
LOAD_CONST = chr(dis.opname.index('LOAD_CONST'))
IMPORT_NAME = chr(dis.opname.index('IMPORT_NAME'))
STORE_NAME = chr(dis.opname.index('STORE_NAME'))
STORE_GLOBAL = chr(dis.opname.index('STORE_GLOBAL'))
STORE_OPS = [STORE_NAME, STORE_GLOBAL]
HAVE_ARGUMENT = chr(dis.HAVE_ARGUMENT)
# Modulefinder does a good job of simulating Python's import machinery,
# but it cannot handle __path__ modifications that packages make at
# runtime.  Therefore there is a mechanism whereby you can register
# extra paths in this map for a package, and it will be honored.

# Note: this is a mapping from package names to lists of paths.
packagePathMap = {}

# A Public interface
def AddPackagePath(packagename, path):
    """Register an extra __path__ entry for the named package."""
    packagePathMap.setdefault(packagename, []).append(path)
replacePackageMap = {}

# This ReplacePackage mechanism allows modulefinder to work around the
# way the _xmlplus package injects itself under the name "xml" into
# sys.modules at runtime by calling ReplacePackage("_xmlplus", "xml")
# before running ModuleFinder.
def ReplacePackage(oldname, newname):
    """Record that packages found as `oldname` should be reported as `newname`."""
    replacePackageMap[oldname] = newname
class Module:
    """One module discovered by ModuleFinder."""

    def __init__(self, name, file=None, path=None):
        self.__name__ = name
        self.__file__ = file
        self.__path__ = path
        self.__code__ = None
        # The set of global names that are assigned to in the module,
        # including names imported through starimports of Python modules.
        self.globalnames = {}
        # The set of starimports this module did that could not be
        # resolved, ie. a starimport from a non-Python module.
        self.starimports = {}

    def __repr__(self):
        pieces = ["Module(%r" % (self.__name__,)]
        if self.__file__ is not None:
            pieces.append(", %r" % (self.__file__,))
        if self.__path__ is not None:
            pieces.append(", %r" % (self.__path__,))
        pieces.append(")")
        return "".join(pieces)
class ModuleFinder:
def __init__(self, path=None, debug=0, excludes=None, replace_paths=None):
    """Create a module finder.

    path: module search path (defaults to sys.path).
    debug: verbosity level consumed by the msg*() helpers.
    excludes: module names that must never be imported.
    replace_paths: (oldpath, newpath) pairs applied to co_filename.
    """
    if path is None:
        path = sys.path
    self.path = path
    self.modules = {}
    self.badmodules = {}
    self.debug = debug
    self.indent = 0
    # Fix: the [] defaults were mutable default arguments shared across
    # all instances; normalize None -> fresh list instead.
    self.excludes = excludes if excludes is not None else []
    self.replace_paths = replace_paths if replace_paths is not None else []
    self.processed_paths = []   # Used in debugging only
def msg(self, level, str, *args):
    # Emit a debug line when `level` is within the configured verbosity,
    # indenting to reflect the current import nesting depth.
    if level <= self.debug:
        for i in range(self.indent):
            print " ",
        print str,
        for arg in args:
            print repr(arg),
        print
def msgin(self, *args):
    # Like msg(), but first increases the indent (entering a nested step).
    level = args[0]
    if level > self.debug:
        return
    self.indent += 1
    self.msg(*args)

def msgout(self, *args):
    # Like msg(), but first decreases the indent (leaving a nested step).
    level = args[0]
    if level > self.debug:
        return
    self.indent -= 1
    self.msg(*args)
def run_script(self, pathname):
    """Analyze the given script as if it were the __main__ module."""
    self.msg(2, "run_script", pathname)
    fp = open(pathname, READ_MODE)
    try:
        stuff = ("", "r", imp.PY_SOURCE)
        self.load_module('__main__', fp, pathname, stuff)
    finally:
        # Fix: the file object used to be leaked.
        fp.close()

def load_file(self, pathname):
    """Analyze one source file under its own (extension-less) module name."""
    # Fix: dropped the unused directory component (the old local `dir`
    # also shadowed the builtin).
    name, ext = os.path.splitext(os.path.basename(pathname))
    fp = open(pathname, READ_MODE)
    try:
        stuff = (ext, "r", imp.PY_SOURCE)
        self.load_module(name, fp, pathname, stuff)
    finally:
        # Fix: the file object used to be leaked.
        fp.close()
def import_hook(self, name, caller=None, fromlist=None, level=-1):
    """Simulate an import statement; returns the head package (or None
    when a fromlist was given)."""
    self.msg(3, "import_hook", name, caller, fromlist, level)
    parent = self.determine_parent(caller, level=level)
    head, tail = self.find_head_package(parent, name)
    module = self.load_tail(head, tail)
    if not fromlist:
        return head
    if module.__path__:
        # Only packages can supply submodules for the fromlist.
        self.ensure_fromlist(module, fromlist)
    return None
def determine_parent(self, caller, level=-1):
    """Return the package Module an import is relative to, or None.

    level follows the __import__ convention: -1 classic
    relative-then-absolute, 0 absolute, >= 1 an explicit relative import
    that many levels up from `caller`.
    """
    self.msgin(4, "determine_parent", caller, level)
    if not caller or level == 0:
        self.msgout(4, "determine_parent -> None")
        return None
    pname = caller.__name__
    if level >= 1: # relative import
        if caller.__path__:
            # caller is itself a package, so it counts as one level.
            level -= 1
        if level == 0:
            parent = self.modules[pname]
            assert parent is caller
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if pname.count(".") < level:
            raise ImportError, "relative importpath too deep"
        # Strip `level` trailing components to find the ancestor package.
        pname = ".".join(pname.split(".")[:-level])
        parent = self.modules[pname]
        self.msgout(4, "determine_parent ->", parent)
        return parent
    if caller.__path__:
        # A package is its own import context.
        parent = self.modules[pname]
        assert caller is parent
        self.msgout(4, "determine_parent ->", parent)
        return parent
    if '.' in pname:
        # A plain module: its enclosing package is the context.
        i = pname.rfind('.')
        pname = pname[:i]
        parent = self.modules[pname]
        assert parent.__name__ == pname
        self.msgout(4, "determine_parent ->", parent)
        return parent
    self.msgout(4, "determine_parent -> None")
    return None
def find_head_package(self, parent, name):
    """Import the first dotted component of `name`.

    Returns (module, tail) where tail is the still-unimported remainder
    of the dotted name.  Raises ImportError when the head cannot be found.
    """
    self.msgin(4, "find_head_package", parent, name)
    if '.' in name:
        i = name.find('.')
        head = name[:i]
        tail = name[i+1:]
    else:
        head = name
        tail = ""
    if parent:
        qname = "%s.%s" % (parent.__name__, head)
    else:
        qname = head
    q = self.import_module(head, qname, parent)
    if q:
        self.msgout(4, "find_head_package ->", (q, tail))
        return q, tail
    if parent:
        # Classic relative lookup failed; retry as a top-level module.
        qname = head
        parent = None
        q = self.import_module(head, qname, parent)
        if q:
            self.msgout(4, "find_head_package ->", (q, tail))
            return q, tail
    self.msgout(4, "raise ImportError: No module named", qname)
    raise ImportError, "No module named " + qname
def load_tail(self, q, tail):
    """Import each remaining dotted component after the head package.

    Returns the final (deepest) module; raises ImportError when any
    component cannot be found.
    """
    self.msgin(4, "load_tail", q, tail)
    m = q
    while tail:
        i = tail.find('.')
        if i < 0: i = len(tail)
        head, tail = tail[:i], tail[i+1:]
        mname = "%s.%s" % (m.__name__, head)
        m = self.import_module(head, mname, m)
        if not m:
            self.msgout(4, "raise ImportError: No module named", mname)
            raise ImportError, "No module named " + mname
    self.msgout(4, "load_tail ->", m)
    return m
def ensure_fromlist(self, m, fromlist, recursive=0):
    """Import the submodules named by a `from package import ...` list.

    A "*" entry expands to every submodule of m; `recursive` guards
    against expanding "*" again inside that expansion.
    """
    self.msg(4, "ensure_fromlist", m, fromlist, recursive)
    for sub in fromlist:
        if sub == "*":
            if not recursive:
                all = self.find_all_submodules(m)
                if all:
                    self.ensure_fromlist(m, all, 1)
        elif not hasattr(m, sub):
            # Only names not already present on the package can be
            # submodules that still need importing.
            subname = "%s.%s" % (m.__name__, sub)
            submod = self.import_module(sub, subname, m)
            if not submod:
                raise ImportError, "No module named " + subname
def find_all_submodules(self, m):
    """Return the names of all submodules found along m.__path__.

    Returns None when m is not a package.
    """
    if not m.__path__:
        return
    modules = {}
    # 'suffixes' used to be a list hardcoded to [".py", ".pyc", ".pyo"].
    # But we must also collect Python extension modules - although
    # we cannot separate normal dlls from Python extensions.
    suffixes = [triple[0] for triple in imp.get_suffixes()]
    # Fix: loop variable was named `dir`, shadowing the builtin.
    for dirname in m.__path__:
        try:
            names = os.listdir(dirname)
        except os.error:
            self.msg(2, "can't list directory", dirname)
            continue
        for name in names:
            mod = None
            for suff in suffixes:
                # Idiom: str.endswith instead of manual negative slicing.
                if name.endswith(suff):
                    mod = name[:-len(suff)]
                    break
            if mod and mod != "__init__":
                modules[mod] = mod
    return modules.keys()
def import_module(self, partname, fqname, parent):
    """Import a single module; return its Module object or None.

    Results are memoized in self.modules, and known failures in
    self.badmodules, so each name is processed at most once.
    """
    self.msgin(3, "import_module", partname, fqname, parent)
    try:
        m = self.modules[fqname]
    except KeyError:
        pass
    else:
        # Already imported earlier in this run.
        self.msgout(3, "import_module ->", m)
        return m
    if fqname in self.badmodules:
        self.msgout(3, "import_module -> None")
        return None
    if parent and parent.__path__ is None:
        # The parent is not a package, so it cannot own submodules.
        self.msgout(3, "import_module -> None")
        return None
    try:
        fp, pathname, stuff = self.find_module(partname,
                                               parent and parent.__path__, parent)
    except ImportError:
        self.msgout(3, "import_module ->", None)
        return None
    try:
        m = self.load_module(fqname, fp, pathname, stuff)
    finally:
        # find_module opened the file; always close it.
        if fp: fp.close()
    if parent:
        # Make the submodule reachable as an attribute of its package.
        setattr(parent, partname, m)
    self.msgout(3, "import_module ->", m)
    return m
def load_module(self, fqname, fp, pathname, file_info):
    """Compile or unmarshal one found module and scan it for imports.

    file_info is the (suffix, mode, type) triple from find_module().
    """
    suffix, mode, type = file_info
    self.msgin(2, "load_module", fqname, fp and "fp", pathname)
    if type == imp.PKG_DIRECTORY:
        m = self.load_package(fqname, pathname)
        self.msgout(2, "load_module ->", m)
        return m
    if type == imp.PY_SOURCE:
        # Trailing newline keeps compile() happy on files missing one.
        co = compile(fp.read()+'\n', pathname, 'exec')
    elif type == imp.PY_COMPILED:
        # .pyc layout: 4-byte magic, 4-byte timestamp, marshalled code.
        if fp.read(4) != imp.get_magic():
            self.msgout(2, "raise ImportError: Bad magic number", pathname)
            raise ImportError, "Bad magic number in %s" % pathname
        fp.read(4)
        co = marshal.load(fp)
    else:
        # Extension modules etc. have no bytecode we can scan.
        co = None
    m = self.add_module(fqname)
    m.__file__ = pathname
    if co:
        if self.replace_paths:
            co = self.replace_paths_in_code(co)
        m.__code__ = co
        self.scan_code(co, m)
    self.msgout(2, "load_module ->", m)
    return m
def _add_badmodule(self, name, caller):
    # Record who tried to import the unresolvable module `name`;
    # "-" stands in for "no caller".
    importers = self.badmodules.setdefault(name, {})
    importers[caller.__name__ if caller else "-"] = 1
def _safe_import_hook(self, name, caller, fromlist, level=-1):
    # wrapper for self.import_hook() that won't raise ImportError;
    # failures are recorded in self.badmodules instead.
    if name in self.badmodules:
        self._add_badmodule(name, caller)
        return
    try:
        self.import_hook(name, caller, level=level)
    except ImportError, msg:
        self.msg(2, "ImportError:", str(msg))
        self._add_badmodule(name, caller)
    else:
        if fromlist:
            # Import each requested submodule individually so one bad
            # name doesn't hide the others.
            for sub in fromlist:
                if sub in self.badmodules:
                    self._add_badmodule(sub, caller)
                    continue
                try:
                    self.import_hook(name, caller, [sub], level=level)
                except ImportError, msg:
                    self.msg(2, "ImportError:", str(msg))
                    fullname = name + "." + sub
                    self._add_badmodule(fullname, caller)
def scan_opcodes(self, co,
                 unpack = struct.unpack):
    # Scan the code, and yield 'interesting' opcode combinations
    # Version for Python 2.4 and older
    code = co.co_code
    names = co.co_names
    consts = co.co_consts
    while code:
        c = code[0]
        if c in STORE_OPS:
            # A global/module-level assignment: yield the stored name.
            oparg, = unpack('<H', code[1:3])
            yield "store", (names[oparg],)
            code = code[3:]
            continue
        if c == LOAD_CONST and code[3] == IMPORT_NAME:
            # LOAD_CONST (the fromlist) directly before IMPORT_NAME
            # is how `import` compiles on <= 2.4.
            oparg_1, oparg_2 = unpack('<xHxH', code[:6])
            yield "import", (consts[oparg_1], names[oparg_2])
            code = code[6:]
            continue
        # Any other opcode: skip 3 bytes if it takes an argument, else 1.
        if c >= HAVE_ARGUMENT:
            code = code[3:]
        else:
            code = code[1:]
def scan_opcodes_25(self, co,
                    unpack = struct.unpack):
    # Scan the code, and yield 'interesting' opcode combinations
    # Python 2.5 version (has absolute and relative imports)
    code = co.co_code
    names = co.co_names
    consts = co.co_consts
    # On 2.5+ imports compile to LOAD_CONST (level), LOAD_CONST
    # (fromlist), IMPORT_NAME.
    LOAD_LOAD_AND_IMPORT = LOAD_CONST + LOAD_CONST + IMPORT_NAME
    while code:
        c = code[0]
        if c in STORE_OPS:
            oparg, = unpack('<H', code[1:3])
            yield "store", (names[oparg],)
            code = code[3:]
            continue
        if code[:9:3] == LOAD_LOAD_AND_IMPORT:
            oparg_1, oparg_2, oparg_3 = unpack('<xHxHxH', code[:9])
            level = consts[oparg_1]
            if level == -1: # normal import
                yield "import", (consts[oparg_2], names[oparg_3])
            elif level == 0: # absolute import
                yield "absolute_import", (consts[oparg_2], names[oparg_3])
            else: # relative import
                yield "relative_import", (level, consts[oparg_2], names[oparg_3])
            code = code[9:]
            continue
        # Any other opcode: skip 3 bytes if it takes an argument, else 1.
        if c >= HAVE_ARGUMENT:
            code = code[3:]
        else:
            code = code[1:]
def scan_code(self, co, m):
    # Walk a code object, recording global stores on Module m and
    # recursively following every import the bytecode performs.
    code = co.co_code
    if sys.version_info >= (2, 5):
        scanner = self.scan_opcodes_25
    else:
        scanner = self.scan_opcodes
    for what, args in scanner(co):
        if what == "store":
            name, = args
            m.globalnames[name] = 1
        elif what in ("import", "absolute_import"):
            fromlist, name = args
            have_star = 0
            if fromlist is not None:
                if "*" in fromlist:
                    have_star = 1
                fromlist = [f for f in fromlist if f != "*"]
            if what == "absolute_import": level = 0
            else: level = -1
            self._safe_import_hook(name, m, fromlist, level=level)
            if have_star:
                # We've encountered an "import *". If it is a Python module,
                # the code has already been parsed and we can suck out the
                # global names.
                mm = None
                if m.__path__:
                    # At this point we don't know whether 'name' is a
                    # submodule of 'm' or a global module. Let's just try
                    # the full name first.
                    mm = self.modules.get(m.__name__ + "." + name)
                if mm is None:
                    mm = self.modules.get(name)
                if mm is not None:
                    m.globalnames.update(mm.globalnames)
                    m.starimports.update(mm.starimports)
                    if mm.__code__ is None:
                        # Star-import from a non-Python module: names
                        # cannot be resolved.
                        m.starimports[name] = 1
                else:
                    m.starimports[name] = 1
        elif what == "relative_import":
            level, fromlist, name = args
            if name:
                self._safe_import_hook(name, m, fromlist, level=level)
            else:
                # "from . import x" style: resolve against the parent.
                parent = self.determine_parent(m, level=level)
                self._safe_import_hook(parent.__name__, None, fromlist, level=0)
        else:
            # We don't expect anything else from the generator.
            raise RuntimeError(what)
    # Recurse into nested code objects (functions, classes, lambdas).
    for c in co.co_consts:
        if isinstance(c, type(co)):
            self.scan_code(c, m)
def load_package(self, fqname, pathname):
    """Load a package directory: set up __path__ and scan its __init__."""
    self.msgin(2, "load_package", fqname, pathname)
    newname = replacePackageMap.get(fqname)
    if newname:
        fqname = newname
    m = self.add_module(fqname)
    m.__file__ = pathname
    # As per comment at top of file, simulate runtime __path__ additions.
    m.__path__ = [pathname] + packagePathMap.get(fqname, [])
    fp, buf, stuff = self.find_module("__init__", m.__path__)
    self.load_module(fqname, fp, buf, stuff)
    self.msgout(2, "load_package ->", m)
    return m
def add_module(self, fqname):
    """Return the Module registered for *fqname*, creating and
    registering a fresh one on first sight."""
    try:
        return self.modules[fqname]
    except KeyError:
        module = Module(fqname)
        self.modules[fqname] = module
        return module
def find_module(self, name, path, parent=None):
    """Locate module *name* on *path* (or self.path / builtins when path
    is None) and return imp.find_module's (file, pathname, description)
    triple.

    Raises ImportError when the fully-qualified name is in self.excludes.
    """
    if parent is not None:
        # assert path is not None
        fullname = parent.__name__+'.'+name
    else:
        fullname = name
    if fullname in self.excludes:
        self.msgout(3, "find_module -> Excluded", fullname)
        raise ImportError, name

    if path is None:
        if name in sys.builtin_module_names:
            # Built-in modules have no file; synthesize a description triple.
            return (None, None, ("", "", imp.C_BUILTIN))

        path = self.path
    return imp.find_module(name, path)
def report(self):
"""Print a report to stdout, listing the found modules with their
paths, as well as modules that are missing, or seem to be missing.
"""
print
print " %-25s %s" % ("Name", "File")
print " %-25s %s" % ("----", "----")
# Print modules found
keys = self.modules.keys()
keys.sort()
for key in keys:
m = self.modules[key]
if m.__path__:
print "P",
else:
print "m",
print "%-25s" % key, m.__file__ or ""
# Print missing modules
missing, maybe = self.any_missing_maybe()
if missing:
print
print "Missing modules:"
for name in missing:
mods = self.badmodules[name].keys()
mods.sort()
print "?", name, "imported from", ', '.join(mods)
# Print modules that may be missing, but then again, maybe not...
if maybe:
print
print "Submodules thay appear to be missing, but could also be",
print "global names in the parent package:"
for name in maybe:
mods = self.badmodules[name].keys()
mods.sort()
print "?", name, "imported from", ', '.join(mods)
def any_missing(self):
    """Return a list of modules that appear to be missing. Use
    any_missing_maybe() if you want to know which modules are
    certain to be missing, and which *may* be missing.
    """
    certain, possible = self.any_missing_maybe()
    return certain + possible
def any_missing_maybe(self):
    """Return two sorted lists: modules that are certainly missing, and
    modules that *may* be missing.

    A dotted name may be either a missing submodule or merely a global
    name inside its package; when the package did a "from x import *"
    on an extension module we cannot tell which, so it goes in the
    second ("maybe") list.
    """
    missing = []
    maybe = []
    for name in self.badmodules:
        if name in self.excludes:
            continue
        dot = name.rfind(".")
        if dot < 0:
            # Undotted name: a top-level module that simply wasn't found.
            missing.append(name)
            continue
        subname = name[dot+1:]
        pkgname = name[:dot]
        pkg = self.modules.get(pkgname)
        if pkg is None:
            # Parent package itself is unknown.
            missing.append(name)
        elif pkgname in self.badmodules[name]:
            # The package tried to import this module itself and
            # failed. It's definitely missing.
            missing.append(name)
        elif subname in pkg.globalnames:
            # It's a global in the package: definitely not missing.
            pass
        elif pkg.starimports:
            # It could be missing, but the package did an "import *"
            # from a non-Python module, so we simply can't be sure.
            maybe.append(name)
        else:
            # It's not a global in the package, the package didn't
            # do funny star imports, it's very likely to be missing.
            # The symbol could be inserted into the package from the
            # outside, but since that's not good style we simply list
            # it missing.
            missing.append(name)
    missing.sort()
    maybe.sort()
    return missing, maybe
def replace_paths_in_code(self, co):
    """Return a copy of code object *co* with co_filename rewritten
    according to the (prefix, replacement) pairs in self.replace_paths,
    recursing into nested code objects in co_consts."""
    original = os.path.normpath(co.co_filename)
    rewritten = original
    for prefix, replacement in self.replace_paths:
        if original.startswith(prefix):
            rewritten = replacement + original[len(prefix):]
            break

    # In debug mode, report each distinct filename exactly once.
    if self.debug and original not in self.processed_paths:
        if rewritten != original:
            self.msgout(2, "co_filename %r changed to %r" \
                                % (original, rewritten,))
        else:
            self.msgout(2, "co_filename %r remains unchanged" \
                                % (original,))
        self.processed_paths.append(original)

    # Rewrite nested code objects too.
    consts = [self.replace_paths_in_code(const)
              if isinstance(const, type(co)) else const
              for const in co.co_consts]

    return types.CodeType(co.co_argcount, co.co_nlocals, co.co_stacksize,
                     co.co_flags, co.co_code, tuple(consts), co.co_names,
                     co.co_varnames, rewritten, co.co_name,
                     co.co_firstlineno, co.co_lnotab,
                     co.co_freevars, co.co_cellvars)
def test():
    """Command-line driver / demo for ModuleFinder.

    Options:
      -d  increase the debug level
      -m  treat remaining arguments as module names to import
      -p  prepend an os.pathsep-separated list to the search path
      -q  quiet (debug level 0)
      -x  exclude the named module
    The first positional argument is the script to analyze (default
    "hello.py"); the rest are extra files/modules to pull in.
    """
    # Parse command line
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
    except getopt.error, msg:
        print msg
        return

    # Process options
    debug = 1
    domods = 0
    addpath = []
    exclude = []
    for o, a in opts:
        if o == '-d':
            debug = debug + 1
        if o == '-m':
            domods = 1
        if o == '-p':
            addpath = addpath + a.split(os.pathsep)
        if o == '-q':
            debug = 0
        if o == '-x':
            exclude.append(a)

    # Provide default arguments
    if not args:
        script = "hello.py"
    else:
        script = args[0]

    # Set the path based on sys.path and the script directory
    path = sys.path[:]
    path[0] = os.path.dirname(script)
    path = addpath + path
    if debug > 1:
        print "path:"
        for item in path:
            print " ", repr(item)

    # Create the module finder and turn its crank
    mf = ModuleFinder(path, debug, exclude)
    for arg in args[1:]:
        if arg == '-m':
            domods = 1
            continue
        if domods:
            # "pkg.*" means: import the package with a fromlist of "*".
            if arg[-2:] == '.*':
                mf.import_hook(arg[:-2], None, ["*"])
            else:
                mf.import_hook(arg)
        else:
            mf.load_file(arg)
    mf.run_script(script)
    mf.report()
    return mf  # for -i debugging
# Run the demo driver when invoked as a script; exit quietly on Ctrl-C.
if __name__ == '__main__':
    try:
        mf = test()
    except KeyboardInterrupt:
        print "\n[interrupt]"
| Python |
"""Parser for command line options.
This module helps scripts to parse the command line arguments in
sys.argv. It supports the same conventions as the Unix getopt()
function (including the special meanings of arguments of the form `-'
and `--'). Long options similar to those supported by GNU software
may be used as well via an optional third argument. This module
provides two functions and an exception:
getopt() -- Parse command line options
gnu_getopt() -- Like getopt(), but allow option and non-option arguments
to be intermixed.
GetoptError -- exception (class) raised with 'opt' attribute, which is the
option involved with the exception.
"""
# Long option support added by Lars Wirzenius <liw@iki.fi>.
#
# Gerrit Holl <gerrit@nl.linux.org> moved the string-based exceptions
# to class-based exceptions.
#
# Peter Astrand <astrand@lysator.liu.se> added gnu_getopt().
#
# TODO for gnu_getopt():
#
# - GNU getopt_long_only mechanism
# - allow the caller to specify ordering
# - RETURN_IN_ORDER option
# - GNU extension with '-' as first character of option string
# - optional arguments, specified by double colons
# - a option string with a W followed by semicolon should
# treat "-W foo" as "--foo"
__all__ = ["GetoptError","error","getopt","gnu_getopt"]
import os
class GetoptError(Exception):
    """Raised when the argument list cannot be parsed.

    Attributes:
      msg -- human-readable description of the error
      opt -- the option (without leading dashes) that caused it, or ''
    """
    msg = ''
    opt = ''

    def __init__(self, msg, opt=''):
        self.msg = msg
        self.opt = opt
        Exception.__init__(self, msg, opt)

    def __str__(self):
        return self.msg

# Older releases exposed a string exception named `error`; keep the alias.
error = GetoptError
def getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
Parses command line options and parameter list. args is the
argument list to be parsed, without the leading reference to the
running program. Typically, this means "sys.argv[1:]". shortopts
is the string of option letters that the script wants to
recognize, with options that require an argument followed by a
colon (i.e., the same format that Unix getopt() uses). If
specified, longopts is a list of strings with the names of the
long options which should be supported. The leading '--'
characters should not be included in the option name. Options
which require an argument should be followed by an equal sign
('=').
The return value consists of two elements: the first is a list of
(option, value) pairs; the second is the list of program arguments
left after the option list was stripped (this is a trailing slice
of the first argument). Each option-and-value pair returned has
the option as its first element, prefixed with a hyphen (e.g.,
'-x'), and the option argument as its second element, or an empty
string if the option has no argument. The options occur in the
list in the same order in which they were found, thus allowing
multiple occurrences. Long and short options may be mixed.
"""
opts = []
if type(longopts) == type(""):
longopts = [longopts]
else:
longopts = list(longopts)
while args and args[0].startswith('-') and args[0] != '-':
if args[0] == '--':
args = args[1:]
break
if args[0].startswith('--'):
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
else:
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
return opts, args
def gnu_getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
This function works like getopt(), except that GNU style scanning
mode is used by default. This means that option and non-option
arguments may be intermixed. The getopt() function stops
processing options as soon as a non-option argument is
encountered.
If the first character of the option string is `+', or if the
environment variable POSIXLY_CORRECT is set, then option
processing stops as soon as a non-option argument is encountered.
"""
opts = []
prog_args = []
if isinstance(longopts, str):
longopts = [longopts]
else:
longopts = list(longopts)
# Allow options after non-option arguments?
if shortopts.startswith('+'):
shortopts = shortopts[1:]
all_options_first = True
elif os.environ.get("POSIXLY_CORRECT"):
all_options_first = True
else:
all_options_first = False
while args:
if args[0] == '--':
prog_args += args[1:]
break
if args[0][:2] == '--':
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
elif args[0][:1] == '-' and args[0] != '-':
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
else:
if all_options_first:
prog_args += args
break
else:
prog_args.append(args[0])
args = args[1:]
return opts, prog_args
def do_longs(opts, opt, longopts, args):
    """Consume one long option token *opt* (without its leading "--").

    Appends the ('--name', value) pair to *opts* and returns the updated
    (opts, args).  A required argument is taken from the "=value" suffix
    when present, otherwise from the next word in *args*.
    """
    opt, sep, optarg = opt.partition('=')
    if not sep:
        optarg = None

    has_arg, opt = long_has_args(opt, longopts)
    if has_arg and optarg is None:
        # Argument was not attached with "=": take the next word.
        if not args:
            raise GetoptError('option --%s requires argument' % opt, opt)
        optarg, args = args[0], args[1:]
    elif not has_arg and optarg is not None:
        raise GetoptError('option --%s must not have an argument' % opt, opt)

    opts.append(('--' + opt, optarg or ''))
    return opts, args
def long_has_args(opt, longopts):
    """Resolve *opt*, a possibly-abbreviated long option, against *longopts*.

    Return a (takes_argument, full_option_name) pair.  Raise GetoptError
    when the option is unknown or the abbreviation is ambiguous.
    """
    candidates = [o for o in longopts if o.startswith(opt)]
    if not candidates:
        raise GetoptError('option --%s not recognized' % opt, opt)
    # Exact matches win over prefix matches.
    if opt in candidates:
        return False, opt
    if opt + '=' in candidates:
        return True, opt
    # No exact match, so the abbreviation must be unique.
    if len(candidates) > 1:
        # XXX since candidates contains all valid continuations, might be
        # nice to work them into the error msg
        raise GetoptError('option --%s not a unique prefix' % opt, opt)
    assert len(candidates) == 1
    match = candidates[0]
    if match.endswith('='):
        # Trailing '=' in the table means "takes an argument".
        return True, match[:-1]
    return False, match
def do_shorts(opts, optstring, shortopts, args):
    """Consume one clustered short-option token *optstring* (without its
    leading "-"), appending ('-x', value) pairs to *opts*.

    Returns the updated (opts, args).  An option that requires an
    argument takes the rest of the cluster, or the next word in *args*.
    """
    while optstring:
        opt = optstring[0]
        optstring = optstring[1:]
        if not short_has_arg(opt, shortopts):
            optarg = ''
        else:
            if not optstring:
                # Argument is the next word on the command line.
                if not args:
                    raise GetoptError('option -%s requires argument' % opt,
                                      opt)
                optstring, args = args[0], args[1:]
            optarg = optstring
            optstring = ''
        opts.append(('-' + opt, optarg))
    return opts, args
def short_has_arg(opt, shortopts):
    """Return True if short option *opt* takes an argument, i.e. it is
    followed by ':' in *shortopts*; raise GetoptError if unknown."""
    for index, letter in enumerate(shortopts):
        # ':' is a marker, never an option letter itself.
        if letter != ':' and letter == opt:
            return shortopts.startswith(':', index + 1)
    raise GetoptError('option -%s not recognized' % opt, opt)
# Tiny smoke test when run as a script.
if __name__ == '__main__':
    import sys
    print getopt(sys.argv[1:], "a:b", ["alpha=", "beta"])
| Python |
#! /usr/bin/env python
# Copyright 1994 by Lance Ellinghouse
# Cathedral City, California Republic, United States of America.
# All Rights Reserved
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Lance Ellinghouse
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# LANCE ELLINGHOUSE DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS, IN NO EVENT SHALL LANCE ELLINGHOUSE CENTRUM BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Modified by Jack Jansen, CWI, July 1995:
# - Use binascii module to do the actual line-by-line conversion
# between ascii and binary. This results in a 1000-fold speedup. The C
# version is still 5 times faster, though.
# - Arguments more compliant with python standard
"""Implementation of the UUencode and UUdecode functions.
encode(in_file, out_file [,name, mode])
decode(in_file [, out_file, mode])
"""
import binascii
import os
import sys
__all__ = ["Error", "encode", "decode"]
class Error(Exception):
    """Base exception for uuencode/uudecode failures in this module."""
    pass
def encode(in_file, out_file, name=None, mode=None):
    """Uuencode file.

    in_file and out_file may each be '-' (stdin/stdout), a pathname, or
    an open file object.  *name* and *mode* default to the input file's
    basename and permission bits (0666 when unknown) and are recorded in
    the "begin" header line.
    """
    #
    # If in_file is a pathname open it and change defaults
    #
    opened_files = []
    try:
        if in_file == '-':
            in_file = sys.stdin
        elif isinstance(in_file, basestring):
            if name is None:
                name = os.path.basename(in_file)
            if mode is None:
                try:
                    mode = os.stat(in_file).st_mode
                except AttributeError:
                    pass
            in_file = open(in_file, 'rb')
            opened_files.append(in_file)
        #
        # Open out_file if it is a pathname
        #
        if out_file == '-':
            out_file = sys.stdout
        elif isinstance(out_file, basestring):
            out_file = open(out_file, 'wb')
            opened_files.append(out_file)
        #
        # Set defaults for name and mode
        #
        if name is None:
            name = '-'
        if mode is None:
            mode = 0666
        #
        # Write the data
        #
        # Only the permission bits (mode & 0777) go into the header.
        out_file.write('begin %o %s\n' % ((mode&0777),name))
        data = in_file.read(45)  # 45 raw bytes -> one uuencoded line
        while len(data) > 0:
            out_file.write(binascii.b2a_uu(data))
            data = in_file.read(45)
        out_file.write(' \nend\n')
    finally:
        # Close only the files this function itself opened.
        for f in opened_files:
            f.close()
def decode(in_file, out_file=None, mode=None, quiet=0):
"""Decode uuencoded file"""
#
# Open the input file, if needed.
#
opened_files = []
if in_file == '-':
in_file = sys.stdin
elif isinstance(in_file, basestring):
in_file = open(in_file)
opened_files.append(in_file)
try:
#
# Read until a begin is encountered or we've exhausted the file
#
while True:
hdr = in_file.readline()
if not hdr:
raise Error('No valid begin line found in input file')
if not hdr.startswith('begin'):
continue
hdrfields = hdr.split(' ', 2)
if len(hdrfields) == 3 and hdrfields[0] == 'begin':
try:
int(hdrfields[1], 8)
break
except ValueError:
pass
if out_file is None:
out_file = hdrfields[2].rstrip()
if os.path.exists(out_file):
raise Error('Cannot overwrite existing file: %s' % out_file)
if mode is None:
mode = int(hdrfields[1], 8)
#
# Open the output file
#
if out_file == '-':
out_file = sys.stdout
elif isinstance(out_file, basestring):
fp = open(out_file, 'wb')
try:
os.path.chmod(out_file, mode)
except AttributeError:
pass
out_file = fp
opened_files.append(out_file)
#
# Main decoding loop
#
s = in_file.readline()
while s and s.strip() != 'end':
try:
data = binascii.a2b_uu(s)
except binascii.Error, v:
# Workaround for broken uuencoders by /Fredrik Lundh
nbytes = (((ord(s[0])-32) & 63) * 4 + 5) // 3
data = binascii.a2b_uu(s[:nbytes])
if not quiet:
sys.stderr.write("Warning: %s\n" % v)
out_file.write(data)
s = in_file.readline()
if not s:
raise Error('Truncated input file')
finally:
for f in opened_files:
f.close()
def test():
    """uuencode/uudecode main program.

    Usage: prog [-d] [-t] [input [output]].  -d decodes instead of
    encoding; -t opens the pathname side in text mode for newline
    conversion (only valid when that side is a file, not a pipe).
    """
    import optparse
    parser = optparse.OptionParser(usage='usage: %prog [-d] [-t] [input [output]]')
    parser.add_option('-d', '--decode', dest='decode', help='Decode (instead of encode)?', default=False, action='store_true')
    parser.add_option('-t', '--text', dest='text', help='data is text, encoded format unix-compatible text?', default=False, action='store_true')

    (options, args) = parser.parse_args()
    if len(args) > 2:
        parser.error('incorrect number of arguments')
        sys.exit(1)  # NOTE(review): unreachable -- parser.error already exits

    # Defaults: act as a stdin-to-stdout filter.
    input = sys.stdin
    output = sys.stdout
    if len(args) > 0:
        input = args[0]
    if len(args) > 1:
        output = args[1]

    if options.decode:
        if options.text:
            # Reopen the output pathname in text mode for newline handling.
            if isinstance(output, basestring):
                output = open(output, 'w')
            else:
                print sys.argv[0], ': cannot do -t to stdout'
                sys.exit(1)
        decode(input, output)
    else:
        if options.text:
            # Reopen the input pathname in text mode for newline handling.
            if isinstance(input, basestring):
                input = open(input, 'r')
            else:
                print sys.argv[0], ': cannot do -t from stdin'
                sys.exit(1)
        encode(input, output)
# Run the command-line tool when invoked as a script.
if __name__ == '__main__':
    test()
| Python |
"""Load / save to libwww-perl (LWP) format files.
Actually, the format is slightly extended from that used by LWP's
(libwww-perl's) HTTP::Cookies, to avoid losing some RFC 2965 information
not recorded by LWP.
It uses the version string "2.0", though really there isn't an LWP Cookies
2.0 format. This indicates that there is extra information in here
(domain_dot and port_spec) while still being compatible with
libwww-perl, I hope.
"""
import time, re
from cookielib import (_warn_unhandled_exception, FileCookieJar, LoadError,
Cookie, MISSING_FILENAME_TEXT,
join_header_words, split_header_words,
iso2time, time2isoz)
def lwp_cookie_str(cookie):
    """Render *cookie* as one attribute list in the LWP cookie file format.

    Actually, the format is extended a bit -- see module docstring.
    """
    words = [(cookie.name, cookie.value),
             ("path", cookie.path),
             ("domain", cookie.domain)]
    if cookie.port is not None:
        words.append(("port", cookie.port))
    # Value-less boolean flags.
    if cookie.path_specified:
        words.append(("path_spec", None))
    if cookie.port_specified:
        words.append(("port_spec", None))
    if cookie.domain_initial_dot:
        words.append(("domain_dot", None))
    if cookie.secure:
        words.append(("secure", None))
    if cookie.expires:
        words.append(("expires", time2isoz(float(cookie.expires))))
    if cookie.discard:
        words.append(("discard", None))
    if cookie.comment:
        words.append(("comment", cookie.comment))
    if cookie.comment_url:
        words.append(("commenturl", cookie.comment_url))
    # Nonstandard attributes, sorted for a stable representation.
    for key in sorted(cookie._rest.keys()):
        words.append((key, str(cookie._rest[key])))
    words.append(("version", str(cookie.version)))
    return join_header_words([words])
class LWPCookieJar(FileCookieJar):
    """
    The LWPCookieJar saves a sequence of "Set-Cookie3" lines.
    "Set-Cookie3" is the format used by the libwww-perl library, not known
    to be compatible with any browser, but which is easy to read and
    doesn't lose information about RFC 2965 cookies.

    Additional methods

    as_lwp_str(ignore_discard=True, ignore_expired=True)

    """

    def as_lwp_str(self, ignore_discard=True, ignore_expires=True):
        """Return cookies as a string of "\n"-separated "Set-Cookie3" headers.

        ignore_discard and ignore_expires: see docstring for FileCookieJar.save

        """
        now = time.time()
        r = []
        for cookie in self:
            if not ignore_discard and cookie.discard:
                continue
            if not ignore_expires and cookie.is_expired(now):
                continue
            r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie))
        # The trailing "" produces a final newline.
        return "\n".join(r+[""])

    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Write all cookies to *filename* (default: self.filename) in
        Set-Cookie3 format.

        Raises ValueError when no filename is given and self.filename is
        not set either.
        """
        if filename is None:
            if self.filename is not None: filename = self.filename
            else: raise ValueError(MISSING_FILENAME_TEXT)

        f = open(filename, "w")
        try:
            # There really isn't an LWP Cookies 2.0 format, but this indicates
            # that there is extra information in here (domain_dot and
            # port_spec) while still being compatible with libwww-perl, I hope.
            f.write("#LWP-Cookies-2.0\n")
            f.write(self.as_lwp_str(ignore_discard, ignore_expires))
        finally:
            f.close()

    def _really_load(self, f, filename, ignore_discard, ignore_expires):
        """Parse "Set-Cookie3:" lines from open file *f* into this jar.

        Raises LoadError when the magic line or any cookie line is
        malformed; IOError is propagated unchanged.
        """
        magic = f.readline()
        if not re.search(self.magic_re, magic):
            msg = ("%r does not look like a Set-Cookie3 (LWP) format "
                   "file" % filename)
            raise LoadError(msg)

        now = time.time()

        header = "Set-Cookie3:"
        # Flag attributes (presence matters) vs. valued attributes.
        boolean_attrs = ("port_spec", "path_spec", "domain_dot",
                         "secure", "discard")
        value_attrs = ("version",
                       "port", "path", "domain",
                       "expires",
                       "comment", "commenturl")

        try:
            while 1:
                line = f.readline()
                if line == "": break
                if not line.startswith(header):
                    continue
                line = line[len(header):].strip()

                for data in split_header_words([line]):
                    name, value = data[0]
                    standard = {}
                    rest = {}
                    for k in boolean_attrs:
                        standard[k] = False
                    for k, v in data[1:]:
                        if k is not None:
                            lc = k.lower()
                        else:
                            lc = None
                        # don't lose case distinction for unknown fields
                        if (lc in value_attrs) or (lc in boolean_attrs):
                            k = lc
                        if k in boolean_attrs:
                            if v is None: v = True
                            standard[k] = v
                        elif k in value_attrs:
                            standard[k] = v
                        else:
                            rest[k] = v

                    h = standard.get
                    expires = h("expires")
                    discard = h("discard")
                    if expires is not None:
                        expires = iso2time(expires)
                    if expires is None:
                        # No parseable expiry: treat as a session cookie.
                        discard = True
                    domain = h("domain")
                    domain_specified = domain.startswith(".")
                    c = Cookie(h("version"), name, value,
                               h("port"), h("port_spec"),
                               domain, domain_specified, h("domain_dot"),
                               h("path"), h("path_spec"),
                               h("secure"),
                               expires,
                               discard,
                               h("comment"),
                               h("commenturl"),
                               rest)
                    if not ignore_discard and c.discard:
                        continue
                    if not ignore_expires and c.is_expired(now):
                        continue
                    self.set_cookie(c)
        except IOError:
            raise
        except Exception:
            # Any other failure means a malformed file: warn, then raise a
            # single failure type for callers to catch.
            _warn_unhandled_exception()
            raise LoadError("invalid Set-Cookie3 format file %r: %r" %
                            (filename, line))
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.