code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
#!/usr/bin/env python
from random import shuffle, choice, sample, random, randint
from bisect import bisect
from itertools import izip_longest
from math import sqrt
# helper stuff
fst = lambda s: s[0]
snd = lambda s: s[1]
def grouper(n, iterable, fillvalue=None):
    """Collect data into fixed-length chunks or blocks (itertools recipe).

    grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx
    An incomplete trailing chunk is padded with fillvalue.
    """
    # n references to the *same* iterator, so izip_longest pulls n
    # consecutive items into each output tuple.
    args = [iter(iterable)] * n
    return izip_longest(fillvalue=fillvalue, *args)
def rand_index_pair(start, end):
    """Return a sorted pair (lo, hi) of two *distinct* random ints drawn
    uniformly from the inclusive range [start, end]."""
    lo = randint(start, end)
    hi = lo
    # redraw until the two values differ (requires start < end)
    while hi == lo:
        hi = randint(start, end)
    if lo > hi:
        lo, hi = hi, lo
    return (lo, hi)
def cdf(weights):
    """Turn a sequence of non-negative weights into a cumulative
    distribution: a list of running sums normalised by the total, whose
    last entry is 1.0."""
    total = float(sum(weights))
    running = 0.0
    out = []
    for w in weights:
        running += w
        out.append(running / total)
    return out
def prob_choice(population, cdf):
    """Draw one element of population according to the cumulative
    distribution cdf (as produced by cdf() above)."""
    assert len(population) == len(cdf)
    # bisect maps a uniform draw in [0, 1) to the matching CDF bucket
    return population[bisect(cdf, random())]
##### selectors #####
def tournament_selection(population, num):
    """Yield num winners of independent size-2 tournaments; the individual
    with the *lower* fitness (shorter tour) wins each tournament."""
    for _ in xrange(num):
        a, b = sample(population, 2)
        yield a if snd(a) <= snd(b) else b
def rank_selection(population, num):
    """Yield num individuals drawn by linear rank-based selection.

    Individuals are ranked by ascending fitness (best = rank 1); rank k
    receives probability (2/N) * (1 - (k-1)/(N-1)), i.e. the best gets
    weight 2/N and the worst gets 0.
    """
    population = sorted(population, key=snd)
    N = len(population)
    probs = cdf([(2.0 / N) * (1.0 - (k - 1.0) / (N - 1.0))
                 for k in xrange(1, N + 1)])
    for _ in xrange(num):
        yield prob_choice(population, probs)
def roulette_selection(population, num):
    """Yield num individuals drawn with probability proportional to fitness.

    NOTE(review): fitness in this module is tour length, which tspEA
    *minimises*; fitness-proportionate selection favours large values and
    therefore prefers the worst tours here. Confirm intent before using.
    """
    probs = cdf(map(snd, population))
    for _ in xrange(num):
        yield prob_choice(population, probs)
##### cross-overs #####
def one_point_crossover(dad, mom):
    """Order-preserving one-point crossover for permutation genomes.

    A cut point is chosen uniformly in [0, len(mom)].  Each child keeps one
    parent's prefix up to the cut and appends the genes that prefix is
    missing, in the order they appear in the *other* parent -- so both
    children remain valid permutations.  One of the two children is
    returned, each with probability 1/2.

    (Removed: two unused nested cmp_* helpers relying on Python-2-only
    cmp(), plus commented-out debug prints.)
    """
    cut = randint(0, len(mom))
    # genes each parent contributes after the cut (exactly the ones the
    # corresponding prefix lacks)
    dad_set = set(dad[cut:])
    mom_set = set(mom[cut:])
    child0 = dad[:cut] + [x for x in mom if x in dad_set]
    child1 = mom[:cut] + [x for x in dad if x in mom_set]
    if random() < 0.5:
        return child0
    return child1
def partially_matched_crossover(dad, mom):
    """PMX crossover for permutation genomes over the values 0..n-1.

    Picks a random slice [a, b) and, position by position, swaps each
    child's gene with the position where the other parent's gene currently
    sits, keeping position lookup tables in sync so both children remain
    permutations.  Returns one of the two children with probability 1/2.

    NOTE(review): assumes genomes are permutations of 0..len-1 -- gene
    values are used directly as indices into the lookup tables p1/p2.
    """
    a, b = rand_index_pair(0, len(dad))
    child0 = dad[:]
    child1 = mom[:]
    # p1[v] / p2[v] = current index of value v inside child0 / child1
    p1, p2 = [0] * len(dad), [0] * len(dad)
    for i in xrange(len(dad)):
        p1[child0[i]] = i
        p2[child1[i]] = i
    for i in xrange(a, b):
        # Keep track of the selected values
        temp1 = child0[i]
        temp2 = child1[i]
        # Swap the matched value: child0 receives temp2 at i, and temp1
        # moves to where temp2 used to live (symmetrically for child1)
        child0[i], child0[p1[temp2]] = temp2, temp1
        child1[i], child1[p2[temp1]] = temp1, temp2
        # Position bookkeeping: keep the lookup tables consistent
        p1[temp1], p1[temp2] = p1[temp2], p1[temp1]
        p2[temp1], p2[temp2] = p2[temp2], p2[temp1]
    # both children must still be duplicate-free permutations
    assert(len(set(child0)) == len(set(child1)) == len(mom) == len(dad))
    if random() < 0.5:
        return child0
    else:
        return child1
def edge_recombination_crossover(ind0, ind1):
    """Edge recombination crossover (ERX) for cyclic tours over 0..n-1.

    Builds the union adjacency map of both parent tours (including the
    closing edge last->first), then grows a child by repeatedly stepping
    to the current city's neighbour with the fewest remaining neighbours;
    on a dead end a random unvisited city is chosen.

    Fix: the neighbour-selection condition evaluated
    ``len(...) < smallest_n`` before the None check -- comparing an int
    against None is undefined ordering (TypeError on Python 3; False only
    by CPython-2 accident).  The None test now short-circuits first.
    """
    neighbors = [set() for _ in range(len(ind0))]
    for u, v in list(zip(ind0, ind0[1:])) + [(ind0[-1], ind0[0])]:
        neighbors[u].add(v)
        neighbors[v].add(u)
    for u, v in list(zip(ind1, ind1[1:])) + [(ind1[-1], ind1[0])]:
        neighbors[u].add(v)
        neighbors[v].add(u)
    # start from either parent's first city with probability 1/2
    if random() < 0.5:
        current = ind0[0]
    else:
        current = ind1[0]
    child = [current]
    seen = set([current])
    while len(child) < len(ind0):
        # the current city is consumed: drop it from every adjacency set
        for n in neighbors:
            n.discard(current)
        # choose the neighbour with the smallest remaining neighbourhood
        smallest = None
        smallest_n = None
        for n in neighbors[current]:
            if smallest_n is None or len(neighbors[n]) < smallest_n:
                smallest = n
                smallest_n = len(neighbors[n])
        current = smallest
        if current is None:
            # dead end: restart from a random city not yet in the tour
            current = choice([i for i in range(len(ind0)) if i not in seen])
        child.append(current)
        seen.add(current)
    assert(len(child) == len(ind0))
    return child
##### mutators #####
def swap_mutator(xs):
    """Exchange two distinct, randomly chosen positions of xs in place.
    Returns the (mutated) list for convenience."""
    i, j = rand_index_pair(0, len(xs) - 1)
    xs[i], xs[j] = xs[j], xs[i]
    return xs
def inversion_mutator(xs):
    """Reverse a randomly chosen contiguous segment of xs in place.
    Returns the (mutated) list for convenience."""
    lo, hi = rand_index_pair(0, len(xs) - 1)
    xs[lo:hi + 1] = xs[lo:hi + 1][::-1]
    return xs
##### analyze #####
def analyze_population(iteration, maxiter, population, children):
    """Print one progress line: iteration counter, population size,
    fitness min/max/mean/stddev, and counts of unique children and of
    unique children plus parents (a diversity indicator)."""
    min_ind = min(population, key=snd)[1]
    max_ind = max(population, key=snd)[1]
    mean = sum(map(snd, population)) / float(len(population))
    # sample variance (Bessel-corrected, hence the len - 1 denominator)
    variance = sum((x[1] - mean)**2 for x in population) / float(len(population) - 1)
    print "{0:{2}d}/{1} ({3}), min={4}, max={5}, mean={6:.2f}, dev={7:.2f} ({8} unique childs, {9} childs + parents)".format(
        iteration, maxiter, len(str(maxiter)),
        len(population), min_ind, max_ind, mean, sqrt(variance), len(set(tuple(xs) for xs in children)),
        len(set([tuple(xs) for xs in children])) + len(set([tuple(xs) for (xs, _) in population])))
def tspEA(instance,
          crossover_selection,
          crossover,
          mutation,
          ngen_selection,
          num_parents,
          num_children,
          mutation_rate,
          maxiter,
          elitism):
    """Generational evolutionary algorithm for the TSP.

    instance            -- TSP instance providing __len__ and tour_length()
    crossover_selection -- generator(population, num) yielding parents
    crossover           -- function(genome, genome) -> child genome
    mutation            -- in-place mutator, applied with prob. mutation_rate
    ngen_selection      -- selector used to build the next generation
    num_parents         -- population size
    num_children        -- offspring produced per generation
    maxiter             -- number of generations
    elitism             -- if True the best individual always survives

    Individuals are (genome, tour_length) pairs; shorter tours are better.
    Prints the best individual whenever it improves; returns None.
    """
    # initialize population with random permutations
    population = []
    for _ in xrange(num_parents):
        xs = range(len(instance))
        shuffle(xs)
        population.append((xs, instance.tour_length(xs)))
    # start loop
    best_individuum = min(population, key=snd)
    print (0, best_individuum)
    for iteration in xrange(1, maxiter + 1):
        old_best = best_individuum
        tmp_children = []
        # select pairs for crossover: two selections per child
        for ind0, ind1 in grouper(2, crossover_selection(population, 2 * num_children)):
            child = crossover(ind0[0], ind1[0])
            tmp_children.append(child)
        # mutate each child in place with probability mutation_rate
        for child in tmp_children:
            if random() < mutation_rate:
                mutation(child)
        # evaluation: children join the parent pool (mu + lambda style)
        for child in tmp_children:
            child_fitness = instance.tour_length(child)
            individuum = (child, child_fitness)
            if child_fitness < best_individuum[1]:
                best_individuum = individuum
            population.append((child, child_fitness))
        # select the next generation, keeping only unique genomes
        new_population = []
        seen_individuums = set()
        if elitism is True:
            new_population = [best_individuum]
            seen_individuums.add(tuple(best_individuum[0]))
        # bounded retries in case the selector keeps yielding duplicates;
        # NOTE: the new population may end up smaller than num_parents
        tries = 0
        while len(new_population) < num_parents:
            if tries >= 20:
                break
            tries += 1
            for individuum in ngen_selection(population, 2 * num_parents):
                if len(new_population) >= num_parents:
                    break
                hashable = tuple(individuum[0])
                if hashable not in seen_individuums:
                    seen_individuums.add(hashable)
                    new_population.append(individuum)
        population = new_population
        if best_individuum != old_best:
            print (iteration, best_individuum)
    print best_individuum
if __name__ == '__main__':
    import tsp
    import argparse  # NOTE(review): imported but never used (no CLI parsing)
    # run the EA on the ulysses16 benchmark with tournament selection,
    # order-preserving crossover and swap mutation
    instance = tsp.TSP.from_file("data/TSP/ulysses16edit.tsp")
    tspEA(instance=instance,
          crossover_selection=tournament_selection,
          crossover=one_point_crossover,
          mutation=swap_mutator,
          ngen_selection=tournament_selection,
          num_parents=50,
          num_children=50,
          mutation_rate=0.05,
          maxiter=1000,
          elitism=True)
| Python |
#!/usr/bin/env python
from itertools import product
class TSP:
    """Symmetric TSP instance backed by an undirected networkx graph whose
    edges carry a 'length' attribute; cities are 0-based int nodes."""
    def __init__(self, graph):
        """graph = networkx undirected graph object. each edge has
        attribute "length"."""
        self.graph = graph.copy()
    def __len__(self):
        # number of cities
        return len(self.graph)
    def tour_length(self, permutation):
        """Return the length of the *closed* tour visiting the cities in
        permutation order (the edge back to the start is included)."""
        assert(len(permutation) == len(set(permutation)) == len(self.graph))
        # closing edge first, then the consecutive legs
        acc = self.graph[permutation[0]][permutation[-1]]['length']
        for u, v in zip(permutation, permutation[1:]):
            acc += self.graph[u][v]['length']
        return acc
    def __repr__(self):
        return """<TSP {0} nodes>""".format(len(self.graph))
    def generate_ip(self):
        """Build and return a Gurobi MIP for this instance using the
        Miller-Tucker-Zemlin (MTZ) subtour-elimination formulation."""
        import gurobipy as grb
        m = grb.Model(self.__repr__())
        # convert to directed: one binary variable per arc, cost = length
        g = self.graph.to_directed()
        for u, v, data in g.edges(data=True):
            data['edge_var'] = m.addVar(vtype=grb.GRB.BINARY,
                name="edge_{0}_{1}".format(u, v), obj=data['length'])
        m.update()
        # add in/out constraints: every city has exactly one incoming and
        # one outgoing arc selected
        for node in g:
            out_cons = grb.quicksum(g[node][v]['edge_var']
                                    for _, v in g.out_edges(node))
            in_cons = grb.quicksum(g[v][node]['edge_var']
                                   for v, _ in g.in_edges(node))
            m.addConstr(out_cons == 1, name="out_cons_{0}".format(node))
            m.addConstr(in_cons == 1, name="in_cons_{0}".format(node))
        m.update()
        # add subtour constraints: MTZ sequence variable for every city
        # except an arbitrary first one
        first_city = g.nodes()[0]
        for node, data in g.nodes(data=True):
            if node == first_city:
                continue
            data['seq'] = m.addVar(name="seq_{0}".format(node))
        m.update()
        for i, j in product(g.nodes(), g.nodes()):
            if i == first_city or j == first_city or i == j:
                continue
            u_i, u_j = g.node[i]['seq'], g.node[j]['seq']
            n = len(g)
            # u_i - u_j + n*x_ij <= n-1 forbids cycles avoiding first_city
            m.addConstr(u_i - u_j + n * g[i][j]['edge_var'] <= n - 1,
                        name="subtour_{0}_{1}".format(i, j))
        m.update()
        return m
    @staticmethod
    def from_file(filename):
        """Parse a simplified TSPLIB-style file with NODE_COORD_SECTION
        and/or EDGE_LENGTH_SECTION entries; 1-based ids in the file are
        stored 0-based. Returns a TSP instance."""
        from networkx import Graph
        g = Graph()
        with open(filename) as f:
            read_edges = False
            read_nodes = False
            for line in f:
                if line.startswith("EDGE_LENGTH_SECTION"):
                    read_edges = True
                    read_nodes = False
                    continue
                if line.startswith("NODE_COORD_SECTION"):
                    read_edges = False
                    read_nodes = True
                    continue
                if "EOF" in line:
                    break
                if read_nodes:
                    n, x, y = line.strip().split(" ")
                    g.add_node(int(n) - 1, pos=(float(x), float(y)))
                if read_edges:
                    u, v, l = map(int, line.split())
                    g.add_edge(u - 1, v - 1, length=l)
        return TSP(g)
# Recorded (iteration, (tour, tour_length)) snapshots of the EA's best tour
# on the ulysses16 instance; consumed by draw_tours() below.
stuff_to_draw = [(0, ([0, 13, 5, 9, 12, 8, 10, 6, 14, 2, 11, 4, 7, 1, 3, 15], 10872)),
                 (1, ([2, 3, 1, 12, 0, 11, 15, 7, 14, 4, 13, 6, 5, 8, 10, 9], 10026)),
                 (3, ([4, 14, 11, 15, 7, 3, 1, 12, 5, 9, 10, 8, 6, 13, 2, 0], 9926)),
                 (5, ([8, 12, 13, 2, 3, 1, 0, 11, 15, 7, 14, 4, 6, 5, 9, 10], 9434)),
                 (7, ([8, 4, 14, 11, 15, 7, 3, 12, 13, 2, 1, 0, 6, 5, 10, 9], 9203)),
                 (8, ([8, 4, 14, 11, 15, 7, 3, 12, 13, 2, 1, 0, 6, 5, 9, 10], 9060)),
                 (9, ([4, 6, 8, 12, 13, 14, 11, 15, 7, 3, 2, 1, 0, 5, 9, 10], 8699)),
                 (10, ([8, 12, 13, 11, 6, 3, 1, 2, 0, 15, 7, 14, 4, 5, 9, 10], 8300)),
                 (16, ([8, 4, 14, 11, 12, 13, 15, 7, 2, 1, 3, 0, 6, 5, 9, 10], 8052)),
                 (25, ([8, 4, 14, 12, 13, 11, 15, 7, 2, 1, 3, 0, 6, 5, 9, 10], 8008)),
                 (30, ([8, 5, 4, 14, 12, 13, 11, 15, 7, 2, 1, 3, 0, 6, 9, 10], 7892)),
                 (38, ([8, 5, 4, 14, 12, 13, 11, 15, 7, 3, 2, 1, 0, 6, 9, 10], 7891)),
                 (43, ([8, 5, 4, 14, 12, 13, 11, 6, 0, 7, 3, 1, 2, 15, 9, 10], 7750)),
                 (51, ([8, 5, 4, 14, 12, 13, 6, 11, 0, 7, 3, 1, 2, 15, 9, 10], 7675)),
                 (62, ([10, 5, 4, 14, 12, 13, 11, 6, 15, 0, 7, 3, 1, 2, 9, 8], 7514)),
                 (65, ([8, 10, 4, 14, 11, 12, 13, 15, 7, 3, 1, 2, 0, 9, 6, 5], 7374)),
                 (66, ([8, 10, 4, 14, 11, 12, 13, 5, 6, 0, 7, 3, 1, 2, 15, 9], 7148)),
                 (69, ([8, 10, 4, 14, 12, 13, 5, 6, 11, 0, 7, 3, 1, 2, 15, 9], 7031)),
                 (117, ([8, 10, 4, 14, 5, 6, 12, 13, 11, 15, 0, 7, 3, 1, 2, 9], 6959)),
                 (122, ([8, 10, 4, 14, 5, 6, 12, 13, 11, 0, 7, 3, 1, 2, 15, 9], 6935)),
                 (127, ([8, 10, 4, 14, 5, 6, 11, 12, 13, 0, 7, 3, 1, 2, 15, 9], 6859))
                 ]
def graph_from_tour(instance, tour):
    """Return a fresh networkx graph containing the instance's nodes (with
    their attributes, e.g. 'pos') and one edge per leg of the closed tour."""
    import networkx as nx
    g = nx.Graph()
    for node, data in instance.graph.nodes(data=True):
        g.add_node(node, **data)
    # include the closing edge back to the start
    # (list + list concatenation: zip() returns a list on Python 2)
    for (u, v) in zip(tour, tour[1:]) + [(tour[-1], tour[0])]:
        g.add_edge(u, v)
    return g
def draw_tours(instance):
    """Render each snapshot in stuff_to_draw as a PNG of the tour drawn at
    the stored node coordinates, annotated with iteration and tour length."""
    import matplotlib.pyplot as plt
    import networkx as nx
    for i, (iteration, (tour, quality)) in enumerate(stuff_to_draw):
        plt.close()
        g = graph_from_tour(instance, tour)
        # node positions were stored as the 'pos' attribute by from_file()
        pos = dict((u, d['pos']) for (u, d) in g.nodes(data=True))
        nx.draw(g, pos=pos, node_size=200, width=3)
        plt.annotate("iter: {0}".format(iteration), xy=(40, -3))
        plt.annotate("len: {0}".format(quality), xy=(40, -4.4))
        plt.savefig("stuff_{1}_{0:02d}.png".format(i, iteration))
| Python |
#!/usr/bin/env python
class SAT:
    """MaxSAT instance in DIMACS-style integer literal encoding, with a
    clause counter and a Gurobi IP generator."""
    def __init__(self, clauses):
        """variable x_1 is denoted by '1', its negation by '-1'.
        clauses is a list of variable-lists:
        (x_1 or not x_2) and (x_2) == ((1, -2), (2,))."""
        # count only the variables actually mentioned in some clause
        self.num_variables = len(set(abs(x) for xs in clauses for x in xs))
        self.num_clauses = len(clauses)
        self.clauses = clauses
    def num_satisfied_clauses(self, variables):
        """variables is a list of True/False or 1/0 values.

        Returns the number of clauses with at least one satisfied literal.
        """
        counter = 0
        for clause in self.clauses:
            for literal in clause:
                # a negative literal is satisfied by a falsy assignment,
                # a positive literal by a truthy one
                if ((literal < 0 and variables[abs(literal) - 1] in (False, 0, 0.0)) or
                        (literal > 0 and variables[literal - 1] in (True, 1, 1.0))):
                    counter += 1
                    break
            else:
                pass
        return counter
    def __repr__(self):
        return """<MaxSAT {0} variables, {1} clauses>""".format(
            self.num_variables, self.num_clauses)
    def generate_ip(self):
        """Build and return a Gurobi IP that maximises the number of
        satisfied clauses (one binary indicator per clause)."""
        import gurobipy as grb
        m = grb.Model(self.__repr__())
        # add variables: one binary per literal, one indicator per clause
        literals = [m.addVar(vtype=grb.GRB.BINARY, name="x_{0}".format(i))
                    for i in xrange(1, self.num_variables + 1)]
        clauses = [m.addVar(vtype=grb.GRB.BINARY, obj=1,
                            name="clause_{0}".format(i)) for i in
                   xrange(1, self.num_clauses + 1)]
        m.update()
        # add constraints: a clause indicator may only be 1 if at least one
        # of its literals is satisfied
        for clause, clause_indicator in zip(self.clauses, clauses):
            pos_vars = grb.quicksum(literals[i - 1] for i in clause if i > 0)
            neg_vars = grb.quicksum(1 - literals[abs(i) - 1] for i in clause if i < 0)
            m.addConstr(pos_vars + neg_vars >= clause_indicator)
        m.update()
        # maximize clauses (modelSense = -1 means maximise in Gurobi)
        m.modelSense = -1
        return m
    @staticmethod
    def from_file(filename):
        """Parse a DIMACS CNF file: skip 'c'/'p' lines; clauses are runs of
        non-zero ints terminated by 0 (may span lines).

        NOTE(review): a final clause not terminated by 0 before EOF is
        silently dropped -- confirm inputs are always 0-terminated.
        """
        with open(filename) as f:
            clauses = []
            current_clause = []
            for line in f:
                if line.startswith("c") or line.startswith("p"):
                    continue
                nums = map(int, line.split())
                for num in nums:
                    if num == 0:
                        clauses.append(current_clause)
                        current_clause = []
                    else:
                        current_clause.append(num)
            return SAT(clauses)
def satEA(instance):
    """Run a pyevolve GA on a SAT instance, maximising the number of
    satisfied clauses; prints the best individual found."""
    from pyevolve import G1DBinaryString, GSimpleGA
    def eval_func(s, instance=instance):
        # fitness = satisfied clauses for the candidate bitstring
        return instance.num_satisfied_clauses(s.genomeList)
    # Genome instance: one bit per SAT variable
    genome = G1DBinaryString.G1DBinaryString(instance.num_variables)
    # The evaluator function (objective function)
    genome.evaluator.set(eval_func)
    ga = GSimpleGA.GSimpleGA(genome)
    # some parameters
    ga.setGenerations(1000)
    ga.setMutationRate(0.1)
    ga.setPopulationSize(100)
    # Do the evolution, with stats dump
    # frequency of 10 generations
    ga.setInteractiveGeneration(1)
    ga.evolve(freq_stats=10)
    # Best individual
    print ga.bestIndividual()
| Python |
#!/usr/bin/env python
from itertools import product
import gurobipy as grb
def parse_lines(iterator, rows, cols):
    """Consume rows*cols one-value lines from iterator (row-major) and
    return, per row, the list of column indices whose value was 1.

    ex.: input lines 0, 0, 1, 1 with rows=cols=2 -> [[], [0, 1]]
    """
    return [[col for col in xrange(cols) if int(next(iterator)) == 1]
            for row in xrange(rows)]
def add_indicator(model, variable, value, bigM, aux=None, indicator=None):
    """Add big-M constraints tying `indicator` to the event variable == value.

    aux is a binary branching variable selecting which side of `value` the
    big-M relaxation is active on. NOTE(review): correctness depends on
    bigM being a valid upper bound on `variable` -- the caller uses
    value=1 and bigM=len(hours)+1 for the single-event-per-day penalty;
    verify the constraint pair against that usage.
    Returns the indicator variable (caller sets its objective weight).
    """
    import gurobipy as grb
    update = False
    if aux is None:
        aux = model.addVar(vtype=grb.GRB.BINARY)
        update = True
    if indicator is None:
        indicator = model.addVar(vtype=grb.GRB.BINARY)
        update = True
    if update:
        # flush newly created variables into the model before constraining
        model.update()
    model.addConstr(variable <= value - 1 + bigM * aux + indicator)
    model.addConstr(variable >= value + 1 - (1 - aux) * bigM)
    return indicator
def mycallback(model, where):
if where == grb.GRB.callback.MIPSOL:
obj = model.cbGet(grb.GRB.callback.MIPSOL_OBJ)
nodecnt = int(model.cbGet(grb.GRB.callback.MIPSOL_NODCNT))
print '*** New solution at node', nodecnt, 'objective', obj
#print model.cbGetSolution(model.getVars())
with open(model._filename + str(m._solcounter) + ".sol", "w") as f:
f.write("obj = {0}\n".format(obj))
for (index, var) in model._variables.items():
x = model.cbGetSolution(var)
if x > 0.5:
f.write("{0}\n".format(index, var))
m._solcounter += 1
class Timetable:
    """Course-timetabling instance (rooms x events x students x features)
    with a Gurobi IP generator.

    Timeslots are fixed at 5 days x 9 hours. Soft objective terms: events
    in the last hour of a day, and students with a single event on a day.
    """
    def __init__(self, room_sizes, student_event, room_feature, event_feature,
                 num_features):
        # save some information
        self.rooms = range(len(room_sizes))
        self.room_sizes = room_sizes
        self.students = range(len(student_event))
        self.events = range(len(event_feature))
        self.features = range(num_features)
        # create mappings for student <-> event (inverse of student_event)
        event_student = [[] for _ in self.events]
        for student, events in enumerate(student_event):
            for event in events:
                event_student[event].append(student)
        self.student_event = student_event
        self.event_student = event_student
        # create mappings for room <-> event pairings: an event may use a
        # room only if it is big enough and offers all required features
        room_event = [[] for _ in self.rooms]
        event_room = [[] for _ in self.events]
        for room, event in product(self.rooms, self.events):
            roomsize = room_sizes[room]
            num_participants = len(event_student[event])
            if roomsize < num_participants:
                continue
            if not set(event_feature[event]).issubset(
                    set(room_feature[room])):
                continue
            room_event[room].append(event)
            event_room[event].append(room)
        self.event_room = event_room
        self.room_event = room_event
        # save the rest. these values are not needed but could be used for
        # feasibility checking
        self.room_feature = room_feature
        self.event_feature = event_feature
        # default settings:
        # 5 days a 9 slots
        self.days = range(5)
        self.hours = range(9)
        self.timeslots = [(d, h) for d in self.days for h in self.hours]
    def __repr__(self):
        return """<Timetable {0} students, {1} rooms, """.format(
            len(self.students), len(self.rooms)) + \
            """{0} events, {1} features>""".format(
            len(self.events), len(self.features))
    def generate_ip(self, verbose=True,
                    single_event_penalty=True,
                    last_hour_penalty=True):
        """Build and return a Gurobi IP for this instance.

        Binary x[e, r, t] == 1 iff event e is held in room r at timeslot t.
        Hard constraints: only feasible room/event pairs, at most one event
        per room+slot, no student in two simultaneous events, exactly one
        room+slot per event.  Soft terms (objective): last-hour events
        (weighted by attendance) and single-event days per student.
        """
        import gurobipy as grb
        if verbose:
            print "generating model"
        m = grb.Model(self.__repr__())
        if verbose:
            print "adding variables"
        # lets go
        variables = dict()
        for h in self.hours:
            for e in self.events:
                obj = 0
                # last-hour events cost one unit per attending student
                if last_hour_penalty and h == self.hours[-1]:
                    obj = len(self.event_student[e])
                for d in self.days:
                    t = (d, h)
                    for r in self.rooms:
                        variables[(e, r, t)] = m.addVar(vtype=grb.GRB.BINARY,
                            obj=obj, name="e{0}_r{1}_d{2}_h{3}".format(e, r, *t))
        # adding #day x #students many auxiliary and indicator variables
        # (consumed by add_indicator for the single-event penalty)
        aux_variables = dict(((student, day),
            m.addVar(vtype=grb.GRB.BINARY,
                     name="aux_s{0}_d{1}".format(student, day)))
            for student, day in product(self.students, self.days))
        indicator_variables = dict(((student, day),
            m.addVar(vtype=grb.GRB.BINARY,
                     name="ind_s{0}_d{1}".format(student, day)))
            for student, day in product(self.students, self.days))
        m.update()
        if verbose:
            print "adding event<->room pairing constraints"
        # restrict room <-> event pairing to the precomputed feasible pairs
        for (e, r) in product(self.events, self.rooms):
            for t in self.timeslots:
                if e in self.room_event[r]:
                    continue
                m.addConstr(variables[(e, r, t)] == 0)
        if verbose:
            print "adding non-overlapping events constraints"
        # at most one event per room & timeslot
        for (r, t) in product(self.rooms, self.timeslots):
            m.addConstr(grb.quicksum(variables[(e, r, t)]
                                     for e in self.room_event[r]) <= 1)
        if verbose:
            print "adding student/event constraints"
        # every student goes to at most one event at the same time
        for student in self.students:
            for t in self.timeslots:
                m.addConstr(grb.quicksum(variables[(e, r, t)]
                                         for e in self.student_event[student]
                                         for r in self.event_room[e]) <= 1)
        if verbose:
            print "adding event<->timeslot/room constraints"
        # find exactly one slot / room for every event
        for e in self.events:
            m.addConstr(grb.quicksum(variables[(e, r, t)]
                                     for r in self.event_room[e]
                                     for t in self.timeslots) == 1)
        if single_event_penalty:
            if verbose:
                print "adding single event penalty"
            # a student has a single class on a day.
            # sum up the number of classes on each day for each student
            # if the number is == 1: incur fixed cost
            # NOTE(review): this dict is immediately shadowed by the
            # quicksum expression below and never used as a dict.
            num_classes = dict()
            for s in self.students:
                if len(self.hours) == 1:
                    print "only one slot per day, check penalties!"
                for day in self.days:
                    num_classes = \
                        grb.quicksum(variables[(e, r, (day, t))]
                                     for e in self.student_event[s]
                                     for r in self.rooms
                                     for t in self.hours)
                    indicator = add_indicator(m, num_classes, 1, len(self.hours) + 1,
                        aux=aux_variables[(s, day)],
                        indicator=indicator_variables[(s, day)])
                    indicator.obj = 1
        print "updating ...",
        m.update()
        # adding information for the solution-dump callback (mycallback)
        m._variables = variables
        print "done"
        m.params.heuristics = 0.15
        m._solcounter = 0
        # a student has more than two classes consecutively;
        # NOTE(review): this third soft constraint is not implemented.
        return m
    @staticmethod
    def from_file(filename):
        """Parse an instance file: a header line
        'events rooms features students', the room sizes (one per line),
        then the student x event, room x feature and event x feature 0/1
        matrices, one value per line (see parse_lines)."""
        with open(filename) as f:
            line_iter = iter(f)
            # parse meta information
            meta = line_iter.next()
            num_events, num_rooms, num_features, num_students = \
                map(int, meta.split())
            # parse room sizes
            room_sizes = [int(line_iter.next()) for _ in xrange(num_rooms)]
            # parse student/event matrix
            student_event = parse_lines(line_iter, num_students, num_events)
            # parse room/feature matrix
            room_feature = parse_lines(line_iter, num_rooms, num_features)
            # parse event/feature matrix
            event_feature = parse_lines(line_iter, num_events, num_features)
            # sanity check: the file should now be fully consumed
            iteratorempty = False
            try:
                line_iter.next()
            except StopIteration:
                iteratorempty = True
            if iteratorempty is False:
                print "possible bug, not all information consumed!"
                import IPython
                IPython.embed()
            assert(num_students == len(student_event))
            assert(num_rooms == len(room_sizes))
            assert(num_events == len(event_feature))
            return Timetable(room_sizes=room_sizes,
                             student_event=student_event,
                             room_feature=room_feature,
                             event_feature=event_feature,
                             num_features=num_features)
def timetableEA(instance):
    """EA entry point for Timetable instances.

    NOTE(review): this body is a verbatim copy of satEA and calls
    instance.num_satisfied_clauses / instance.num_variables, neither of
    which Timetable provides -- it will raise AttributeError at runtime.
    Left unchanged pending a decision on the timetable fitness function.
    """
    from pyevolve import G1DBinaryString, GSimpleGA
    def eval_func(s, instance=instance):
        return instance.num_satisfied_clauses(s.genomeList)
    # Genome instance
    genome = G1DBinaryString.G1DBinaryString(instance.num_variables)
    # The evaluator function (objective function)
    genome.evaluator.set(eval_func)
    ga = GSimpleGA.GSimpleGA(genome)
    # some parameters
    ga.setGenerations(1000)
    ga.setMutationRate(0.1)
    ga.setPopulationSize(100)
    # Do the evolution, with stats dump
    # frequency of 10 generations
    ga.setInteractiveGeneration(1)
    ga.evolve(freq_stats=10)
    # Best individual
    print ga.bestIndividual()
if __name__ == '__main__':
    import sys
    import IPython
    # default instance; override by passing a path on the command line
    filename = "data/Timetabling/simple_test.tim"
    if len(sys.argv) > 1:
        filename = sys.argv[1]
    simple = Timetable.from_file(filename)
    m = simple.generate_ip()
    # mycallback() reads _filename / _solcounter off the model object
    m._filename = filename
    m.optimize(mycallback)
    m.printAttr("X")
    IPython.embed()
| Python |
#!/usr/bin/env python
from random import shuffle, choice, sample, random, randint
from bisect import bisect
from itertools import izip_longest
from math import sqrt
# helper stuff
fst = lambda s: s[0]
snd = lambda s: s[1]
def grouper(n, iterable, fillvalue=None):
    """Collect data into fixed-length chunks or blocks (itertools recipe).

    grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx
    An incomplete trailing chunk is padded with fillvalue.
    """
    # n references to the *same* iterator, so izip_longest pulls n
    # consecutive items into each output tuple.
    args = [iter(iterable)] * n
    return izip_longest(fillvalue=fillvalue, *args)
def rand_index_pair(start, end):
    """Return a sorted pair (lo, hi) of two *distinct* random ints drawn
    uniformly from the inclusive range [start, end]."""
    lo = randint(start, end)
    hi = lo
    # redraw until the two values differ (requires start < end)
    while hi == lo:
        hi = randint(start, end)
    if lo > hi:
        lo, hi = hi, lo
    return (lo, hi)
def cdf(weights):
    """Turn a sequence of non-negative weights into a cumulative
    distribution: a list of running sums normalised by the total, whose
    last entry is 1.0."""
    total = float(sum(weights))
    running = 0.0
    out = []
    for w in weights:
        running += w
        out.append(running / total)
    return out
def prob_choice(population, cdf):
    """Draw one element of population according to the cumulative
    distribution cdf (as produced by cdf() above)."""
    assert len(population) == len(cdf)
    # bisect maps a uniform draw in [0, 1) to the matching CDF bucket
    return population[bisect(cdf, random())]
##### selectors #####
def tournament_selection(population, num):
    """Yield num winners of independent size-2 tournaments; the individual
    with the *lower* fitness (shorter tour) wins each tournament."""
    for _ in xrange(num):
        a, b = sample(population, 2)
        yield a if snd(a) <= snd(b) else b
def rank_selection(population, num):
    """Yield num individuals drawn by linear rank-based selection.

    Individuals are ranked by ascending fitness (best = rank 1); rank k
    receives probability (2/N) * (1 - (k-1)/(N-1)), i.e. the best gets
    weight 2/N and the worst gets 0.
    """
    population = sorted(population, key=snd)
    N = len(population)
    probs = cdf([(2.0 / N) * (1.0 - (k - 1.0) / (N - 1.0))
                 for k in xrange(1, N + 1)])
    for _ in xrange(num):
        yield prob_choice(population, probs)
def roulette_selection(population, num):
    """Yield num individuals drawn with probability proportional to fitness.

    NOTE(review): fitness in this module is tour length, which tspEA
    *minimises*; fitness-proportionate selection favours large values and
    therefore prefers the worst tours here. Confirm intent before using.
    """
    probs = cdf(map(snd, population))
    for _ in xrange(num):
        yield prob_choice(population, probs)
##### cross-overs #####
def one_point_crossover(dad, mom):
    """Order-preserving one-point crossover for permutation genomes.

    A cut point is chosen uniformly in [0, len(mom)].  Each child keeps one
    parent's prefix up to the cut and appends the genes that prefix is
    missing, in the order they appear in the *other* parent -- so both
    children remain valid permutations.  One of the two children is
    returned, each with probability 1/2.

    (Removed: two unused nested cmp_* helpers relying on Python-2-only
    cmp(), plus commented-out debug prints.)
    """
    cut = randint(0, len(mom))
    # genes each parent contributes after the cut (exactly the ones the
    # corresponding prefix lacks)
    dad_set = set(dad[cut:])
    mom_set = set(mom[cut:])
    child0 = dad[:cut] + [x for x in mom if x in dad_set]
    child1 = mom[:cut] + [x for x in dad if x in mom_set]
    if random() < 0.5:
        return child0
    return child1
def partially_matched_crossover(dad, mom):
    """PMX crossover for permutation genomes over the values 0..n-1.

    Picks a random slice [a, b) and, position by position, swaps each
    child's gene with the position where the other parent's gene currently
    sits, keeping position lookup tables in sync so both children remain
    permutations.  Returns one of the two children with probability 1/2.

    NOTE(review): assumes genomes are permutations of 0..len-1 -- gene
    values are used directly as indices into the lookup tables p1/p2.
    """
    a, b = rand_index_pair(0, len(dad))
    child0 = dad[:]
    child1 = mom[:]
    # p1[v] / p2[v] = current index of value v inside child0 / child1
    p1, p2 = [0] * len(dad), [0] * len(dad)
    for i in xrange(len(dad)):
        p1[child0[i]] = i
        p2[child1[i]] = i
    for i in xrange(a, b):
        # Keep track of the selected values
        temp1 = child0[i]
        temp2 = child1[i]
        # Swap the matched value: child0 receives temp2 at i, and temp1
        # moves to where temp2 used to live (symmetrically for child1)
        child0[i], child0[p1[temp2]] = temp2, temp1
        child1[i], child1[p2[temp1]] = temp1, temp2
        # Position bookkeeping: keep the lookup tables consistent
        p1[temp1], p1[temp2] = p1[temp2], p1[temp1]
        p2[temp1], p2[temp2] = p2[temp2], p2[temp1]
    # both children must still be duplicate-free permutations
    assert(len(set(child0)) == len(set(child1)) == len(mom) == len(dad))
    if random() < 0.5:
        return child0
    else:
        return child1
def edge_recombination_crossover(ind0, ind1):
    """Edge recombination crossover (ERX) for cyclic tours over 0..n-1.

    Builds the union adjacency map of both parent tours (including the
    closing edge last->first), then grows a child by repeatedly stepping
    to the current city's neighbour with the fewest remaining neighbours;
    on a dead end a random unvisited city is chosen.

    Fix: the neighbour-selection condition evaluated
    ``len(...) < smallest_n`` before the None check -- comparing an int
    against None is undefined ordering (TypeError on Python 3; False only
    by CPython-2 accident).  The None test now short-circuits first.
    """
    neighbors = [set() for _ in range(len(ind0))]
    for u, v in list(zip(ind0, ind0[1:])) + [(ind0[-1], ind0[0])]:
        neighbors[u].add(v)
        neighbors[v].add(u)
    for u, v in list(zip(ind1, ind1[1:])) + [(ind1[-1], ind1[0])]:
        neighbors[u].add(v)
        neighbors[v].add(u)
    # start from either parent's first city with probability 1/2
    if random() < 0.5:
        current = ind0[0]
    else:
        current = ind1[0]
    child = [current]
    seen = set([current])
    while len(child) < len(ind0):
        # the current city is consumed: drop it from every adjacency set
        for n in neighbors:
            n.discard(current)
        # choose the neighbour with the smallest remaining neighbourhood
        smallest = None
        smallest_n = None
        for n in neighbors[current]:
            if smallest_n is None or len(neighbors[n]) < smallest_n:
                smallest = n
                smallest_n = len(neighbors[n])
        current = smallest
        if current is None:
            # dead end: restart from a random city not yet in the tour
            current = choice([i for i in range(len(ind0)) if i not in seen])
        child.append(current)
        seen.add(current)
    assert(len(child) == len(ind0))
    return child
##### mutators #####
def swap_mutator(xs):
    """Exchange two distinct, randomly chosen positions of xs in place.
    Returns the (mutated) list for convenience."""
    i, j = rand_index_pair(0, len(xs) - 1)
    xs[i], xs[j] = xs[j], xs[i]
    return xs
def inversion_mutator(xs):
    """Reverse a randomly chosen contiguous segment of xs in place.
    Returns the (mutated) list for convenience."""
    lo, hi = rand_index_pair(0, len(xs) - 1)
    xs[lo:hi + 1] = xs[lo:hi + 1][::-1]
    return xs
##### analyze #####
def analyze_population(iteration, maxiter, population, children):
    """Print one progress line: iteration counter, population size,
    fitness min/max/mean/stddev, and counts of unique children and of
    unique children plus parents (a diversity indicator)."""
    min_ind = min(population, key=snd)[1]
    max_ind = max(population, key=snd)[1]
    mean = sum(map(snd, population)) / float(len(population))
    # sample variance (Bessel-corrected, hence the len - 1 denominator)
    variance = sum((x[1] - mean)**2 for x in population) / float(len(population) - 1)
    print "{0:{2}d}/{1} ({3}), min={4}, max={5}, mean={6:.2f}, dev={7:.2f} ({8} unique childs, {9} childs + parents)".format(
        iteration, maxiter, len(str(maxiter)),
        len(population), min_ind, max_ind, mean, sqrt(variance), len(set(tuple(xs) for xs in children)),
        len(set([tuple(xs) for xs in children])) + len(set([tuple(xs) for (xs, _) in population])))
def tspEA(instance,
          crossover_selection,
          crossover,
          mutation,
          ngen_selection,
          num_parents,
          num_children,
          mutation_rate,
          maxiter,
          elitism):
    """Generational evolutionary algorithm for the TSP.

    instance            -- TSP instance providing __len__ and tour_length()
    crossover_selection -- generator(population, num) yielding parents
    crossover           -- function(genome, genome) -> child genome
    mutation            -- in-place mutator, applied with prob. mutation_rate
    ngen_selection      -- selector used to build the next generation
    num_parents         -- population size
    num_children        -- offspring produced per generation
    maxiter             -- number of generations
    elitism             -- if True the best individual always survives

    Individuals are (genome, tour_length) pairs; shorter tours are better.
    Prints the best individual whenever it improves; returns None.
    """
    # initialize population with random permutations
    population = []
    for _ in xrange(num_parents):
        xs = range(len(instance))
        shuffle(xs)
        population.append((xs, instance.tour_length(xs)))
    # start loop
    best_individuum = min(population, key=snd)
    print (0, best_individuum)
    for iteration in xrange(1, maxiter + 1):
        old_best = best_individuum
        tmp_children = []
        # select pairs for crossover: two selections per child
        for ind0, ind1 in grouper(2, crossover_selection(population, 2 * num_children)):
            child = crossover(ind0[0], ind1[0])
            tmp_children.append(child)
        # mutate each child in place with probability mutation_rate
        for child in tmp_children:
            if random() < mutation_rate:
                mutation(child)
        # evaluation: children join the parent pool (mu + lambda style)
        for child in tmp_children:
            child_fitness = instance.tour_length(child)
            individuum = (child, child_fitness)
            if child_fitness < best_individuum[1]:
                best_individuum = individuum
            population.append((child, child_fitness))
        # select the next generation, keeping only unique genomes
        new_population = []
        seen_individuums = set()
        if elitism is True:
            new_population = [best_individuum]
            seen_individuums.add(tuple(best_individuum[0]))
        # bounded retries in case the selector keeps yielding duplicates;
        # NOTE: the new population may end up smaller than num_parents
        tries = 0
        while len(new_population) < num_parents:
            if tries >= 20:
                break
            tries += 1
            for individuum in ngen_selection(population, 2 * num_parents):
                if len(new_population) >= num_parents:
                    break
                hashable = tuple(individuum[0])
                if hashable not in seen_individuums:
                    seen_individuums.add(hashable)
                    new_population.append(individuum)
        population = new_population
        if best_individuum != old_best:
            print (iteration, best_individuum)
    print best_individuum
if __name__ == '__main__':
    import tsp
    import argparse  # NOTE(review): imported but never used (no CLI parsing)
    # run the EA on the ulysses16 benchmark with tournament selection,
    # order-preserving crossover and swap mutation
    instance = tsp.TSP.from_file("data/TSP/ulysses16edit.tsp")
    tspEA(instance=instance,
          crossover_selection=tournament_selection,
          crossover=one_point_crossover,
          mutation=swap_mutator,
          ngen_selection=tournament_selection,
          num_parents=50,
          num_children=50,
          mutation_rate=0.05,
          maxiter=1000,
          elitism=True)
| Python |
# Django settings for fereol project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# calculated paths for django and the site
# used as starting points for various other paths
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3'           # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = ''             # Or path to database file if using sqlite3.
DATABASE_USER = ''             # Not used with sqlite3.
DATABASE_PASSWORD = ''         # Not used with sqlite3.
DATABASE_HOST = ''             # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''             # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Warsaw'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'pl'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
# NOTE(review): MEDIA_URL and STATIC_URL look swapped relative to their
# names (media under /static/, static under /media/) — confirm intent
# before changing; the URLconf serves STATIC_DOC_ROOT under /media/.
MEDIA_URL = '/static/'
STATIC_URL = '/media/'
# Path to static media root directory.
STATIC_DOC_ROOT = os.path.join(SITE_ROOT, 'static')
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/admin_media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = ')=vxy+dcod%6#dm^-@!)pl5e9p88&dp8un905)_5w1$k$gdsqe'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
    # 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'fereol.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'fereol',
)
| Python |
#!/usr/bin/env python
# Standard Django 1.x management script: delegates to execute_manager
# using the sibling settings module.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    # Without a settings module Django cannot start; explain and abort.
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| Python |
from django.conf.urls.defaults import patterns
from django.conf import settings
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# Each route maps directly onto a view function in fereol.views.
urlpatterns = patterns('',
    (r'^$', 'fereol.views.index'),
    (r'^przedmiot/P(?P<id>\d+)$', 'fereol.views.przedmiot'),
    (r'^aktualnosci$', 'fereol.views.aktualnosci'),
    (r'^pracownicy$', 'fereol.views.pracownicy'),
    (r'^przedmioty$', 'fereol.views.przedmioty'),
    (r'^erlang$', 'fereol.views.erlang'),
    (r'^algebra$', 'fereol.views.algebra'),
    (r'^automaty$', 'fereol.views.automaty'),
    (r'^lista$', 'fereol.views.lista'),
    (r'^plan$', 'fereol.views.plan'),
    # Example:
    # (r'^fereol/', include('fereol.foo.urls')),
    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # (r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
    # Serve static files through Django itself only during development.
    urlpatterns += patterns('',(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_DOC_ROOT}),)
| Python |
from django.shortcuts import render_to_response
def index(request):
    """Render the site landing page."""
    context = {}
    return render_to_response("index.html", context)
def aktualnosci(request):
    """Render the news ("aktualnosci") page."""
    context = {}
    return render_to_response("aktualnosci.html", context)
def pracownicy(request):
    """Render the staff ("pracownicy") page."""
    context = {}
    return render_to_response("pracownicy.html", context)
def przedmioty(request):
    """Render the subjects ("przedmioty") listing page."""
    context = {}
    return render_to_response("przedmioty.html", context)
def przedmiot(request, id):
    """Render a single subject's page.

    Known subject ids have dedicated templates; every other id falls back
    to the generic example template.
    """
    templates = {
        '01': "algebra.html",
        '17': "erlang.html",
        '60': "automaty.html",
    }
    return render_to_response(templates.get(id, "przykladowy.html"), {})
def erlang(request):
    """Render the Erlang subject page."""
    context = {}
    return render_to_response('erlang.html', context)
def algebra(request):
    """Render the algebra subject page."""
    context = {}
    return render_to_response('algebra.html', context)
def automaty(request):
    """Render the automata ("automaty") subject page."""
    context = {}
    return render_to_response('automaty.html', context)
def lista(request):
    """Render the list data as JSON (served from a template)."""
    context = {}
    return render_to_response('lista.json', context)
def plan(request):
    """Render the schedule ("plan") page."""
    context = {}
    return render_to_response('plan.html', context)
| Python |
#!/usr/bin/env python
# Standard Django 1.x management script: delegates to execute_manager
# using the sibling settings module.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    # Without a settings module Django cannot start; explain and abort.
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| Python |
#====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
#
# This software consists of voluntary contributions made by many
# individuals on behalf of the Apache Software Foundation. For more
# information on the Apache Software Foundation, please see
# <http://www.apache.org/>.
#
import os
import re
import tempfile
import shutil
# Directory names skipped while walking the tree (VCS metadata, build output).
ignore_pattern = re.compile('^(.svn|target|bin|classes)')
# Only Java sources are rewritten.
java_pattern = re.compile('^.*\.java')
# Import lines referencing the Apache HttpComponents annotation package.
annot_pattern = re.compile('import org\.apache\.http\.annotation\.')
def process_dir(dir):
    """Recursively visit *dir*, rewriting every Java source file found.

    Subdirectories matching ignore_pattern (VCS metadata, build output)
    are pruned from the walk.
    """
    for entry in os.listdir(dir):
        path = os.path.join(dir, entry)
        if os.path.isdir(path):
            if not ignore_pattern.match(entry):
                process_dir(path)
        elif java_pattern.match(entry):
            process_source(path)
def process_source(filename):
    """Rewrite a Java source file in place, replacing imports of the Apache
    HttpComponents annotation package with the equivalent JCIP package.

    The file is rewritten through a temporary file, which replaces the
    original only if at least one import line actually changed; otherwise
    the temporary file is discarded and the original is left untouched.
    """
    # Compiled locally so the function is self-contained; matches lines like
    # "import org.apache.http.annotation.ThreadSafe;".
    annot_import = re.compile('import org\.apache\.http\.annotation\.')
    tmpfd, tmpfile = tempfile.mkstemp()
    try:
        changed = False
        dst = os.fdopen(tmpfd, 'w')
        try:
            src = open(filename)
            try:
                for line in src:
                    if annot_import.match(line):
                        changed = True
                        line = line.replace('import org.apache.http.annotation.', 'import net.jcip.annotations.')
                    dst.write(line)
            finally:
                src.close()
        finally:
            dst.close()
        if changed:
            shutil.move(tmpfile, filename)
        else:
            os.remove(tmpfile)
    except:
        # Clean up the temp file but re-raise: the original bare "except"
        # silently swallowed every error, hiding real failures.
        if os.path.exists(tmpfile):
            os.remove(tmpfile)
        raise
process_dir('.')
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
class MainHandler(webapp.RequestHandler):
    """Answers GET / with a plain-text greeting."""
    def get(self):
        body = 'Hello world!'
        self.response.out.write(body)
def main():
    """Build the WSGI application and hand the request to App Engine."""
    routes = [('/', MainHandler)]
    application = webapp.WSGIApplication(routes, debug=True)
    util.run_wsgi_app(application)
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
class MainHandler(webapp.RequestHandler):
    """Answers GET / with a plain-text greeting."""
    def get(self):
        body = 'Hello world!'
        self.response.out.write(body)
def main():
    """Build the WSGI application and hand the request to App Engine."""
    routes = [('/', MainHandler)]
    application = webapp.WSGIApplication(routes, debug=True)
    util.run_wsgi_app(application)
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
class MainHandler(webapp.RequestHandler):
    """Answers GET / with a plain-text greeting."""
    def get(self):
        body = 'Hello world!'
        self.response.out.write(body)
def main():
    """Build the WSGI application and hand the request to App Engine."""
    routes = [('/', MainHandler)]
    application = webapp.WSGIApplication(routes, debug=True)
    util.run_wsgi_app(application)
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
class MainHandler(webapp.RequestHandler):
    """Answers GET / with a plain-text greeting."""
    def get(self):
        body = 'Hello world!'
        self.response.out.write(body)
def main():
    """Build the WSGI application and hand the request to App Engine."""
    routes = [('/', MainHandler)]
    application = webapp.WSGIApplication(routes, debug=True)
    util.run_wsgi_app(application)
if __name__ == '__main__':
    main()
| Python |
from google.appengine.ext import db
class Greeting(db.Model):
    """A single guestbook entry."""
    # Left unset for anonymous posters.
    author = db.UserProperty()
    content = db.StringProperty(multiline=True)
    # auto_now_add: stamped once when the entity is first stored.
    date = db.DateTimeProperty(auto_now_add=True)
class Menu(db.Model):
    """A navigation menu item: display text plus its target link."""
    text = db.StringProperty(multiline=False)
    link = db.StringProperty(multiline=False)
| Python |
import os
from google.appengine.ext.webapp import template
import cgi
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from model import *
class MainPage(webapp.RequestHandler):
    """Front page: ten newest greetings plus a login/logout link."""
    def get(self):
        recent = Greeting.all().order('-date').fetch(10)
        # Offer the opposite of the visitor's current auth state.
        if users.get_current_user():
            auth_url = users.create_logout_url(self.request.uri)
            auth_label = 'Logout'
        else:
            auth_url = users.create_login_url(self.request.uri)
            auth_label = 'Login'
        context = {
            'greetings': recent,
            'url': auth_url,
            'url_linktext': auth_label,
        }
        page = os.path.join(os.path.dirname(__file__), 'index.html')
        self.response.out.write(template.render(page, context))
class PutGuestBook(webapp.RequestHandler):
    """Store a submitted greeting, then bounce back to the listing."""
    def post(self):
        entry = Greeting()
        if users.get_current_user():
            entry.author = users.get_current_user()
        entry.content = self.request.get('content')
        entry.put()
        self.redirect('/GetGuest')
class GuestBook(webapp.RequestHandler):
    """Render the static guestbook submission form."""
    def get(self):
        form_path = os.path.join(os.path.dirname(__file__), 'Guestbook.html')
        self.response.out.write(template.render(form_path, {}))
class GetGuest(webapp.RequestHandler):
    """Return the ten newest greetings as an HTML fragment."""
    def get(self):
        greetings_query = Greeting.all().order('-date')
        greetings = greetings_query.fetch(10)
        rp = ""
        for greeting in greetings:
            if greeting.author:
                # Bug fix: users.User.nickname is a method, not an attribute;
                # the original concatenated the bound method object.
                # Escape it too, since nicknames are user-controlled text.
                name = cgi.escape(greeting.author.nickname())
                rp += "<div class='guestname'>" + name + "</div>"
            else:
                rp += "<div class='guestname'>An anonymous person</div>"
            # Bug fix: the original replace("<", "<")/replace(">", ">") calls
            # were no-ops (mangled HTML escaping), leaving the page open to
            # markup injection (XSS). cgi is imported at module level.
            content = cgi.escape(greeting.content)
            rp += "<div class='guestcontent'>" + content + "</div>"
        self.response.out.write(rp)
class MyInformation(webapp.RequestHandler):
    """Render the static "my information" page."""
    def get(self):
        page = os.path.join(os.path.dirname(__file__), 'MyInformation.html')
        self.response.out.write(template.render(page, {}))
# URL map for the guestbook application.
application = webapp.WSGIApplication(
        [('/', MainPage),
        ('/GuestBook', GuestBook),
        ('/GetGuest', GetGuest),
        ('/PutGuestBook', PutGuestBook),
        ('/MyInformation', MyInformation)
        ],
        debug=True)
def main():
    """CGI entry point: serve one request with the application above."""
    run_wsgi_app(application)
if __name__ == "__main__":
    main()
| Python |
# -*- coding: utf-8 -*-
# Django settings for the example project.
import os
DEBUG = True
TEMPLATE_DEBUG = False
##LANGUAGE_CODE = 'zh-CN'
##LANGUAGE_CODE = 'fr'
# NOTE(review): Django expects LOCALE_PATHS to be a tuple/list of paths —
# confirm a bare string is intended here.
LOCALE_PATHS = 'locale'
USE_I18N = True
# Try the filesystem first, then templates bundled in zip archives.
TEMPLATE_LOADERS=('django.template.loaders.filesystem.load_template_source',
                  'ziploader.zip_loader.load_template_source')
# -*- coding: utf-8 -*-
import os,stat
import sys
import logging
import wsgiref.handlers
from mimetypes import types_map
from datetime import datetime, timedelta
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.api import memcache
from google.appengine.ext.zipserve import *
# Make bundled third-party modules importable.
sys.path.append('modules')
from model import *
# {{{ Handlers
cwd = os.getcwd()
theme_path = os.path.join(cwd, 'themes')
# NOTE(review): file_modifieds appears unused in this module — confirm before removing.
file_modifieds={}
max_age = 600 #expires in 10 minutes
def Error404(handler):
    """Answer the request with a 404 status and the themed 404 page."""
    handler.response.set_status(404)
    page = template.render(os.path.join(cwd, 'views/404.html'), {'error': 404})
    handler.response.out.write(page)
class GetFile(webapp.RequestHandler):
    """Serve a static theme file from disk, falling back to a theme .zip.

    Answers 304 when the client's If-Modified-Since copy is current, and
    404 for missing files or paths that resolve outside the themes root.
    """
    def get(self,prefix,name):
        # Strip the leading '/themes/' (8 characters) from the request path.
        request_path = self.request.path[8:]
        server_path = os.path.normpath(os.path.join(cwd, 'themes', request_path))
        try:
            fstat=os.stat(server_path)
        except:
            #use zipfile
            # Not on disk: try to serve the file out of themes/<prefix>.zip.
            theme_file=os.path.normpath(os.path.join(cwd, 'themes', prefix))
            if os.path.exists(theme_file+".zip"):
                #is file exist?
                fstat=os.stat(theme_file+".zip")
                zipdo=ZipHandler()
                zipdo.initialize(self.request,self.response)
                return zipdo.get(theme_file,name)
            else:
                Error404(self)
                return
        fmtime=datetime.fromtimestamp(fstat[stat.ST_MTIME])
        if self.request.if_modified_since and self.request.if_modified_since.replace(tzinfo=None) >= fmtime:
            # Client copy is fresh: answer 304 with no body.
            self.response.headers['Date'] = format_date(datetime.utcnow())
            self.response.headers['Last-Modified'] = format_date(fmtime)
            cache_expires(self.response, max_age)
            self.response.set_status(304)
            self.response.clear()
        elif server_path.startswith(theme_path):
            # Guess the MIME type from the file extension.
            ext = os.path.splitext(server_path)[1]
            if types_map.has_key(ext):
                mime_type = types_map[ext]
            else:
                mime_type = 'application/octet-stream'
            try:
                self.response.headers['Content-Type'] = mime_type
                self.response.headers['Last-Modified'] = format_date(fmtime)
                cache_expires(self.response, max_age)
                self.response.out.write(open(server_path, 'rb').read())
            except Exception, e:
                Error404(self)
        else:
            # normpath resolved outside the themes root (e.g. via '..') — refuse.
            Error404(self)
class NotFound(webapp.RequestHandler):
    # Catch-all handler: any unmatched GET is answered with the 404 page.
    def get(self):
        Error404(self)
#}}}
def format_date(dt):
    """Render *dt* as an HTTP-date string (RFC 1123 layout, GMT label)."""
    http_date_layout = '%a, %d %b %Y %H:%M:%S GMT'
    return dt.strftime(http_date_layout)
def cache_expires(response, seconds=0, **kw):
    """
    Set expiration headers on *response*.

    With seconds == 0 the response is marked as uncacheable; otherwise it
    may be cached for *seconds* seconds.  Extra keyword arguments are
    accepted for signature compatibility with webob.Response.cache_expires
    but are currently ignored.

    This function is modified from webob.Response;
    it will be good if google.appengine.ext.webapp.Response inherits from that class...
    """
    if not seconds:
        # To really expire something, you have to force a
        # bunch of these cache control attributes, and IE may
        # not pay attention to those still so we also set
        # Expires.
        response.headers['Cache-Control'] = 'max-age=0, must-revalidate, no-cache, no-store'
        response.headers['Expires'] = format_date(datetime.utcnow())
        # Bug fix: the original referenced `self`, which does not exist in
        # this free function and raised NameError on every uncacheable
        # response. Operate on the passed-in response instead.
        if 'Last-Modified' not in response.headers:
            response.headers['Last-Modified'] = format_date(datetime.utcnow())
        response.headers['Pragma'] = 'no-cache'
    else:
        response.headers['Cache-Control'] = 'max-age=%d' % seconds
        response.headers['Expires'] = format_date(datetime.utcnow() + timedelta(seconds=seconds))
def main():
    """Route theme-file requests and serve the current CGI request."""
    application = webapp.WSGIApplication(
            [
            # Never expose raw template sources inside a theme.
            ('/themes/[\\w\\-]+/templates/.*', NotFound),
            ('/themes/(?P<prefix>[\\w\\-]+)/(?P<name>.+)', GetFile),
            # Anything else is a 404.
            ('.*', NotFound),
            ],
            debug=True)
    wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
import optparse
import os
import sys
def compile_messages(locale=None):
basedir = None
if os.path.isdir(os.path.join('conf', 'locale')):
basedir = os.path.abspath(os.path.join('conf', 'locale'))
elif os.path.isdir('locale'):
basedir = os.path.abspath('locale')
else:
print "This script should be run from the Django SVN tree or your project or app tree."
sys.exit(1)
if locale is not None:
basedir = os.path.join(basedir, locale, 'LC_MESSAGES')
for dirpath, dirnames, filenames in os.walk(basedir):
for f in filenames:
if f.endswith('.po'):
sys.stderr.write('processing file %s in %s\n' % (f, dirpath))
pf = os.path.splitext(os.path.join(dirpath, f))[0]
# Store the names of the .mo and .po files in an environment
# variable, rather than doing a string replacement into the
# command, so that we can take advantage of shell quoting, to
# quote any malicious characters/escaping.
# See http://cyberelk.net/tim/articles/cmdline/ar01s02.html
os.environ['djangocompilemo'] = pf + '.mo'
os.environ['djangocompilepo'] = pf + '.po'
if sys.platform == 'win32': # Different shell-variable syntax
cmd = 'msgfmt -o "%djangocompilemo%" "%djangocompilepo%"'
else:
cmd = 'msgfmt -o "$djangocompilemo" "$djangocompilepo"'
os.system(cmd)
def main():
    """Parse the optional -l/--locale flag and run the compiler."""
    parser = optparse.OptionParser()
    parser.add_option('-l', '--locale', dest='locale',
            help="The locale to process. Default is to process all.")
    options, args = parser.parse_args()
    if len(args):
        parser.error("This program takes no arguments")
    compile_messages(options.locale)
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
import os
import sys
def unique_messages():
basedir = None
if os.path.isdir(os.path.join('conf', 'locale')):
basedir = os.path.abspath(os.path.join('conf', 'locale'))
elif os.path.isdir('locale'):
basedir = os.path.abspath('locale')
else:
print "this script should be run from the django svn tree or your project or app tree"
sys.exit(1)
for (dirpath, dirnames, filenames) in os.walk(basedir):
for f in filenames:
if f.endswith('.po'):
sys.stderr.write('processing file %s in %s\n' % (f, dirpath))
pf = os.path.splitext(os.path.join(dirpath, f))[0]
cmd = 'msguniq "%s.po"' % pf
stdout = os.popen(cmd)
msg = stdout.read()
open('%s.po' % pf, 'w').write(msg)
if __name__ == "__main__":
    # Run only when executed as a script.
    unique_messages()
| Python |
#!/usr/bin/env python
# Need to ensure that the i18n framework is enabled
from django.conf import settings
settings.configure(USE_I18N = True)
from django.utils.translation import templatize
import re
import os
import sys
import getopt
# Matches "//" line comments in JavaScript so they can be rewritten as
# "#" comments when a .js file is pythonized for xgettext.
pythonize_re = re.compile(r'\n\s*//')
def make_messages():
    """Extract translatable strings into .pot/.po files via xgettext.

    Options are read from sys.argv: -l <lang> processes one language,
    -a processes all languages found under the locale dir, -d <domain>
    selects 'django' or 'djangojs', -v is verbose.  HTML templates and
    JS files are first rewritten into temporary .py files so xgettext
    can parse them.
    """
    localedir = None
    if os.path.isdir(os.path.join('conf', 'locale')):
        localedir = os.path.abspath(os.path.join('conf', 'locale'))
    elif os.path.isdir('locale'):
        localedir = os.path.abspath('locale')
    else:
        print "This script should be run from the django svn tree or your project or app tree."
        print "If you did indeed run it from the svn checkout or your project or application,"
        print "maybe you are just missing the conf/locale (in the django tree) or locale (for project"
        print "and application) directory?"
        print "make-messages.py doesn't create it automatically, you have to create it by hand if"
        print "you want to enable i18n for your project or application."
        sys.exit(1)
    # Hand-rolled option parsing via getopt.
    (opts, args) = getopt.getopt(sys.argv[1:], 'l:d:va')
    lang = None
    domain = 'django'
    verbose = False
    all = False
    for o, v in opts:
        if o == '-l':
            lang = v
        elif o == '-d':
            domain = v
        elif o == '-v':
            verbose = True
        elif o == '-a':
            all = True
    if domain not in ('django', 'djangojs'):
        print "currently make-messages.py only supports domains 'django' and 'djangojs'"
        sys.exit(1)
    if (lang is None and not all) or domain is None:
        print "usage: make-messages.py -l <language>"
        print " or: make-messages.py -a"
        sys.exit(1)
    languages = []
    if lang is not None:
        languages.append(lang)
    elif all:
        languages = [el for el in os.listdir(localedir) if not el.startswith('.')]
    for lang in languages:
        print "processing language", lang
        basedir = os.path.join(localedir, lang, 'LC_MESSAGES')
        if not os.path.isdir(basedir):
            os.makedirs(basedir)
        pofile = os.path.join(basedir, '%s.po' % domain)
        potfile = os.path.join(basedir, '%s.pot' % domain)
        # Start from a clean .pot for this run.
        if os.path.exists(potfile):
            os.unlink(potfile)
        for (dirpath, dirnames, filenames) in os.walk("."):
            for file in filenames:
                if domain == 'djangojs' and file.endswith('.js'):
                    if verbose: sys.stdout.write('processing file %s in %s\n' % (file, dirpath))
                    # Pythonize the JS source so xgettext (Perl mode) can read it.
                    src = open(os.path.join(dirpath, file), "rb").read()
                    src = pythonize_re.sub('\n#', src)
                    open(os.path.join(dirpath, '%s.py' % file), "wb").write(src)
                    thefile = '%s.py' % file
                    cmd = 'xgettext %s -d %s -L Perl --keyword=gettext_noop --keyword=gettext_lazy --keyword=ngettext_lazy --from-code UTF-8 -o - "%s"' % (
                        os.path.exists(potfile) and '--omit-header' or '', domain, os.path.join(dirpath, thefile))
                    (stdin, stdout, stderr) = os.popen3(cmd, 'b')
                    msgs = stdout.read()
                    errors = stderr.read()
                    if errors:
                        print "errors happened while running xgettext on %s" % file
                        print errors
                        sys.exit(8)
                    # Point source references back at the real file, not the temp .py.
                    old = '#: '+os.path.join(dirpath, thefile)[2:]
                    new = '#: '+os.path.join(dirpath, file)[2:]
                    msgs = msgs.replace(old, new)
                    if msgs:
                        open(potfile, 'ab').write(msgs)
                    os.unlink(os.path.join(dirpath, thefile))
                elif domain == 'django' and (file.endswith('.py') or file.endswith('.html')):
                    thefile = file
                    if file.endswith('.html'):
                        # Templates are converted to Python via templatize() first.
                        src = open(os.path.join(dirpath, file), "rb").read()
                        open(os.path.join(dirpath, '%s.py' % file), "wb").write(templatize(src))
                        thefile = '%s.py' % file
                    if verbose: sys.stdout.write('processing file %s in %s\n' % (file, dirpath))
                    cmd = 'xgettext %s -d %s -L Python --keyword=gettext_noop --keyword=gettext_lazy --keyword=ngettext_lazy --from-code UTF-8 -o - "%s"' % (
                        os.path.exists(potfile) and '--omit-header' or '', domain, os.path.join(dirpath, thefile))
                    (stdin, stdout, stderr) = os.popen3(cmd, 'b')
                    msgs = stdout.read()
                    errors = stderr.read()
                    if errors:
                        print "errors happened while running xgettext on %s" % file
                        print errors
                        sys.exit(8)
                    if thefile != file:
                        old = '#: '+os.path.join(dirpath, thefile)[2:]
                        new = '#: '+os.path.join(dirpath, file)[2:]
                        msgs = msgs.replace(old, new)
                    if msgs:
                        open(potfile, 'ab').write(msgs)
                    if thefile != file:
                        os.unlink(os.path.join(dirpath, thefile))
        # Deduplicate the .pot, then merge into (or create) the .po.
        if os.path.exists(potfile):
            (stdin, stdout, stderr) = os.popen3('msguniq "%s"' % potfile, 'b')
            msgs = stdout.read()
            errors = stderr.read()
            if errors:
                print "errors happened while running msguniq"
                print errors
                sys.exit(8)
            open(potfile, 'w').write(msgs)
            if os.path.exists(pofile):
                (stdin, stdout, stderr) = os.popen3('msgmerge -q "%s" "%s"' % (pofile, potfile), 'b')
                msgs = stdout.read()
                errors = stderr.read()
                if errors:
                    print "errors happened while running msgmerge"
                    print errors
                    sys.exit(8)
            open(pofile, 'wb').write(msgs)
            os.unlink(potfile)
if __name__ == "__main__":
    # Run only when executed as a script.
    make_messages()
| Python |
#!/usr/bin/env python
import optparse
import os
import sys
def compile_messages(locale=None):
basedir = None
if os.path.isdir(os.path.join('conf', 'locale')):
basedir = os.path.abspath(os.path.join('conf', 'locale'))
elif os.path.isdir('locale'):
basedir = os.path.abspath('locale')
else:
print "This script should be run from the Django SVN tree or your project or app tree."
sys.exit(1)
if locale is not None:
basedir = os.path.join(basedir, locale, 'LC_MESSAGES')
for dirpath, dirnames, filenames in os.walk(basedir):
for f in filenames:
if f.endswith('.po'):
sys.stderr.write('processing file %s in %s\n' % (f, dirpath))
pf = os.path.splitext(os.path.join(dirpath, f))[0]
# Store the names of the .mo and .po files in an environment
# variable, rather than doing a string replacement into the
# command, so that we can take advantage of shell quoting, to
# quote any malicious characters/escaping.
# See http://cyberelk.net/tim/articles/cmdline/ar01s02.html
os.environ['djangocompilemo'] = pf + '.mo'
os.environ['djangocompilepo'] = pf + '.po'
if sys.platform == 'win32': # Different shell-variable syntax
cmd = 'msgfmt -o "%djangocompilemo%" "%djangocompilepo%"'
else:
cmd = 'msgfmt -o "$djangocompilemo" "$djangocompilepo"'
os.system(cmd)
def main():
    """Parse the optional -l/--locale flag and run the compiler."""
    parser = optparse.OptionParser()
    parser.add_option('-l', '--locale', dest='locale',
            help="The locale to process. Default is to process all.")
    options, args = parser.parse_args()
    if len(args):
        parser.error("This program takes no arguments")
    compile_messages(options.locale)
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
# Need to ensure that the i18n framework is enabled
from django.conf import settings
settings.configure(USE_I18N = True)
from django.utils.translation import templatize
import re
import os
import sys
import getopt
# Matches "//" line comments in JavaScript so they can be rewritten as
# "#" comments when a .js file is pythonized for xgettext.
pythonize_re = re.compile(r'\n\s*//')
def make_messages():
    """Extract translatable strings into .pot/.po files via xgettext.

    Options are read from sys.argv: -l <lang> processes one language,
    -a processes all languages found under the locale dir, -d <domain>
    selects 'django' or 'djangojs', -v is verbose.  HTML templates and
    JS files are first rewritten into temporary .py files so xgettext
    can parse them.
    """
    localedir = None
    if os.path.isdir(os.path.join('conf', 'locale')):
        localedir = os.path.abspath(os.path.join('conf', 'locale'))
    elif os.path.isdir('locale'):
        localedir = os.path.abspath('locale')
    else:
        print "This script should be run from the django svn tree or your project or app tree."
        print "If you did indeed run it from the svn checkout or your project or application,"
        print "maybe you are just missing the conf/locale (in the django tree) or locale (for project"
        print "and application) directory?"
        print "make-messages.py doesn't create it automatically, you have to create it by hand if"
        print "you want to enable i18n for your project or application."
        sys.exit(1)
    # Hand-rolled option parsing via getopt.
    (opts, args) = getopt.getopt(sys.argv[1:], 'l:d:va')
    lang = None
    domain = 'django'
    verbose = False
    all = False
    for o, v in opts:
        if o == '-l':
            lang = v
        elif o == '-d':
            domain = v
        elif o == '-v':
            verbose = True
        elif o == '-a':
            all = True
    if domain not in ('django', 'djangojs'):
        print "currently make-messages.py only supports domains 'django' and 'djangojs'"
        sys.exit(1)
    if (lang is None and not all) or domain is None:
        print "usage: make-messages.py -l <language>"
        print " or: make-messages.py -a"
        sys.exit(1)
    languages = []
    if lang is not None:
        languages.append(lang)
    elif all:
        languages = [el for el in os.listdir(localedir) if not el.startswith('.')]
    for lang in languages:
        print "processing language", lang
        basedir = os.path.join(localedir, lang, 'LC_MESSAGES')
        if not os.path.isdir(basedir):
            os.makedirs(basedir)
        pofile = os.path.join(basedir, '%s.po' % domain)
        potfile = os.path.join(basedir, '%s.pot' % domain)
        # Start from a clean .pot for this run.
        if os.path.exists(potfile):
            os.unlink(potfile)
        for (dirpath, dirnames, filenames) in os.walk("."):
            for file in filenames:
                if domain == 'djangojs' and file.endswith('.js'):
                    if verbose: sys.stdout.write('processing file %s in %s\n' % (file, dirpath))
                    # Pythonize the JS source so xgettext (Perl mode) can read it.
                    src = open(os.path.join(dirpath, file), "rb").read()
                    src = pythonize_re.sub('\n#', src)
                    open(os.path.join(dirpath, '%s.py' % file), "wb").write(src)
                    thefile = '%s.py' % file
                    cmd = 'xgettext %s -d %s -L Perl --keyword=gettext_noop --keyword=gettext_lazy --keyword=ngettext_lazy --from-code UTF-8 -o - "%s"' % (
                        os.path.exists(potfile) and '--omit-header' or '', domain, os.path.join(dirpath, thefile))
                    (stdin, stdout, stderr) = os.popen3(cmd, 'b')
                    msgs = stdout.read()
                    errors = stderr.read()
                    if errors:
                        print "errors happened while running xgettext on %s" % file
                        print errors
                        sys.exit(8)
                    # Point source references back at the real file, not the temp .py.
                    old = '#: '+os.path.join(dirpath, thefile)[2:]
                    new = '#: '+os.path.join(dirpath, file)[2:]
                    msgs = msgs.replace(old, new)
                    if msgs:
                        open(potfile, 'ab').write(msgs)
                    os.unlink(os.path.join(dirpath, thefile))
                elif domain == 'django' and (file.endswith('.py') or file.endswith('.html')):
                    thefile = file
                    if file.endswith('.html'):
                        # Templates are converted to Python via templatize() first.
                        src = open(os.path.join(dirpath, file), "rb").read()
                        open(os.path.join(dirpath, '%s.py' % file), "wb").write(templatize(src))
                        thefile = '%s.py' % file
                    if verbose: sys.stdout.write('processing file %s in %s\n' % (file, dirpath))
                    cmd = 'xgettext %s -d %s -L Python --keyword=gettext_noop --keyword=gettext_lazy --keyword=ngettext_lazy --from-code UTF-8 -o - "%s"' % (
                        os.path.exists(potfile) and '--omit-header' or '', domain, os.path.join(dirpath, thefile))
                    (stdin, stdout, stderr) = os.popen3(cmd, 'b')
                    msgs = stdout.read()
                    errors = stderr.read()
                    if errors:
                        print "errors happened while running xgettext on %s" % file
                        print errors
                        sys.exit(8)
                    if thefile != file:
                        old = '#: '+os.path.join(dirpath, thefile)[2:]
                        new = '#: '+os.path.join(dirpath, file)[2:]
                        msgs = msgs.replace(old, new)
                    if msgs:
                        open(potfile, 'ab').write(msgs)
                    if thefile != file:
                        os.unlink(os.path.join(dirpath, thefile))
        # Deduplicate the .pot, then merge into (or create) the .po.
        if os.path.exists(potfile):
            (stdin, stdout, stderr) = os.popen3('msguniq "%s"' % potfile, 'b')
            msgs = stdout.read()
            errors = stderr.read()
            if errors:
                print "errors happened while running msguniq"
                print errors
                sys.exit(8)
            open(potfile, 'w').write(msgs)
            if os.path.exists(pofile):
                (stdin, stdout, stderr) = os.popen3('msgmerge -q "%s" "%s"' % (pofile, potfile), 'b')
                msgs = stdout.read()
                errors = stderr.read()
                if errors:
                    print "errors happened while running msgmerge"
                    print errors
                    sys.exit(8)
            open(pofile, 'wb').write(msgs)
            os.unlink(potfile)
if __name__ == "__main__":
    # Run only when executed as a script.
    make_messages()
| Python |
#!/usr/bin/env python
import os
import sys
def unique_messages():
basedir = None
if os.path.isdir(os.path.join('conf', 'locale')):
basedir = os.path.abspath(os.path.join('conf', 'locale'))
elif os.path.isdir('locale'):
basedir = os.path.abspath('locale')
else:
print "this script should be run from the django svn tree or your project or app tree"
sys.exit(1)
for (dirpath, dirnames, filenames) in os.walk(basedir):
for f in filenames:
if f.endswith('.po'):
sys.stderr.write('processing file %s in %s\n' % (f, dirpath))
pf = os.path.splitext(os.path.join(dirpath, f))[0]
cmd = 'msguniq "%s.po"' % pf
stdout = os.popen(cmd)
msg = stdout.read()
open('%s.po' % pf, 'w').write(msg)
if __name__ == "__main__":
    # Run only when executed as a script.
    unique_messages()
| Python |
from micolog_plugin import *
import logging
from model import *
from google.appengine.api import users
class highsyntax(Plugin):
    """Micolog plugin embedding SyntaxHighlighter for code blocks in posts."""
    def __init__(self):
        Plugin.__init__(self,__file__)
        self.author="xuming"
        self.authoruri="http://xuming.net"
        self.uri="http://xuming.net"
        self.description="HighSyntax Plugin."
        self.name="HighSyntax plugin"
        self.version="0.1"
        # Inject the loader script into every page footer and serve the
        # bundled SyntaxHighlighter assets straight out of the plugin zip.
        self.register_filter('footer',self.footer)
        self.register_urlzip('/syntaxhighlighter/(.*)','syntaxhighlighter.zip')
        # Persisted admin choice; falls back to the "Default" theme.
        self.theme=OptionSet.getValue("highsyntax_theme",default="Default")
    def footer(self,content,blog=None,*arg1,**arg2):
        # Append a bootstrap snippet that lazily loads shCore.js only when a
        # <pre class="brush:..."> block is actually present on the page.
        return content+'''
	<script type="text/javascript">
	if ($('pre[class^=brush:]').length > 0)
	{
		$.getScript("/syntaxhighlighter/scripts/shCore.js", function() {
		SyntaxHighlighter.boot("/syntaxhighlighter/", {theme : "'''+str(self.theme)+'''", stripBrs : true}, {});
		});
	}
	</script>
	'''
    # Admin page: render the settings form (theme selection).
    def get(self,page):
        return '''<h3>HighSyntax Plugin</h3>
	<p>HighSyntax plugin for micolog.</p>
	<p>This plugin based on <a href="http://alexgorbatchev.com/wiki/SyntaxHighlighter" target="_blank">SyntaxHighlighter</a>
	and <a href="http://www.outofwhatbox.com/blog/syntaxhighlighter-downloads/" target="_blank">SyntaxHighlighter.boot()</a></p>
	<form action="" method="post">
	<p><B>Require:</B>
	<ol>
	<li><b>{%mf footer%} </b>in template "base.html".</li>
	<li><a href="http://jquery.org" target="_blank">Jquery</a> version 1.3.2 or new.</li>
	</ol>
	</p>
	<p><b>Theme:</b>
	</p>
	<p>
	<select name="theme" id="theme">
	<option value="Default">Default</option>
	<option value="Django">Django</option>
	<option value="Eclipse">Eclipse</option>
	<option value="Emacs">Emacs</option>
	<option value="FadeToGrey">FadeToGrey</option>
	<option value="Midnight">Midnight</option>
	<option value="RDark">RDark</option>
	</select>
	</p>
	<p>
	<input type="submit" value="submit">
	</p>
	</form>
	<script>
	$("#theme").val("'''+str(self.theme)+'''");</script>
	'''
    # Admin page: persist the submitted theme, then re-render the form.
    def post(self,page):
        self.theme=page.param("theme")
        OptionSet.setValue("highsyntax_theme",self.theme)
        return self.get(page)
from highsyntax import * | Python |
from micolog_plugin import *
from model import OptionSet
class googleAnalytics(Plugin):
    """Micolog plugin that appends a Google Analytics snippet to the page
    footer.  The raw tracking code is stored in the OptionSet key/value
    store and pasted verbatim by the 'footer' filter."""
    def __init__(self):
        Plugin.__init__(self,__file__)
        # Plugin metadata shown in the admin UI.
        self.author="xuming"
        self.authoruri="http://xuming.net"
        self.uri="http://xuming.net"
        self.description="Plugin for put google Analytics into micolog."
        self.name="google Analytics"
        self.version="0.1"
        # Append the tracking snippet to every page footer.
        self.register_filter('footer',self.filter)
    def filter(self,content,*arg1,**arg2):
        # 'footer' filter: re-read the stored snippet on every request so
        # admin edits take effect without a restart.
        code=OptionSet.getValue("googleAnalytics_code",default="")
        return content+str(code)
    def get(self,page):
        # Admin form.  BUG FIX: the heading used to read "Google Anslytics".
        code=OptionSet.getValue("googleAnalytics_code",default="")
        return '''<h3>Google Analytics</h3>
<form action="" method="post">
<p>Analytics Code:</p>
<textarea name="code" style="width:500px;height:100px">%s</textarea>
<br>
<input type="submit" value="submit">
</form>'''%code
    def post(self,page):
        # Save the snippet, then re-render the form.
        code=page.param("code")
        OptionSet.setValue("googleAnalytics_code",code)
        return self.get(page)
| Python |
from xheditor import * | Python |
from micolog_plugin import *
import logging,os
from model import *
from google.appengine.api import users
class xheditor(Plugin):
    """Micolog plugin that replaces the admin post editor with xhEditor."""
    def __init__(self):
        Plugin.__init__(self,__file__)
        # Plugin metadata shown in the admin UI.
        self.author="xuming"
        self.authoruri="http://xuming.net"
        self.uri="http://xuming.net"
        self.description="xheditor."
        self.name="xheditor plugin"
        self.version="0.1"
        # Serve the bundled editor assets straight from the zip archive.
        self.register_urlzip('/xheditor/(.*)','xheditor.zip')
        # Inject the loader script into the admin editor page header.
        self.register_filter('editor_header',self.head)
    def head(self,content,blog=None,*arg1,**arg2):
        # Pick the localized editor build matching the blog language.
        if blog.language=='zh_CN':
            js='xheditor-zh-cn.js'
        else:
            js='xheditor-en.js'
        # NOTE(review): unlike the other filters in this codebase this one
        # returns only the script and discards `content` -- confirm that
        # 'editor_header' is a replace-style filter before changing.
        sret='''<script type="text/javascript" src="/xheditor/%s"></script>
<script type="text/javascript">
$(function(){
$("#content").xheditor(true,{
upImgUrl:'!/admin/uploadex?ext=jpg|png|jpeg|gif',
upFlashUrl:'!/admin/uploadex?ext=swf',
upMediaUrl:'!/admin/uploadex?ext=wmv|avi|wma|mp3|mid'});
});
</script>'''%js
        return sret
    def get(self,page):
        # Description shown on the admin plugins page.
        return '''<h3>xheditor Plugin </h3>
<p>This is a demo for write editor plugin.</p>
<h4>feature</h4>
<p><ol>
<li>Change editor as xheditor.</li>
</ol></p>
'''
| Python |
from sys_plugin import * | Python |
# -*- coding: utf-8 -*-
from micolog_plugin import *
import logging,re
from google.appengine.api import mail
from model import *
from google.appengine.api import users
from base import BaseRequestHandler,urldecode
from google.appengine.ext.webapp import template
# Plain-text mail body sent to the blog owner when a new comment arrives;
# filled via %-formatting in sys_plugin.notify().
SBODY='''New comment on your post "%(title)s"
Author : %(author)s
E-mail : %(email)s
URL : %(weburl)s
Comment:
%(content)s
You can see all comments on this post here:
%(commenturl)s
'''
# Mail body sent to earlier commenters when a new comment references
# theirs via a "#comment-<id>" anchor in its text.
BBODY='''Hi~ New reference on your comment for post "%(title)s"
Author : %(author)s
URL : %(weburl)s
Comment:
%(content)s
You can see all comments on this post here:
%(commenturl)s
'''
class NotifyHandler(BaseRequestHandler):
    """Admin page for editing the comment-notification mail templates."""
    def __init__(self):
        BaseRequestHandler.__init__(self)
        self.current="config"
        # Current mail bodies, falling back to the built-in defaults.
        self.sbody=OptionSet.getValue('sys_plugin_sbody',SBODY)
        self.bbody=OptionSet.getValue('sys_plugin_bbody',BBODY)
    def get(self):
        # Render the setup form inside the admin chrome.
        self.template_vals.update({'self':self})
        content=template.render('plugins/sys_plugin/setup.html',self.template_vals)
        self.render2('views/admin/setup_base.html',{'m_id':'sysplugin_notify','content':content})
        #Also you can use:
        #self.render2('plugins/sys_plugin/setup2.html',{'m_id':'sysplugin_notify','self':self})
    def post(self):
        # Save both templates and the on/off switch, then re-render the form.
        self.bbody=self.param('bbody')
        self.sbody=self.param('sbody')
        self.blog.comment_notify_mail=self.parambool('comment_notify_mail')
        self.blog.put()
        OptionSet.setValue('sys_plugin_sbody',self.sbody)
        OptionSet.setValue('sys_plugin_bbody',self.bbody)
        self.get()
class sys_plugin(Plugin):
    """Built-in system plugin: adds generator meta/footer tags, a comment
    blocklist, and mail notification for new comments and replies."""
    def __init__(self):
        Plugin.__init__(self,__file__)
        # Plugin metadata shown in the admin UI.
        self.author="xuming"
        self.authoruri="http://xuming.net"
        self.uri="http://xuming.net"
        self.description="System plugin for micolog"
        self.name="Sys Plugin"
        self.version="0.2"
        # Newline-separated list of banned substrings for comments.
        self.blocklist=OptionSet.getValue("sys_plugin_blocklist",default="")
        self.register_filter('head',self.head)
        self.register_filter('footer',self.footer)
        self.register_urlmap('sys_plugin/setup',self.setup)
        self.register_urlhandler('/admin/sys_plugin/notify',NotifyHandler)
        self.register_setupmenu('sysplugin_notify',_('Notify'),'/admin/sys_plugin/notify')
        self.register_action('pre_comment',self.pre_comment)
        self.register_action('save_comment',self.save_comment)
        # Mail templates, falling back to the module defaults above.
        self.sbody=OptionSet.getValue('sys_plugin_sbody',SBODY)
        self.bbody=OptionSet.getValue('sys_plugin_bbody',BBODY)
    def head(self,content,blog=None,*arg1,**arg2):
        # 'head' filter: advertise the Micolog version in a meta tag.
        content=content+'<meta name="generator" content="Micolog %s" />'%blog.version
        return content
    def footer(self,content,blog=None,*arg1,**arg2):
        # 'footer' filter: append a version marker comment.
        return content+'<!--Powered by micolog %s-->'%blog.version
    def setup(self,page=None,*arg1,**arg2):
        """Admin page for the comment blocklist (GET shows, POST saves)."""
        if not page.is_login:
            page.redirect(users.create_login_url(page.request.uri))
        tempstr='''
<p>blocklist:</p>
<form action="" method="post">
<p>
<textarea name="ta_list" style="width:400px;height:300px">%s</textarea>
</p>
<input type="submit" value="submit">
</form>'''
        if page.request.method=='GET':
            page.render2('views/admin/base.html',{'m_id':'sysplugin_block','content':tempstr%self.blocklist})
        else:
            self.blocklist=page.param("ta_list")
            OptionSet.setValue("sys_plugin_blocklist",self.blocklist)
            page.render2('views/admin/base.html',{'m_id':'sysplugin_block','content':tempstr%self.blocklist})
    def get(self,page):
        # Description shown on the admin plugins page.
        return '''<h3>Sys Plugin</h3>
<p>This is a system plugin for micolog. <br>Also a demo for how to write plugin for micolog.</p>
<h4>feature</h4>
<p><ol>
<li>Add Meta <meta name="generator" content="Micolog x.x" /></li>
<li>Add footer "<!--Powered by micolog x.x-->"</li>
<li>Comments Filter with blocklist <a href="/e/sys_plugin/setup">Setup</a></li>
<li>Comment Notify <a href="/admin/sys_plugin/notify">Setup</a></li>
</ol></p>
'''
    def pre_comment(self,comment,*arg1,**arg2):
        # 'pre_comment' action: reject a comment containing any blocked
        # substring by raising (the caller treats an exception as rejection).
        for s in self.blocklist.splitlines():
            if comment.content.find(s)>-1:
                raise Exception
    def save_comment(self,comment,*arg1,**arg2):
        # 'save_comment' action: fire mail notifications when enabled.
        if self.blog.comment_notify_mail:
            self.notify(comment)
    def notify(self,comment):
        """Mail the blog owner about a new comment, then mail earlier
        commenters whose comments are referenced via #comment-<id>."""
        sbody=self.sbody.decode('utf-8')
        bbody=self.bbody.decode('utf-8')
        # Skip owner notification for comments posted by the admin himself.
        if self.blog.comment_notify_mail and self.blog.owner and not users.is_current_user_admin() :
            sbody=sbody%{'title':comment.entry.title,
                'author':comment.author,
                'weburl':comment.weburl,
                'email':comment.email,
                'content':comment.content,
                'commenturl':comment.entry.fullurl+"#comment-"+str(comment.key().id())
                }
            # NOTE(review): send_mail_to_admins takes a *sender* address as
            # its first argument; passing the owner's email assumes the
            # owner is an app admin -- confirm against the GAE mail API.
            mail.send_mail_to_admins(self.blog.owner.email(),'Comments:'+comment.entry.title, sbody,reply_to=comment.email)
        #reply comment mail notify
        refers = re.findall(r'#comment-(\d+)', comment.content)
        if len(refers)!=0:
            replyIDs=[int(a) for a in refers]
            commentlist=comment.entry.comments()
            # Only commenters who opted in to reply notifications.
            emaillist=[c.email for c in commentlist if c.reply_notify_mail and c.key().id() in replyIDs]
            # De-duplicate recipient addresses.
            emaillist = {}.fromkeys(emaillist).keys()
            for refer in emaillist:
                if self.blog.owner and mail.is_email_valid(refer):
                    emailbody = bbody%{'title':comment.entry.title,
                        'author':comment.author,
                        'weburl':comment.weburl,
                        'email':comment.email,
                        'content':comment.content,
                        'commenturl':comment.entry.fullurl+"#comment-"+str(comment.key().id())
                        }
                    message = mail.EmailMessage(sender = self.blog.owner.email(),subject = 'Comments:'+comment.entry.title)
                    message.to = refer
                    message.body = emailbody
                    message.send()
| Python |
from micolog_plugin import *
from google.appengine.api import memcache
from google.appengine.api.labs import taskqueue
from wp_import import *
from model import *
import logging,math
from django.utils import simplejson
from base import BaseRequestHandler,urldecode
class waphandler(BaseRequestHandler):
    """Task/AJAX endpoint driving the incremental WordPress import.

    GET reports progress as JSON (or cancels with ?action=stop); POST --
    run by the 'import' task queue -- pops one parsed item from the
    import state held in memcache, writes it to the datastore, and
    re-enqueues itself until the queue is drained.
    """
    def get(self):
        if not self.is_login:
            self.redirect(users.create_login_url(self.request.uri))
        action=self.param('action')
        if action=='stop':
            # Cancel: drop the parser state so the next POST finds nothing.
            memcache.delete("imt")
            #OptionSet.remove('wpimport_data')
            self.write('"ok"')
            return
        imt=memcache.get('imt')
        #imt=OptionSet.getValue('wpimport_data')
        if imt and imt.cur_do:
            # Percent done = 100 - ceil(remaining fraction in percent).
            process=100-math.ceil(imt.count()*100/imt.total)
            if imt.cur_do[0]=='cat':
                msg="importing category '%s'"%imt.cur_do[1]['name']
            elif imt.cur_do[0]=='entry':
                msg="importing entry '%s'"%imt.cur_do[1]['title']
            else:
                msg="start importing..."
            # JSON tuple: (percent, message, still_running)
            self.write(simplejson.dumps((process,msg,not process==100)))
        else:
            self.write(simplejson.dumps((-1,"Have no data to import!",False)))
    def post(self):
        if not self.is_login:
            self.redirect(users.create_login_url(self.request.uri))
        try:
            #global imt
            imt=memcache.get("imt")
            #imt=OptionSet.getValue('wpimport_data')
            # Pop one queued item and persist the shrunken state back.
            import_data=imt.pop()
            #if tdata=='men':
            memcache.set('imt',imt)
            #else:
            #	OptionSet.setValue('wpimport_data',imt)
            if import_data:
                try:
                    if import_data[0]=='cat':
                        # Categories are keyed by their nicename/slug.
                        _cat=import_data[1]
                        nicename=_cat['slug']
                        cat=Category.get_by_key_name(nicename)
                        if not cat:
                            cat=Category(key_name=nicename)
                        cat.name=_cat['name']
                        cat.slug=nicename
                        cat.put()
                    elif import_data[0]=='entry':
                        _entry=import_data[1]
                        logging.debug('importing:'+_entry['title'])
                        # Entries are keyed by a hash of the title so a
                        # re-import updates instead of duplicating.
                        hashkey=str(hash(_entry['title']))
                        entry=Entry.get_by_key_name(hashkey)
                        if not entry:
                            entry=Entry(key_name=hashkey)
                        entry.title=_entry['title']
                        entry.author=self.login_user
                        entry.is_wp=True
                        #entry.date=datetime.strptime( _entry['pubDate'],"%a, %d %b %Y %H:%M:%S +0000")
                        # Try RFC822 without the trailing timezone, then an
                        # ISO-ish form, then give up and use "now".
                        try:
                            entry.date=datetime.strptime( _entry['pubDate'][:-6],"%a, %d %b %Y %H:%M:%S")
                        except:
                            try:
                                entry.date=datetime.strptime( _entry['pubDate'][0:19],"%Y-%m-%d %H:%M:%S")
                            except:
                                entry.date=datetime.now()
                        entry.entrytype=_entry['post_type']
                        entry.content=_entry['content']
                        entry.excerpt=_entry['excerpt']
                        entry.post_id=_entry['post_id']
                        entry.slug=urldecode(_entry['post_name'])
                        entry.entry_parent=_entry['post_parent']
                        entry.menu_order=_entry['menu_order']
                        # Link to categories imported earlier in the queue.
                        for cat in _entry['categories']:
                            c=Category.get_by_key_name(cat['slug'])
                            if c:
                                entry.categorie_keys.append(c.key())
                        entry.settags(','.join(_entry['tags']))
##						for tag in _entry['tags']:
##							entry.tags.append(tag)
                        if _entry['published']:
                            entry.save(True)
                        else:
                            entry.save()
                        for com in _entry['comments']:
                            try:
                                date=datetime.strptime(com['date'][0:19],"%Y-%m-%d %H:%M:%S")
                            except:
                                date=datetime.now()
                            comment=Comment(author=com['author'],
                                content=com['content'],
                                entry=entry,
                                date=date
                                )
                            # email/weburl are optional in the export.
                            try:
                                comment.email=com['email']
                                comment.weburl=com['weburl']
                            except:
                                pass
                            comment.save()
                finally:
                    # Always schedule the next step, even if this one failed.
                    queue=taskqueue.Queue("import")
                    queue.add(taskqueue.Task( url="/admin/wp_import"))
        except Exception,e :
            logging.info("import error: %s"%e.message)
class wordpress(Plugin_importbase):
    """Importer plugin: accepts a WordPress WXR upload and drives the
    incremental import via the 'import' task queue and waphandler."""
    def __init__(self):
        Plugin_importbase.__init__(self,__file__,"wordpress","Import posts, pages, comments, categories, and tags from a WordPress export file.")
        # Plugin metadata shown in the admin UI.
        self.author="xuming"
        self.authoruri="http://xuming.net"
        self.uri="http://xuming.net"
        self.description="Plugin for import wxr file."
        self.name="Wordpress Import"
        self.version="0.7"
        self.register_urlhandler('/admin/wp_import',waphandler)
    def get(self,page):
        # Upload form.
        return self.render_content("wpimport.html",{'name':self.name})
    def post(self,page):
        """Parse the uploaded WXR file, stash the parsed work queue in
        memcache, and kick off the first import task."""
        try:
            queue=taskqueue.Queue("import")
            wpfile=page.param('wpfile')
            #global imt
            imt=import_wordpress(wpfile)
            imt.parse()
            #OptionSet.setValue('wpimport_data',imt)
            memcache.set("imt",imt)
            queue.add(taskqueue.Task( url="/admin/wp_import"))
            return self.render_content("wpimport.html",{'postback':True})
        except Exception , e:
            return self.error("Import Error:<p style='color:red;font-size:11px;font-weight:normal'>%s</p>"%e.message)
| Python |
###Import post,page,category,tag from wordpress export file
import xml.etree.ElementTree as et
import logging
###import from wxr file
class import_wordpress:
    """Parser for a WordPress eXtended RSS (WXR) export file.

    parse() fills self.categories / self.tags / self.entries from the XML;
    pop() then hands the queued items out one at a time so an importer can
    work through them incrementally.  __getstate__/__setstate__ keep the
    object small enough to pickle between import steps (memcache).
    """
    def __init__(self,source):
        self.categories=[]
        self.tags=[]
        self.entries=[]
        self.source=source
        self.doc=et.fromstring(source)
        # WXR element namespaces (Clark notation, ready for find/findtext).
        self.wpns='{http://wordpress.org/export/1.0/}'
        self.contentns="{http://purl.org/rss/1.0/modules/content/}"
        self.excerptns="{http://wordpress.org/export/1.0/excerpt/}"
        et._namespace_map[self.wpns]='wp'
        et._namespace_map[self.contentns]='content'
        et._namespace_map[self.excerptns]='excerpt'
        self.channel=self.doc.find('channel')
        self.dict={'category':self.wpns+'category','tag':self.wpns+'tag','item':'item'}
        # (kind, payload) of the item currently being imported; None until
        # parse() runs, ("begin","begin") before the first pop().
        self.cur_do=None
    def parse(self):
        """Walk the channel and collect categories, tags and entries."""
        #parse categories
        for cate in self.channel.findall(self.wpns+'category'):
            slug=cate.findtext(self.wpns+'category_nicename')
            name=cate.findtext(self.wpns+'cat_name')
            self.categories.append({'slug':slug,'name':name})
        #parse tags
        for tag in self.channel.findall(self.wpns+'tag'):
            slug=tag.findtext(self.wpns+'tag_slug')
            name=tag.findtext(self.wpns+'tag_name')
            self.tags.append({'slug':slug,'name':name})
        #parse entries (best effort: a malformed item is logged and skipped)
        for item in self.channel.findall('item'):
            title=item.findtext('title')
            try:
                entry={}
                entry['title']=title
                logging.info(title)
                entry['pubDate']=item.findtext('pubDate')
                entry['post_type']=item.findtext(self.wpns+'post_type')
                entry['content']= item.findtext(self.contentns+'encoded')
                entry['excerpt']= item.findtext(self.excerptns+'encoded')
                entry['post_id']=int(item.findtext(self.wpns+'post_id'))
                entry['post_name']=item.findtext(self.wpns+'post_name')
                entry['post_parent']=int(item.findtext(self.wpns+'post_parent'))
                entry['menu_order']=int(item.findtext(self.wpns+'menu_order'))
                entry['tags']=[]
                entry['categories']=[]
                # <category domain="tag|category" nicename="..."> elements
                # carry both the tags and the categories of the post.
                for cat in item.findall('category'):
                    if 'nicename' in cat.attrib:
                        nicename=cat.attrib['nicename']
                        cat_type=cat.attrib['domain']
                        if cat_type=='tag':
                            entry['tags'].append(cat.text)
                        else:
                            entry['categories'].append({'slug':nicename,'name':cat.text})
                entry['published']=(item.findtext(self.wpns+'status')=='publish')
                entry['comments']=[]
                for com in item.findall(self.wpns+'comment'):
                    # comment_approved is '1'/'0' (or missing/non-numeric,
                    # e.g. 'spam'): anything unparsable counts as rejected.
                    try:
                        comment_approved=int(com.findtext(self.wpns+'comment_approved'))
                    except (TypeError, ValueError):
                        comment_approved=0
                    if comment_approved:
                        comment=dict(author=com.findtext(self.wpns+'comment_author'),
                            content=com.findtext(self.wpns+'comment_content'),
                            email=com.findtext(self.wpns+'comment_author_email'),
                            weburl=com.findtext(self.wpns+'comment_author_url'),
                            date=com.findtext(self.wpns+'comment_date')
                            )
                        # BUG FIX: the comment dict was previously built but
                        # never stored, so approved comments were dropped.
                        entry['comments'].append(comment)
                self.entries.append(entry)
            except Exception:
                # Log with traceback instead of a bare, detail-free message.
                logging.exception("parse wordpress file error")
        self.total=self.count()
        self.cur_do=("begin","begin")
        # Drop the raw XML/tree so the object stays small and picklable.
        self.source=None
        self.doc=None
    def count(self):
        """Number of items still queued (categories + entries)."""
        return len(self.categories)+len(self.entries)
    def pop(self):
        """Return the next ('cat'|'entry', payload) pair, or None when done.

        Categories are drained first so entries can link to them.
        """
        if len(self.categories)>0:
            self.cur_do=('cat',self.categories.pop())
            return self.cur_do
        if len(self.entries)>0:
            self.cur_do=('entry', self.entries.pop())
            return self.cur_do
        return None
    def __getstate__(self):
        # Shrink cur_do to just the name/title needed for progress display.
        if self.cur_do[0]=='cat':
            c=('cat',self.cur_do[1]['name'])
        elif self.cur_do[0]=='entry':
            c=('entry',self.cur_do[1]['title'])
        else:
            c=('begin','begin')
        return (c,self.total,self.categories,self.tags,self.entries)
    def __setstate__(self,data):
        c=data[0]
        if c[0]=='cat':
            self.cur_do=('cat',{'name':c[1]})
        elif c[0]=='entry':
            self.cur_do=('entry',{'title':c[1]})
        else:
            self.cur_do=c
        self.total,self.categories,self.tags,self.entries=data[1:]
# Ad-hoc manual test: parse a local WXR file and drain the work queue,
# printing the kind of each popped item (Python 2 script usage).
if __name__=='__main__':
    import sys
    #f=sys.argv[1]
    f='D:\\work\\micolog\\wordpress.xml'
    wp=import_wordpress(open(f).read())
    wp.parse()
    print wp.count()
    item=wp.pop()
    while item:
        print item[0]
        item=wp.pop()
| Python |
from wordpress import * | Python |
# -*- coding: utf-8 -*-
import os,logging
import re
from functools import wraps
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from google.appengine.api import memcache
from google.appengine.api import urlfetch
##import app.webapp as webapp2
# Django must know the settings module before django.conf is touched.
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from django.utils.translation import activate
from django.template import TemplateDoesNotExist
from django.conf import settings
# Force the lazy settings proxy to re-initialise on (re)import.
settings._target = None
#from model import g_blog,User
#activate(g_blog.language)
from google.appengine.api.labs import taskqueue
import wsgiref.handlers
from mimetypes import types_map
from datetime import datetime, timedelta
import urllib
import traceback
import micolog_template
logging.info('module base reloaded')
def urldecode(value):
    # Unquote twice to undo double-encoded URLs, then decode the raw
    # bytes as UTF-8 into a unicode string (Python 2 urllib API).
    return urllib.unquote(urllib.unquote(value)).decode('utf8')
def urlencode(value):
    # Encode a unicode value to UTF-8 bytes, then percent-escape them.
    return urllib.quote(value.encode('utf8'))
def sid():
    """Return a sortable timestamp id string: YYMMDDHHMMSS + microseconds.

    BUG FIX: this module does `from datetime import datetime, timedelta`,
    so the old `datetime.datetime.now()` raised AttributeError on every
    call (the name `datetime` is already the class, not the module).
    """
    now = datetime.now()
    return now.strftime('%y%m%d%H%M%S') + str(now.microsecond)
def requires_admin(method):
    """Decorator for handler methods that only admins/authors may call.

    Anonymous users are redirected to the login page; logged-in users who
    are neither GAE admins nor registered authors get a 403 error page.
    """
    @wraps(method)
    def guarded(self, *args, **kwargs):
        # Not logged in at all: bounce to the login screen.
        if not self.is_login:
            self.redirect(users.create_login_url(self.request.uri))
            return None
        # Logged in but neither admin nor author: forbidden.
        if not (self.is_admin or self.author):
            return self.error(403)
        return method(self, *args, **kwargs)
    return guarded
def printinfo(method):
    """Debug decorator: dump the handler object, its dir() and its
    instance attributes to stdout before invoking the wrapped method
    (Python 2 print statements; development aid only)."""
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        print self #.__name__
        print dir(self)
        for x in self.__dict__:
            print x
        return method(self, *args, **kwargs)
    return wrapper
#only ajax methed allowed
def ajaxonly(method):
    """Decorator: allow a handler method only for XMLHttpRequest calls.

    Non-AJAX requests get a 404; a request without the X-Requested-With
    header raises KeyError, exactly as before.
    """
    @wraps(method)
    def checked(self, *args, **kwargs):
        if self.request.headers["X-Requested-With"] == "XMLHttpRequest":
            return method(self, *args, **kwargs)
        # Not an AJAX request: 404 (and implicitly return None).
        self.error(404)
    return checked
#only request from same host can passed
def hostonly(method):
    """Decorator: serve the request only when the Referer is this host.

    NOTE(review): the startswith offset 7 assumes an 'http://' scheme
    prefix; an https referer (8-char prefix) would be rejected -- confirm
    whether that is intended.
    """
    @wraps(method)
    def checked(self, *args, **kwargs):
        referer = self.request.headers['Referer']
        if referer.startswith(os.environ['HTTP_HOST'], 7):
            return method(self, *args, **kwargs)
        # Foreign referer: 404 (and implicitly return None).
        self.error(404)
    return checked
def format_date(dt):
    """Format *dt* as an RFC-1123 style HTTP date string (labelled GMT)."""
    return '%s GMT' % dt.strftime('%a, %d %b %Y %H:%M:%S')
def cache(key="",time=3600):
    """Decorator factory: memcache the handler's full rendered response.

    Cache key is `key` + the request path+query string.  The cached value
    is a tuple (body, last_modified[, status_code[, headers]]); older
    cache entries may be shorter, hence the length checks below.
    """
    def _decorate(method):
        def _wrapper(*args, **kwargs):
            from model import g_blog
            if not g_blog.enable_memcache:
                # Caching disabled blog-wide: just run the handler.
                method(*args, **kwargs)
                return
            request=args[0].request
            response=args[0].response
            skey=key+ request.path_qs
            #logging.info('skey:'+skey)
            html= memcache.get(skey)
            #arg[0] is BaseRequestHandler object
            if html:
                # Cache hit: replay status, headers and body.
                logging.info('cache:'+skey)
                response.last_modified =html[1]
                ilen=len(html)
                if ilen>=3:
                    response.set_status(html[2])
                if ilen>=4:
                    # NOTE: this loop reuses (clobbers) the local `skey`;
                    # harmless because skey is not used afterwards here.
                    for skey,value in html[3].items():
                        response.headers[skey]=value
                response.out.write(html[0])
            else:
                # Cache miss: run the handler, then store the response.
                if 'last-modified' not in response.headers:
                    response.last_modified = format_date(datetime.utcnow())
                method(*args, **kwargs)
                result=response.out.getvalue()
                # Reach into webapp's name-mangled private status field.
                status_code = response._Response__status[0]
                logging.debug("Cache:%s"%status_code)
                memcache.set(skey,(result,response.last_modified,status_code,response.headers),time)
        return _wrapper
    return _decorate
#-------------------------------------------------------------------------------
class PingbackError(Exception):
    """Raised if the remote server caused an exception while pingbacking.
    This is not raised if the pingback function is unable to locate a
    remote server.
    """
    # `_` marks strings for gettext extraction; it is deleted right after
    # the table is built so it never leaks as a class attribute.
    _ = lambda x: x
    default_messages = {
        16: _(u'source URL does not exist'),
        17: _(u'The source URL does not contain a link to the target URL'),
        32: _(u'The specified target URL does not exist'),
        33: _(u'The specified target URL cannot be used as a target'),
        48: _(u'The pingback has already been registered'),
        49: _(u'Access Denied')
    }
    del _

    def __init__(self, fault_code, internal_message=None):
        Exception.__init__(self)
        self.fault_code = fault_code
        self._internal_message = internal_message

    def as_fault(self):
        """Return the pingback errors XMLRPC fault."""
        # BUG FIX: `Fault` was never imported anywhere in this module, so
        # this method died with NameError; import it locally (Python 2
        # stdlib xmlrpclib).
        from xmlrpclib import Fault
        return Fault(self.fault_code, self.internal_message or
                     'unknown server error')

    @property
    def ignore_silently(self):
        """If the error can be ignored silently."""
        return self.fault_code in (17, 33, 48, 49)

    @property
    def means_missing(self):
        """If the error means that the resource is missing or not
        accepting pingbacks.
        """
        return self.fault_code in (32, 33)

    @property
    def internal_message(self):
        # Explicit message wins; otherwise the table, then a generic text.
        if self._internal_message is not None:
            return self._internal_message
        return self.default_messages.get(self.fault_code) or 'server error'

    @property
    def message(self):
        # BUG FIX: this used to call `_(msg)`, but `_` was deleted after
        # the class body, so every access raised NameError.  The table
        # values already went through the (identity) `_`, so return them
        # directly.
        msg = self.default_messages.get(self.fault_code)
        if msg is not None:
            return msg
        return u'An unknown server error (%s) occurred' % self.fault_code
class util:
    """Fire-and-forget helpers: enqueue ping tasks on the default queue;
    the task handlers perform the actual outbound HTTP requests."""
    @classmethod
    def do_trackback(cls, tbUrl=None, title=None, excerpt=None, url=None, blog_name=None):
        # Queue a trackback ping to tbUrl describing our post at `url`.
        taskqueue.add(url='/admin/do/trackback_ping',
            params={'tbUrl': tbUrl,'title':title,'excerpt':excerpt,'url':url,'blog_name':blog_name})
    #pingback ping
    @classmethod
    def do_pingback(cls,source_uri, target_uri):
        # Queue a pingback from our source_uri to the remote target_uri.
        taskqueue.add(url='/admin/do/pingback_ping',
            params={'source': source_uri,'target':target_uri})
##cache variable
class Pager(object):
    """Pagination helper over either a datastore model/query (anything
    with count()/fetch()) or a plain sequence (anything with __len__ and
    slicing)."""

    def __init__(self, model=None, query=None, items_per_page=10):
        # A model takes precedence: page over model.all(); otherwise use
        # the supplied query/sequence directly.
        if model:
            self.query = model.all()
        else:
            self.query = query
        self.items_per_page = items_per_page

    def fetch(self, p):
        """Return (results, links) for 1-based page number *p*.

        links is a dict with count/page_index/prev/next/last; next is 0 on
        the final page.  An out-of-range p falls back to page 1.
        """
        # Sequences report their length directly; queries are counted.
        if hasattr(self.query, '__len__'):
            max_offset = len(self.query)
        else:
            max_offset = self.query.count()
        # Number of pages, rounding up.  BUG FIX: use explicit floor
        # division `//` instead of relying on Python 2's implicit integer
        # `/` (same result on Py2, correct under future division too).
        n = max_offset // self.items_per_page
        if max_offset % self.items_per_page != 0:
            n += 1
        # BUG FIX: the old guard was `p < 0`, which let p == 0 through and
        # produced a negative offset (an empty page); clamp to page 1.
        if p < 1 or p > n:
            p = 1
        offset = (p - 1) * self.items_per_page
        if hasattr(self.query, 'fetch'):
            results = self.query.fetch(self.items_per_page, offset)
        else:
            results = self.query[offset:offset + self.items_per_page]
        links = {'count': max_offset, 'page_index': p, 'prev': p - 1, 'next': p + 1, 'last': n}
        if links['next'] > n:
            links['next'] = 0
        return (results, links)
class BaseRequestHandler(webapp.RequestHandler):
    """Base class for all Micolog request handlers.

    initialize() resolves the current blog, the logged-in user and his
    role ('admin' / 'author' / 'login' / 'guest'), and seeds
    self.template_vals for the render helpers below.
    """
    def __init__(self):
        # Default highlighted menu item; subclasses override.
        self.current='home'
    def initialize(self, request, response):
        webapp.RequestHandler.initialize(self, request, response)
        os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
        # Imported lazily to avoid a circular import at module load time.
        from model import g_blog,User
        self.blog = g_blog
        self.login_user = users.get_current_user()
        self.is_login = (self.login_user != None)
        self.loginurl=users.create_login_url(self.request.uri)
        self.logouturl=users.create_logout_url(self.request.uri)
        self.is_admin = users.is_current_user_admin()
        if self.is_admin:
            self.auth = 'admin'
            self.author=User.all().filter('email =',self.login_user.email()).get()
            if not self.author:
                # First visit by a GAE admin: auto-create his author record.
                self.author=User(dispname=self.login_user.nickname(),email=self.login_user.email())
                self.author.isadmin=True
                self.author.user=self.login_user
                self.author.put()
        elif self.is_login:
            # Logged in: 'author' if a User record exists, else plain 'login'.
            self.author=User.all().filter('email =',self.login_user.email()).get()
            if self.author:
                self.auth='author'
            else:
                self.auth = 'login'
        else:
            # NOTE: self.author is left unset for guests.
            self.auth = 'guest'
        try:
            self.referer = self.request.headers['referer']
        except:
            self.referer = None
        self.template_vals = {'self':self,'blog':self.blog,'current':self.current}
    def __before__(self,*args):
        # Hook called before dispatch (no-op by default).
        pass
    def __after__(self,*args):
        # Hook called after dispatch (no-op by default).
        pass
    def error(self,errorcode,message='an error occured'):
        """Render the themed error page for errorcode (404/403/500/...).

        Falls back from error<code>.html to error.html in the current
        theme, then the default theme, and finally to the raw message.
        """
        if errorcode == 404:
            message = 'Sorry, we were not able to find the requested page.  We have logged this error and will look into it.'
        elif errorcode == 403:
            message = 'Sorry, that page is reserved for administrators.  '
        elif errorcode == 500:
            message = "Sorry, the server encountered an error.  We have logged this error and will look into it."
            message+="<p><pre>"+traceback.format_exc()+"</pre><br></p>"
        self.template_vals.update( {'errorcode':errorcode,'message':message})
        if errorcode>0:
            self.response.set_status(errorcode)
        #errorfile=getattr(self.blog.theme,'error'+str(errorcode))
        #logging.debug(errorfile)
##		if not errorfile:
##			errorfile=self.blog.theme.error
        errorfile='error'+str(errorcode)+".html"
        try:
            content=micolog_template.render(self.blog.theme,errorfile, self.template_vals)
        except TemplateDoesNotExist:
            try:
                content=micolog_template.render(self.blog.theme,"error.html", self.template_vals)
            except TemplateDoesNotExist:
                content=micolog_template.render(self.blog.default_theme,"error.html", self.template_vals)
            except:
                content=message
        except:
            content=message
        self.response.out.write(content)
    def get_render(self,template_file,values):
        """Render template_file (without extension) from the current theme,
        falling back to the default theme, and return the HTML string."""
        template_file=template_file+".html"
        self.template_vals.update(values)
        try:
            #sfile=getattr(self.blog.theme, template_file)
            logging.debug("get_render:"+template_file)
            html = micolog_template.render(self.blog.theme, template_file, self.template_vals)
        except TemplateDoesNotExist:
            #sfile=getattr(self.blog.default_theme, template_file)
            html = micolog_template.render(self.blog.default_theme, template_file, self.template_vals)
        return html
    def render(self,template_file,values):
        """
        Helper method to render the appropriate template
        """
        html=self.get_render(template_file,values)
        self.response.out.write(html)
    def message(self,msg,returl=None,title='Infomation'):
        # Convenience wrapper around the themed msg.html page.
        self.render('msg',{'message':msg,'title':title,'returl':returl})
    def render2(self,template_file,template_vals={}):
        """
        Helper method to render the appropriate template

        Renders via Django's plain template loader from the blog root
        (used for admin views that are not theme-dependent).  The mutable
        default is safe here: it is only read, never mutated.
        """
        self.template_vals.update(template_vals)
        path = os.path.join(self.blog.rootdir, template_file)
        self.response.out.write(template.render(path, self.template_vals))
    def param(self, name, **kw):
        # Shortcut for a raw request parameter.
        return self.request.get(name, **kw)
    def paramint(self, name, default=0):
        # Request parameter coerced to int, with a default on failure.
        try:
            return int(self.request.get(name))
        except:
            return default
    def parambool(self, name, default=False):
        # Checkbox-style parameter: HTML forms submit 'on' when checked.
        try:
            return self.request.get(name)=='on'
        except:
            return default
    def write(self, s):
        self.response.out.write(s)
    def chk_login(self, redirect_url='/'):
        # True when logged in; otherwise redirect and return False.
        if self.is_login:
            return True
        else:
            self.redirect(redirect_url)
            return False
    def chk_admin(self, redirect_url='/'):
        # True when the user is a GAE admin; otherwise redirect, False.
        if self.is_admin:
            return True
        else:
            self.redirect(redirect_url)
            return False
| Python |
# -*- coding: utf-8 -*-
import logging
from django import template
from model import *
import django.template.defaultfilters as defaultfilters
import urllib
register = template.Library()
from datetime import *
@register.filter
def datetz(date,format): #datetime with timedelta
    # Shift the stored (UTC) datetime by the blog's configured hour offset
    # before formatting with Django's standard date filter.
    t=timedelta(seconds=3600*g_blog.timedelta)
    return defaultfilters.date(date+t,format)
@register.filter
def TimestampISO8601(t):
    """Seconds since epoch (1970-01-01) --> ISO 8601 time string.

    BUG FIX: this module does `from datetime import *`, so the name
    `time` was the datetime.time class (which has no gmtime), and the old
    `time.strftime(..., time.gmtime(t))` raised AttributeError.  Format
    the UTC timestamp via the datetime class instead.
    """
    return datetime.utcfromtimestamp(t).strftime('%Y-%m-%dT%H:%M:%SZ')
@register.filter
def urlencode(value):
    # Template filter: UTF-8 encode then percent-escape (Python 2 urllib).
    return urllib.quote(value.encode('utf8'))
@register.filter
def check_current(v1, v2):
    """Template filter: the CSS class "current" when the two values match,
    otherwise the empty string."""
    return "current" if v1 == v2 else ""
@register.filter
def excerpt_more(entry,value='..more'):
    # Template filter: render the entry excerpt with a "more" link text
    # (the byte-string default is decoded to unicode for the model API).
    return entry.get_content_excerpt(value.decode('utf8'))
@register.filter
def dict_value(v1,v2):
    # Template filter: index v1 (a dict or sequence) with key v2.
    return v1[v2]
from app.html_filter import html_filter
# HTML sanitizer for user-supplied content: only the tags/attributes
# whitelisted below survive filtering.
plog_filter = html_filter()
plog_filter.allowed = {
    'a': ('href', 'target', 'name'),
    'b': (),
    'blockquote': (),
    'pre': (),
    'em': (),
    'i': (),
    'img': ('src', 'width', 'height', 'alt', 'title'),
    'strong': (),
    'u': (),
    'font': ('color', 'size'),
    'p': (),
    'h1': (),
    'h2': (),
    'h3': (),
    'h4': (),
    'h5': (),
    'h6': (),
    'table': (),
    'tr': (),
    'th': (),
    'td': (),
    'ul': (),
    'ol': (),
    'li': (),
    'br': (),
    'hr': (),
    }
# <br> is self-closing; allow a few common HTML entities to pass through.
plog_filter.no_close += ('br',)
plog_filter.allowed_entities += ('nbsp','ldquo', 'rdquo', 'hellip',)
plog_filter.make_clickable_urls = False # enable this will get a bug about a and img
@register.filter
def do_filter(data):
    # Template filter: sanitize HTML through the whitelist filter above.
    return plog_filter.go(data)
'''
tag like {%mf header%}xxx xxx{%endmf%}
'''
@register.tag("mf")
def do_mf(parser, token):
    # {%mf name%}...{%endmf%}: collect the enclosed nodes and hand them to
    # MfNode, which runs the blog filter named in the tag over the output.
    nodelist = parser.parse(('endmf',))
    parser.delete_first_token()
    return MfNode(nodelist,token)
class MfNode(template.Node):
    """Renders a {%mf name%}...{%endmf%} block: the rendered inner output
    is passed through the blog filter registered under *name*."""
    def __init__(self, nodelist, token):
        self.nodelist = nodelist
        self.token = token
    def render(self, context):
        tokens = self.token.split_contents()
        if len(tokens) < 2:
            # BUG FIX: TemplateSyntaxError was an unqualified name that is
            # never imported in this module, so the old Python 2 raise
            # (`raise TemplateSyntaxError, "..."`) died with NameError
            # instead of the intended error.  Qualify it via the django
            # template module that is already imported.
            raise template.TemplateSyntaxError(
                "'mf' tag takes one argument: the filter name is needed")
        fname = tokens[1]
        output = self.nodelist.render(context)
        return g_blog.tigger_filter(fname, output)
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A simple wrapper for Django templates.
The main purpose of this module is to hide all of the package import pain
you normally have to go through to get Django to work. We expose the Django
Template and Context classes from this module, handling the import nonsense
on behalf of clients.
Typical usage:
from google.appengine.ext.webapp import template
print template.render('templates/index.html', {'foo': 'bar'})
Django uses a global setting for the directory in which it looks for templates.
This is not natural in the context of the webapp module, so our load method
takes in a complete template path, and we set these settings on the fly
automatically. Because we have to set and use a global setting on every
method call, this module is not thread safe, though that is not an issue
for applications.
Django template documentation is available at:
http://www.djangoproject.com/documentation/templates/
"""
import md5
import os,logging
try:
from django import v0_96
except ImportError:
pass
import django
import django.conf
try:
django.conf.settings.configure(
DEBUG=False,
TEMPLATE_DEBUG=False,
TEMPLATE_LOADERS=(
'django.template.loaders.filesystem.load_template_source',
),
)
except (EnvironmentError, RuntimeError):
pass
import django.template
import django.template.loader
from google.appengine.ext import webapp
def render(theme,template_file, template_dict, debug=False):
    """Renders the template at the given path with the given dict of values.

    Example usage:
      render("templates/index.html", {"name": "Bret", "values": [1, 2, 3]})

    Args:
      theme: theme object (provides server_dir / isZip; see load())
      template_file: template file name within the theme
      template_dict: dictionary of values to apply to the template
    """
    # NOTE(review): `Context` is not defined in the visible part of this
    # module -- presumably django.template.Context is aliased further
    # down the file; confirm before using this module in isolation.
    t = load(theme,template_file, debug)
    return t.render(Context(template_dict))
# Compiled-template cache keyed by absolute template path (bypassed when
# debug=True so edits show up immediately).
template_cache = {}
def load(theme,template_file, debug=False):
    """Loads the Django template from the given path.

    It is better to use this function than to construct a Template using the
    class below because Django requires you to load the template with a method
    if you want imports and extends to work in the template.
    """
    #template_file=os.path.join("templates",template_file)
    # Zipped themes serve templates from their root; unpacked themes keep
    # them under a templates/ subdirectory.
    if theme.isZip:
        theme_path=theme.server_dir
    else:
        theme_path=os.path.join( theme.server_dir,"templates")
    abspath =os.path.join( theme_path,template_file)
    logging.debug("theme_path:%s,abspath:%s"%(theme_path,abspath))
    if not debug:
        template = template_cache.get(abspath, None)
    else:
        template = None
    if not template:
        #file_name = os.path.split(abspath)
        # Django resolves templates via global settings, so swap in this
        # theme's directory just for the duration of the load.
        new_settings = {
            'TEMPLATE_DIRS': (theme_path,),
            'TEMPLATE_DEBUG': debug,
            'DEBUG': debug,
            }
        old_settings = _swap_settings(new_settings)
        try:
            template = django.template.loader.get_template(template_file)
        finally:
            _swap_settings(old_settings)
        if not debug:
            template_cache[abspath] = template
        # Wrap render() so the same settings (and a patched {% url %} node
        # renderer) are in effect whenever this cached template renders.
        def wrap_render(context, orig_render=template.render):
            URLNode = django.template.defaulttags.URLNode
            save_urlnode_render = URLNode.render
            old_settings = _swap_settings(new_settings)
            try:
                URLNode.render = _urlnode_render_replacement
                return orig_render(context)
            finally:
                _swap_settings(old_settings)
                URLNode.render = save_urlnode_render
        template.render = wrap_render
    return template
def _swap_settings(new):
    """Install selected Django settings, returning the values they displace.

    Typical use:
        save = _swap_settings({'X': 1, 'Y': 2})
        try:
            ...new settings for X and Y are in effect here...
        finally:
            _swap_settings(save)

    Args:
        new: dict mapping setting names to the values to install.

    Returns:
        A dict of the same shape holding the previous values.  Settings that
        were never set come back as None and will be restored as None by the
        'finally' clause above; that is acceptable because None is also a
        legitimate value for some settings, and a dedicated 'unset' marker
        would be overkill with no known use case.
    """
    settings = django.conf.settings
    previous = {}
    for name in new:
        previous[name] = getattr(settings, name, None)
        setattr(settings, name, new[name])
    return previous
def create_template_register():
    """Return a fresh Django Library for registering custom filters and tags.

    Create a Python module with a module-level variable named "register":

        templatefilters.py
        ==================
        register = webapp.template.create_template_register()

        def cut(value, arg):
            return value.replace(arg, '')
        register.filter(cut)

    then make it usable in templates by calling, from your application module:

        myapp.py
        ========
        webapp.template.register_template_library('templatefilters')

    Registration details are described at
    http://www.djangoproject.com/documentation/templates_python/
    #extending-the-template-system
    """
    return django.template.Library()
def register_template_library(package_name):
    """Register a template extension module so templates can use it.

    Idempotent: a package already present in Django's library registry is
    left untouched.  See create_template_register for how to build one.
    """
    already_loaded = django.template.libraries.get(package_name, None)
    if not already_loaded:
        django.template.add_to_builtins(package_name)
# Re-export Django's Template and Context so callers of this module do not
# need to import django.template themselves.
Template = django.template.Template
Context = django.template.Context
def _urlnode_render_replacement(self, context):
    """Replacement for django's {% url %} block.

    This version uses WSGIApplication's url mapping to create urls.

    Examples:
        <a href="{% url MyPageHandler "overview" %}">
        {% url MyPageHandler implicit_args=False %}
        {% url MyPageHandler "calendar" %}
        {% url MyPageHandler "jsmith","calendar" %}
    """
    # Resolve template expressions against the current render context.
    args = [arg.resolve(context) for arg in self.args]
    try:
        app = webapp.WSGIApplication.active_instance
        handler = app.get_registered_handler_by_name(self.view_name)
        return handler.get_url(implicit_args=True, *args)
    except webapp.NoUrlFoundError:
        # Unknown handler names render as an empty string rather than failing.
        return ''
| Python |
# -*- coding: utf-8 -*-
import wsgiref.handlers
import xmlrpclib
from xmlrpclib import Fault
import sys
import cgi
import base64
#from datetime import datetime
import app.mktimefix as datetime
from SimpleXMLRPCServer import SimpleXMLRPCDispatcher
from functools import wraps
from django.utils.html import strip_tags
sys.path.append('modules')
from base import *
from model import *
from micolog_plugin import *
from urlparse import urlparse
def checkauth(pos=1):
    """Decorator factory: validate XML-RPC credentials, then strip them.

    The wrapped method is called with args[pos] (username) and args[pos + 1]
    (password) removed after they have been checked against
    g_blog.rpcuser / g_blog.rpcpassword.

    Args:
        pos: index of the username argument in the incoming call.

    Raises:
        ValueError: when credentials are missing or do not match.
    """
    def _decorate(method):
        # wraps (imported at module top, previously unused) preserves the
        # wrapped method's name/docstring for introspection and debugging.
        @wraps(method)
        def _wrapper(*args, **kwargs):
            username = args[pos]
            password = args[pos + 1]
            if not (username and password and g_blog.rpcuser and g_blog.rpcpassword
                    and (g_blog.rpcuser == username)
                    and (g_blog.rpcpassword == password)):
                raise ValueError("Authentication Failure")
            # Drop the credential arguments before delegating.
            args = args[0:pos] + args[pos + 2:]
            return method(*args, **kwargs)
        return _wrapper
    return _decorate
def format_date(d):
    """Wrap *d* in an xmlrpclib.DateTime; falsy values pass through as None."""
    return xmlrpclib.DateTime(d) if d else None
def post_struct(entry):
    """Build the metaWeblog/WordPress struct describing one post Entry.

    Raises:
        Fault: 404 when *entry* is None (unknown id).
    """
    if not entry:
        raise Fault(404, "Post does not exist")
    categories=[]
    if entry.categorie_keys:
        categories =[cate.name for cate in entry.categories]
    struct = {
        'postid': entry.key().id(),
        'title': entry.title,
        'link': entry.fullurl,
        'permaLink': entry.fullurl,
        'description': unicode(entry.content),
        'categories': categories,
        'userid': '1',
        'mt_keywords':','.join(entry.tags),
        'mt_excerpt': '',
        'mt_text_more': '',
        'mt_allow_comments': entry.allow_comment and 1 or 0,
        'mt_allow_pings': entry.allow_trackback and 1 or 0,
        'custom_fields':[],
        'post_status':entry.post_status,
        'sticky':entry.sticky,
        'wp_author_display_name': entry.get_author_user().dispname,
        'wp_author_id': str(entry.get_author_user().key().id()),
        'wp_password': entry.password,
        'wp_slug':entry.slug
    }
    if entry.date:
        # dateCreated is blog-local time (stored date shifted by the blog's
        # hour offset); date_created_gmt is the stored value as-is.
        t=timedelta(seconds=3600*g_blog.timedelta)
        struct['dateCreated'] = format_date(entry.date+t)
        struct['date_created_gmt'] = format_date(entry.date)
    return struct
def page_struct(entry):
    """Build the WordPress page struct describing one static-page Entry.

    Raises:
        Fault: 404 when *entry* is None (unknown id).
    """
    if not entry:
        raise Fault(404, "Post does not exist")
    categories=[]
    if entry.categorie_keys:
        categories =[cate.name for cate in entry.categories]
    struct = {
        'page_id': entry.key().id(),
        'title': entry.title,
        'link': entry.fullurl,
        'permaLink': entry.fullurl,
        'description': unicode(entry.content),
        'categories': categories,
        'userid': '1',
        'mt_allow_comments': entry.allow_comment and 1 or 0,
        'mt_allow_pings': entry.allow_trackback and 1 or 0,
        'custom_fields':[],
        'page_status':entry.post_status,
        'sticky':entry.sticky,
        'wp_author_display_name': entry.get_author_user().dispname,
        'wp_author_id': str(entry.get_author_user().key().id()),
        'wp_password': entry.password,
        'wp_slug':entry.slug,
        'text_more': '',
        'wp_author': 'admin',
        'wp_page_order': entry.menu_order,
        # Page hierarchy and templates are not modelled; report defaults.
        'wp_page_parent_id': 0,
        'wp_page_parent_title': '',
        'wp_page_template': 'default',
    }
    if entry.date:
        # Pages report the stored date for both local and GMT variants.
        struct['dateCreated'] = format_date(entry.date)
        struct['date_created_gmt'] = format_date(entry.date)
    return struct
def entry_title_struct(entry):
    """Build the minimal MovableType title struct for *entry*.

    Raises:
        Fault: 404 when *entry* is None.
    """
    if not entry:
        raise Fault(404, "Post does not exist")
    struct = {
        'userid': '1',
        'postid': str(entry.key().id()),
        'title': entry.title,
    }
    if entry.date:
        struct['dateCreated'] = format_date(entry.date)
    return struct
class Logger(db.Model):
    """Datastore record of one raw XML-RPC request/response exchange."""
    request = db.TextProperty()   # raw request body as received
    response = db.TextProperty()  # raw marshaled response sent back
    date = db.DateTimeProperty(auto_now_add=True)  # time of the call
#-------------------------------------------------------------------------------
# blogger
#-------------------------------------------------------------------------------
@checkauth()
def blogger_getUsersBlogs(discard):
    """Return the single blog managed by this Micolog instance."""
    blog = {
        'url': g_blog.baseurl,
        'blogid': '1',
        'isAdmin': True,
        'blogName': g_blog.title,
        'xmlrpc': g_blog.baseurl + "/rpc",
    }
    return [blog]
@checkauth(pos=2)
def blogger_deletePost(appkey, postid, publish=False):
    """Delete the post with id *postid*; unknown ids are ignored.

    Always returns True, per the Blogger API contract.
    """
    post = Entry.get_by_id(int(postid))
    # get_by_id returns None for an unknown id; the original then raised
    # AttributeError on .delete().  Treat a missing post as already deleted.
    if post:
        post.delete()
    return True
@checkauth()
def blogger_getUserInfo(appkey):
    """Return account info for the first admin user, or None if none exists."""
    for account in User.all():
        if not account.isadmin:
            continue
        return {
            'userid': str(account.key().id()),
            'nickname': account.dispname,
            'email': account.email,
            'firstname': '',
            'lastname': '',
            'url': '',
        }
    return None
#-------------------------------------------------------------------------------
# metaWeblog
#-------------------------------------------------------------------------------
@checkauth()
def metaWeblog_newPost(blogid, struct, publish):
    """Create a new post from a metaWeblog struct; return its id as a string.

    Recognised struct keys: title, description, categories, mt_text_more,
    mt_keywords, wp_slug, mt_excerpt, date_created_gmt/dateCreated,
    wp_password, sticky, wp_author_id, mt_tb_ping_urls.
    """
    if struct.has_key('categories'):
        cates = struct['categories']
    else:
        cates = []
    newcates = []
    for cate in cates:
        # .fetch(1) for consistency with metaWeblog_editPost: the original
        # tested the bare Query object with 'if c', which does not reliably
        # filter out unknown category names.
        c = Category.all().filter('name =', cate).fetch(1)
        if c:
            newcates.append(c[0].key())
    entry = Entry(title=struct['title'],
                  content=struct['description'],
                  categorie_keys=newcates
                  )
    if struct.has_key('mt_text_more'):
        content = struct['mt_text_more']
        if content:
            entry.content = entry.content + "<!--more-->" + struct['mt_text_more']
    if struct.has_key('mt_keywords'):
        entry.settags(struct['mt_keywords'])
    if struct.has_key('wp_slug'):
        entry.slug = struct['wp_slug']
    if struct.has_key('mt_excerpt'):
        entry.excerpt = struct['mt_excerpt']
    try:
        # If a date attribute is present: prefer the GMT variant; otherwise
        # convert the local time using the blog's configured hour offset.
        if struct.has_key('date_created_gmt'):
            entry.date = datetime.strptime(str(struct['date_created_gmt']), "%Y%m%dT%H:%M:%S")
        elif struct.has_key('dateCreated'):
            entry.date = datetime.strptime(str(struct['dateCreated']), "%Y%m%dT%H:%M:%S") - timedelta(seconds=3600 * g_blog.timedelta)
    except:
        # Malformed dates fall back to the model's default timestamp.
        pass
    if struct.has_key('wp_password'):
        entry.password = struct['wp_password']
    if struct.has_key('sticky'):
        entry.sticky = struct['sticky']
    if struct.has_key('wp_author_id'):
        author = User.get_by_id(int(struct['wp_author_id']))
        entry.author = author.user
        entry.author_name = author.dispname
    else:
        entry.author = g_blog.owner
        entry.author_name = g_blog.author
    if publish:
        entry.save(True)
        # Send any requested trackback pings for the published post.
        if struct.has_key('mt_tb_ping_urls'):
            for url in struct['mt_tb_ping_urls']:
                util.do_trackback(url, entry.title, entry.get_content_excerpt(more='')[:60], entry.fullurl, g_blog.title)
        g_blog.tigger_action("xmlrpc_publish_post", entry)
    else:
        entry.save()
    postid = entry.key().id()
    return str(postid)
@checkauth()
def metaWeblog_newMediaObject(blogid, struct):
    """Store an uploaded file as a Media entity and return its public URL."""
    name = struct['name']
    if struct.has_key('type'):
        mtype = struct['type']
    else:
        # No explicit MIME type: fall back to the file-name extension, if any.
        parts = name.split('.')
        mtype = parts[-1] if len(parts) > 1 else None
    bits = db.Blob(str(struct['bits']))
    media = Media(name=name, mtype=mtype, bits=bits)
    media.put()
    return {'url': g_blog.baseurl + '/media/' + str(media.key())}
@checkauth()
def metaWeblog_editPost(postid, struct, publish):
    """Update an existing post from a metaWeblog struct.

    Only the struct keys that are present are applied; title/description
    are always overwritten.  Returns True (metaWeblog contract).
    """
    if struct.has_key('categories'):
        cates = struct['categories']
    else:
        cates = []
    newcates=[]
    for cate in cates:
        c=Category.all().filter('name =',cate).fetch(1)
        if c:
            newcates.append(c[0].key())
    entry=Entry.get_by_id(int(postid))
    if struct.has_key('mt_keywords'):
        entry.settags(struct['mt_keywords'])
    if struct.has_key('wp_slug'):
        entry.slug=struct['wp_slug']
    if struct.has_key('mt_excerpt'):
        entry.excerpt=struct['mt_excerpt']
    try:
        # If a date attribute is present: prefer the GMT variant; otherwise
        # shift the local time by the blog's configured hour offset.
        if struct.has_key('date_created_gmt'):
            entry.date=datetime.strptime(str(struct['date_created_gmt']), "%Y%m%dT%H:%M:%S")
        elif struct.has_key('dateCreated'):
            entry.date=datetime.strptime(str(struct['dateCreated']), "%Y%m%dT%H:%M:%S")-timedelta(seconds=3600*g_blog.timedelta)
    except:
        # Malformed dates leave the stored date untouched.
        pass
    if struct.has_key('wp_password'):
        entry.password=struct['wp_password']
    if struct.has_key('sticky'):
        entry.sticky=struct['sticky']
    if struct.has_key('wp_author_id'):
        author=User.get_by_id(int(struct['wp_author_id']))
        entry.author=author.user
        entry.author_name=author.dispname
    else:
        entry.author=g_blog.owner
        entry.author_name=g_blog.author
    entry.title = struct['title']
    entry.content = struct['description']
    if struct.has_key('mt_text_more'):
        content=struct['mt_text_more']
        if content:
            # Non-empty "more" text is appended after the more marker.
            entry.content=entry.content+"<!--more-->"+struct['mt_text_more']
    entry.categorie_keys=newcates
    if publish:
        entry.save(True)
    else:
        entry.save()
    return True
@checkauth()
def metaWeblog_getCategories(blogid):
    """Return every category as a metaWeblog category struct."""
    return [{'categoryDescription': '',
             'categoryId': str(cate.ID()),
             'parentId': '0',
             'description': cate.name,
             'categoryName': cate.name,
             'htmlUrl': '',
             'rssUrl': ''
             } for cate in Category.all()]
@checkauth()
def metaWeblog_getPost(postid):
    """Fetch one post by id and return it as a metaWeblog struct."""
    return post_struct(Entry.get_by_id(int(postid)))
@checkauth()
def metaWeblog_getRecentPosts(blogid, num):
    """Return structs for up to min(num, 20) newest posts."""
    newest = Entry.all().filter('entrytype =', 'post').order('-date')
    return [post_struct(e) for e in newest.fetch(min(num, 20))]
#-------------------------------------------------------------------------------
# WordPress API
#-------------------------------------------------------------------------------
@checkauth(pos=0)
def wp_getUsersBlogs():
    """WordPress variant of getUsersBlogs; same single-blog answer."""
    info = {'url': g_blog.baseurl,
            'blogid': '1',
            'isAdmin': True,
            'blogName': g_blog.title,
            'xmlrpc': g_blog.baseurl + "/rpc"}
    return [info]
@checkauth()
def wp_getTags(blog_id):
    """Return all tags as WordPress tag structs (tag ids are not tracked)."""
    return [{'tag_ID': '0',
             'name': tag.tag,
             'count': str(tag.tagcount),
             'slug': tag.tag,
             'html_url': '',
             'rss_url': ''} for tag in Tag.all()]
@checkauth()
def wp_getCommentCount(blog_id, postid):
    """Return comment counts for one post, or None when the post is unknown.

    Micolog keeps no moderation/spam queues, so those buckets are always 0.
    """
    # int() for consistency with the other handlers: XML-RPC clients may send
    # the post id as a string, and get_by_id expects a numeric id.
    entry = Entry.get_by_id(int(postid))
    if entry:
        return {'approved': entry.commentcount,
                'awaiting_moderation': 0,
                'spam': 0,
                'total_comments': entry.commentcount}
@checkauth()
def wp_getPostStatusList(blogid):
    """Map internal post-status codes to their display names."""
    statuses = {
        'draft': 'Draft',
        'pending': 'Pending Review',
        'private': 'Private',
        'publish': 'Published',
    }
    return statuses
@checkauth()
def wp_getPageStatusList(blogid):
    """Map internal page-status codes to their display names."""
    statuses = {'draft': 'Draft', 'private': 'Private', 'publish': 'Published'}
    return statuses
@checkauth()
def wp_getPageTemplates(blogid):
    """Micolog has no selectable page templates; return an empty mapping."""
    return {}
@checkauth()
def wp_setOptions(blogid, options):
    """Set blog attributes from *options* and echo the struct back.

    Only names that already exist on g_blog are updated; unknown names are
    silently ignored.
    """
    # BUG FIX: the original iterated 'options,options.values()' -- a 2-tuple
    # of (dict, values) -- which unpacks the dict itself and never visits the
    # name/value pairs.  items() is what was intended.
    for name, value in options.items():
        if hasattr(g_blog, name):
            setattr(g_blog, name, value)
    return options
@checkauth()
def wp_getOptions(blogid, options):
    """Return the requested g_blog attributes as WordPress option structs.

    TODO: when *options* is empty/None WordPress returns every option; this
    implementation still returns an empty dict in that case.
    """
    mdict = {}
    if options:
        for option in options:
            if hasattr(g_blog, option):
                # The original struct key was misspelled 'readonly:' (stray
                # trailing colon); WordPress clients expect 'readonly'.
                mdict[option] = {'desc': option,
                                 'readonly': False,
                                 'value': getattr(g_blog, option)}
    return mdict
@checkauth()
def wp_newCategory(blogid, struct):
    """Create a category (or reuse an existing one by name); return its id."""
    name = struct['name']
    existing = Category.all().filter('name =', name).fetch(1)
    if existing and len(existing):
        return existing[0].ID()
    # The name doubles as the slug for newly created categories.
    category = Category(name=name, slug=name)
    category.put()
    return category.ID()
@checkauth()
def wp_newPage(blogid,struct,publish):
    """Create a static page from a WordPress page struct; return its id.

    Recognised struct keys: title, description, mt_text_more,
    date_created_gmt/dateCreated, wp_slug, wp_page_order, wp_password,
    wp_author_id.
    """
    entry=Entry(title = struct['title'],
            content = struct['description'],
            )
    if struct.has_key('mt_text_more'):
        entry.content=entry.content+"<!--more-->"+struct['mt_text_more']
    try:
        # If a date attribute is present: prefer the GMT variant; otherwise
        # shift the local time by the blog's configured hour offset.
        if struct.has_key('date_created_gmt'):
            entry.date=datetime.strptime(str(struct['date_created_gmt']), "%Y%m%dT%H:%M:%S")
        elif struct.has_key('dateCreated'):
            entry.date=datetime.strptime(str(struct['dateCreated']), "%Y%m%dT%H:%M:%S")-timedelta(seconds=3600*g_blog.timedelta)
    except:
        # Malformed dates fall back to the model's default timestamp.
        pass
    if struct.has_key('wp_slug'):
        entry.slug=struct['wp_slug']
    if struct.has_key('wp_page_order'):
        entry.menu_order=int(struct['wp_page_order'])
    if struct.has_key('wp_password'):
        entry.password=struct['wp_password']
    if struct.has_key('wp_author_id'):
        author=User.get_by_id(int(struct['wp_author_id']))
        entry.author=author.user
        entry.author_name=author.dispname
    else:
        entry.author=g_blog.owner
        entry.author_name=g_blog.author
    entry.entrytype='page'
    if publish:
        entry.save(True)
    else:
        entry.save()
    postid =entry.key().id()
    return str(postid)
@checkauth(2)
def wp_getPage(blogid, pageid):
    """Fetch one static page by id and return it as a WordPress page struct."""
    page = Entry.get_by_id(int(pageid))
    return page_struct(page)
@checkauth()
def wp_getPages(blogid, num=20):
    """Return up to min(num, 20) newest static pages as page structs."""
    pages = Entry.all().filter('entrytype =', 'page').order('-date')
    return [page_struct(p) for p in pages.fetch(min(num, 20))]
@checkauth(2)
def wp_editPage(blogid,pageid,struct,publish):
    """Update an existing static page from a WordPress page struct.

    Always saves with publish=True and returns True.
    """
    entry=Entry.get_by_id(int(pageid))
    ## if struct.has_key('mt_keywords'):
    ##     entry.tags=struct['mt_keywords'].split(',')
    if struct.has_key('wp_slug'):
        entry.slug=struct['wp_slug']
    if struct.has_key('wp_page_order'):
        entry.menu_order=int(struct['wp_page_order'])
    try:
        # If a date attribute is present: prefer the GMT variant; otherwise
        # shift the local time by the blog's configured hour offset.
        if struct.has_key('date_created_gmt'):
            entry.date=datetime.strptime(str(struct['date_created_gmt']), "%Y%m%dT%H:%M:%S")
        elif struct.has_key('dateCreated'):
            entry.date=datetime.strptime(str(struct['dateCreated']), "%Y%m%dT%H:%M:%S")-timedelta(seconds=3600*g_blog.timedelta)
    except:
        # Malformed dates leave the stored date untouched.
        pass
    if struct.has_key('wp_password'):
        entry.password=struct['wp_password']
    if struct.has_key('wp_author_id'):
        author=User.get_by_id(int(struct['wp_author_id']))
        entry.author=author.user
        entry.author_name=author.dispname
    else:
        entry.author=g_blog.owner
        entry.author_name=g_blog.author
    entry.title = struct['title']
    entry.content = struct['description']
    if struct.has_key('mt_text_more'):
        entry.content=entry.content+"<!--more-->"+struct['mt_text_more']
    entry.save(True)
    return True
@checkauth()
def wp_deletePage(blogid, pageid):
    """Delete the page with id *pageid*; unknown ids are ignored.

    Always returns True, per the WordPress API contract.
    """
    page = Entry.get_by_id(int(pageid))
    # get_by_id returns None for an unknown id; the original then raised
    # AttributeError on .delete().  Treat a missing page as already deleted.
    if page:
        page.delete()
    return True
@checkauth()
def wp_getAuthors(blogid):
    """Return every user as a WordPress author struct.

    A per-user login name is not stored, so 'admin' is reported for all.
    """
    # The original maintained an unused running counter (i); removed.
    return [{'user_id': str(user.key().id()),
             'user_login': 'admin',
             'display_name': user.dispname} for user in User.all()]
@checkauth()
def wp_deleteComment(blogid,commentid):
    """Delete one comment by id.

    Returns True when the id parses and deletion (if any) succeeds; False on
    any error.  A missing comment counts as success.
    """
    try:
        comment=Comment.get_by_id(int(commentid))
        if comment:
            # delit() is the model's delete helper -- presumably it also
            # updates the entry's comment count; confirm in model.py.
            comment.delit()
        return True
    except:
        return False
@checkauth()
def wp_editComment(blogid,commentid,struct):
    """Update author/email/url/content of one comment.

    Returns True when the comment exists and was saved, False when it does
    not exist.  Unexpected errors are re-raised to surface as XML-RPC faults.
    """
    try:
        comment=Comment.get_by_id(int(commentid))
        if comment:
            url=struct['author_url']
            if url:
                # The model may reject an invalid URL; drop it rather than
                # failing the whole edit.
                try:
                    comment.weburl=url
                except:
                    comment.weburl=None
            #comment.date= format_date(datetime.now())
            comment.author=struct['author']
            #comment.weburl=struct['author_url']
            comment.email=struct['author_email']
            comment.content=struct['content']
            #comment.status=struct['status']
            comment.save()
            return True
    except:
        raise
    return False
@checkauth()
def wp_newComment(blogid, postid, struct):
    """Attach a new comment to post *postid* and return the comment id.

    Raises:
        Fault: 404 when the post does not exist.
    """
    # int() for consistency with the other handlers: clients may send the
    # post id as a string, and get_by_id expects a numeric id.
    post = Entry.get_by_id(int(postid))
    if not post:
        raise Fault(404, "Post does not exist")
    comment = Comment(entry=post, content=struct['content'],
                      author=struct['author'],
                      email=struct['author_email'])
    url = struct['author_url']
    if url:
        # The model may reject an invalid URL; store no URL rather than
        # rejecting the comment.
        try:
            comment.weburl = url
        except:
            comment.weburl = None
    comment.save()
    return comment.key().id()
@checkauth()
def wp_getCommentStatusList(blogid):
    """Comment counts by status; every stored comment counts as approved."""
    approved = Comment.all().count()
    return {'hold': 0, 'approve': approved, 'spam': 0}
@checkauth()
def wp_getPageList(blogid, num=20):
    """Return brief id/title/date listings for up to min(num, 20) pages."""
    pages = Entry.all().filter('entrytype =', 'page').order('-date').fetch(min(num, 20))
    return [{'page_id': str(p.key().id()),
             'page_title': p.title,
             'page_parent_id': 0,
             'dateCreated': format_date(p.date),
             'date_created_gmt': format_date(p.date)} for p in pages]
@checkauth()
def wp_deleteCategory(blogid, cateid):
    """Delete one category by id; True on success, False on any error
    (including an unknown id)."""
    try:
        Category.get_from_id(int(cateid)).delete()
        return True
    except:
        return False
@checkauth()
def wp_suggestCategories(blogid, category, max_result):
    """Return up to *max_result* category id/name pairs.

    NOTE(review): the *category* prefix argument is ignored, so suggestions
    come from all categories rather than matching ones.
    """
    suggestions = [{'categoryId': str(cate.ID()),
                    'categoryName': cate.name} for cate in Category.all()]
    return suggestions[:max_result]
@checkauth()
def wp_getComment(blogid,commentid):
    """Return one comment as a WordPress comment struct.

    User ids and moderation status are not tracked; every comment is
    reported as user 0 / 'approve'.
    """
    comment=Comment.get_by_id(int(commentid))
    return {
        'dateCreated':format_date(comment.date),
        'date_created_gmt':format_date(comment.date),
        'user_id':'0',
        'comment_id':str(comment.key().id()),
        'parent':'',
        'status':'approve',
        'content':unicode(comment.content),
        # Deep-link to the comment anchor on the entry page.
        'link':comment.entry.link+"#comment-"+str(comment.key().id()),
        'post_id':str(comment.entry.key().id()),
        'post_title':comment.entry.title,
        'author':comment.author,
        'author_url':str(comment.weburl),
        'author_email':str(comment.email),
        'author_ip':comment.ip,
        'type':''
    }
@checkauth()
def wp_getComments(blogid,data):
    """Return comment structs selected by *data*.

    data keys: number (required page size), offset (optional, default 0),
    post_id (truthy -> only that post's comments, else all comments).
    """
    def func(blogid,data):
        number=int(data['number'])
        try:
            offset=int(data['offset'])
        except:
            offset=0
        comments=[]
        if data['post_id']:
            postid=int(data['post_id'])
            post=Entry.get_by_id(postid)
            if post:
                comments=post.comments()
        else:
            comments=Comment.all()
        for comment in comments.fetch(number,offset):
            yield {
                'dateCreated':format_date(comment.date),
                'date_created_gmt':format_date(comment.date),
                'user_id':'0',
                'comment_id':str(comment.key().id()),
                'parent':'',
                'status':'approve',
                'content':unicode(comment.content),
                # Deep-link to the comment anchor on the entry page.
                'link':comment.entry.link+"#comment-"+str(comment.key().id()),
                'post_id':str(comment.entry.key().id()),
                'post_title':comment.entry.title,
                'author':comment.author,
                'author_url':str(comment.weburl),
                'author_email':str(comment.email),
                'author_ip':comment.ip,
                'type':''
            }
    return list(func(blogid,data))
@checkauth()
def mt_getPostCategories(postid):
    """Return the categories of one post as MovableType structs."""
    post = Entry.get_by_id(int(postid))
    structs = []
    for cate in post.categories:
        structs.append({'categoryId': str(cate.ID()),
                        'categoryName': cate.name,
                        'isPrimary': True})
    return structs
@checkauth()
def mt_getCategoryList(blogid):
    """Return all categories as MovableType id/name structs."""
    return [{'categoryId': str(cate.ID()),
             'categoryName': cate.name} for cate in Category.all()]
@checkauth()
def mt_setPostCategories(postid, cates):
    """Replace the category list of post *postid*.

    Args:
        cates: list of structs, each optionally carrying 'categoryId'.

    Returns:
        True on success, False on any error.
    """
    try:
        entry = Entry.get_by_id(int(postid))
        newcates = []
        for cate in cates:
            if cate.has_key('categoryId'):
                # The original computed int(cate['categoryId']) into an unused
                # local named 'id' (shadowing the builtin) and converted again.
                c = Category.get_from_id(int(cate['categoryId']))
                if c:
                    newcates.append(c.key())
        entry.categorie_keys = newcates
        entry.put()
        return True
    except:
        return False
@checkauth()
def mt_publishPost(postid):
    """Publish the given post; return its id, or 0 on any error."""
    try:
        post = Entry.get_by_id(int(postid))
        post.save(True)
        return post.key().id()
    except:
        return 0
@checkauth()
def mt_getRecentPostTitles(blogid, num):
    """Return id/title/date structs for up to min(num, 20) newest posts."""
    newest = Entry.all().filter('entrytype =', 'post').order('-date')
    return [entry_title_struct(e) for e in newest.fetch(min(num, 20))]
#------------------------------------------------------------------------------
#pingback
#------------------------------------------------------------------------------
# Pre-compiled patterns for scraping the pingback source page.  The inline
# (?i) flags sat at the END of the patterns, which is deprecated since
# Python 3.6 and an error from 3.11; re.I as a compile flag is equivalent.
_title_re = re.compile(r'<title>(.*?)</title>', re.I)
_pingback_re = re.compile(r'<link rel="pingback" href="([^"]+)" ?/?>', re.I)
_chunk_re = re.compile(r'\n\n|<(?:p|div|h\d)[^>]*>')
def pingback_ping(source_uri, target_uri):
    """Handle a pingback.ping XML-RPC call.

    Verifies that the source page can be fetched and that the target URL is
    under this blog, then records the pingback via pingback_post.

    Raises:
        Fault: 49 when pingbacks are disabled, 16 when the source cannot be
        fetched, 32 when the target is not below this blog's URL.
    """
    if not g_blog.allow_pingback:
        raise Fault(49, "Access denied.")
    # Next we check if the source URL does indeed exist.
    try:
        g_blog.tigger_action("pre_ping", source_uri, target_uri)
        response = urlfetch.fetch(source_uri)
    except Exception:
        # The original used the Python-2-only 'except Exception ,e' form and
        # never used the bound name; no binding keeps it 2/3-parseable.
        raise Fault(16, 'The source URL does not exist.%s' % source_uri)
    # We only accept pingbacks for links below our blog URL.
    blog_url = g_blog.baseurl
    if not blog_url.endswith('/'):
        blog_url += '/'
    if not target_uri.startswith(blog_url):
        raise Fault(32, 'The specified target URL does not exist.')
    path_info = target_uri[len(blog_url):]
    pingback_post(response, source_uri, target_uri, path_info)
    # Returning a string literal cannot raise; the original wrapped it in a
    # dead try/except that re-raised Fault(49).
    return "Micolog pingback succeed!"
def get_excerpt(response, url_hint, body_limit=1024 * 512):
    """Get an excerpt from the given `response`. `url_hint` is the URL
    which will be used as anchor for the excerpt. The return value is a
    tuple in the form ``(title, body)``. If one of the two items could
    not be calculated it will be `None`.
    """
    contents = response.content[:body_limit]
    title_match = _title_re.search(contents)
    title = title_match and strip_tags(title_match.group(1)) or None
    # Anchor tag whose quoted attribute value is (roughly) url_hint.
    link_re = re.compile(r'<a[^>]+?"\s*%s\s*"[^>]*>(.*?)</a>(?is)' %
                         re.escape(url_hint))
    # Scan paragraph-sized chunks for a link back to us; keep up to 120
    # characters of tag-stripped text on each side (\0 marks the link spot).
    for chunk in _chunk_re.split(contents):
        match = link_re.search(chunk)
        if not match:
            continue
        before = chunk[:match.start()]
        after = chunk[match.end():]
        raw_body = '%s\0%s' % (strip_tags(before).replace('\0', ''),
                               strip_tags(after).replace('\0', ''))
        body_match = re.compile(r'(?:^|\b)(.{0,120})\0(.{0,120})(?:\b|$)') \
            .search(raw_body)
        if body_match:
            break
    else:
        # No chunk contained a usable link back to the target.
        return title, None
    before, after = body_match.groups()
    link_text = strip_tags(match.group(1))
    if len(link_text) > 60:
        link_text = link_text[:60] + u' …'
    bits = before.split()
    bits.append(link_text)
    bits.extend(after.split())
    return title, u'[…] %s […]' % u' '.join(bits)
def pingback_post(response,source_uri, target_uri, slug):
    """This is the pingback handler for posts.

    Looks up the published entry whose link equals *slug*, extracts an
    excerpt from the fetched source page and stores it as a pingback
    comment.  Raises Fault on every rejection path.
    """
    entry = Entry.all().filter("published =", True).filter('link =', slug).get()
    #use allow_trackback as allow_pingback
    if entry is None or not entry.allow_trackback:
        raise Fault(33, 'no such post')
    title, excerpt = get_excerpt(response, target_uri)
    if not title:
        raise Fault(17, 'no title provided')
    elif not excerpt:
        raise Fault(17, 'no useable link to target')
    # Only one pingback per source URL per entry.
    comment = Comment.all().filter("entry =", entry).filter("weburl =", source_uri).get()
    if comment:
        raise Fault(48, 'pingback has already been registered')
        return  # NOTE(review): unreachable after the raise above.
    comment=Comment(author=urlparse(source_uri).hostname,
                    content="<strong>"+title[:250]+"...</strong><br/>" +
                            excerpt[:250] + '...',
                    weburl=source_uri,
                    entry=entry)
    comment.ctype=COMMENT_PINGBACK
    try:
        comment.save()
        g_blog.tigger_action("pingback_post",comment)
        # Invalidate the cached entry page so the pingback shows up.
        memcache.delete("/"+entry.link)
        return True
    except:
        raise Fault(49,"Access denied.")
    return
##------------------------------------------------------------------------------
class PlogXMLRPCDispatcher(SimpleXMLRPCDispatcher):
    """XML-RPC dispatcher backed by an explicit name -> callable mapping."""
    def __init__(self, funcs):
        # allow_none=True, utf-8 marshaling.
        SimpleXMLRPCDispatcher.__init__(self, True, 'utf-8')
        self.funcs = funcs
        # Adds the system.* introspection methods into self.funcs.
        self.register_introspection_functions()
# Single module-level dispatcher mapping XML-RPC method names to handlers.
# Several WordPress methods reuse the metaWeblog implementations.
dispatcher = PlogXMLRPCDispatcher({
    # Blogger API
    'blogger.getUsersBlogs' : blogger_getUsersBlogs,
    'blogger.deletePost' : blogger_deletePost,
    'blogger.getUserInfo': blogger_getUserInfo,
    # metaWeblog API
    'metaWeblog.newPost' : metaWeblog_newPost,
    'metaWeblog.editPost' : metaWeblog_editPost,
    'metaWeblog.getCategories' : metaWeblog_getCategories,
    'metaWeblog.getPost' : metaWeblog_getPost,
    'metaWeblog.getRecentPosts' : metaWeblog_getRecentPosts,
    'metaWeblog.newMediaObject':metaWeblog_newMediaObject,
    # WordPress API
    'wp.getUsersBlogs':wp_getUsersBlogs,
    'wp.getTags':wp_getTags,
    'wp.getCommentCount':wp_getCommentCount,
    'wp.getPostStatusList':wp_getPostStatusList,
    'wp.getPageStatusList':wp_getPageStatusList,
    'wp.getPageTemplates':wp_getPageTemplates,
    'wp.getOptions':wp_getOptions,
    'wp.setOptions':wp_setOptions,
    'wp.getCategories':metaWeblog_getCategories,
    'wp.newCategory':wp_newCategory,
    'wp.newPage':wp_newPage,
    'wp.getPage':wp_getPage,
    'wp.getPages':wp_getPages,
    'wp.editPage':wp_editPage,
    'wp.getPageList':wp_getPageList,
    'wp.deletePage':wp_deletePage,
    'wp.getAuthors':wp_getAuthors,
    'wp.deleteComment':wp_deleteComment,
    'wp.editComment':wp_editComment,
    'wp.newComment':wp_newComment,
    'wp.getCommentStatusList':wp_getCommentStatusList,
    'wp.deleteCategory':wp_deleteCategory,
    'wp.suggestCategories':wp_suggestCategories,
    'wp.getComment':wp_getComment,
    'wp.getComments':wp_getComments,
    'wp.uploadFile':metaWeblog_newMediaObject,
    # MovableType API
    'mt.setPostCategories':mt_setPostCategories,
    'mt.getPostCategories':mt_getPostCategories,
    'mt.getCategoryList':mt_getCategoryList,
    'mt.publishPost':mt_publishPost,
    'mt.getRecentPostTitles':mt_getRecentPostTitles,
    ##pingback
    'pingback.ping':pingback_ping,
    })
# {{{ Handlers
class CallApi(BaseRequestHandler):
    """HTTP endpoint (/rpc, /xmlrpc.php) feeding POST bodies to the dispatcher."""
    def get(self):
        # GET is not part of XML-RPC; log the probe and tell the caller.
        Logger(request = self.request.uri, response = '----------------------------------').put()
        self.write('<h1>please use POST</h1>')
    def post(self):
        #self.response.headers['Content-Type'] = 'application/xml; charset=utf-8'
        request = self.request.body
        response = dispatcher._marshaled_dispatch(request)
        # Persist the raw exchange for the /rpc/view debug page.
        Logger(request = unicode(request, 'utf-8'), response = unicode(response, 'utf-8')).put()
        self.write(response)
class View(BaseRequestHandler):
    """Admin-only page showing the five most recent logged RPC exchanges."""
    @requires_admin
    def get(self):
        self.write('<html><body><h1>Logger</h1>')
        for log in Logger.all().order('-date').fetch(5, 0):
            self.write("<p>date: %s</p>" % log.date)
            self.write("<h1>Request</h1>")
            self.write('<pre>%s</pre>' % cgi.escape(log.request))
            # Fixed user-visible typo in the heading: "Reponse" -> "Response".
            self.write("<h1>Response</h1>")
            self.write('<pre>%s</pre>' % cgi.escape(log.response))
            self.write("<hr />")
        self.write('</body></html>')
class DeleteLog(BaseRequestHandler):
    """Admin action that clears every stored RPC log entry."""
    def get(self):
        if self.chk_admin():
            for record in Logger.all():
                record.delete()
            self.redirect('/rpc/view')
#}}}
def main():
    """WSGI entry point wiring the XML-RPC and log-viewer URLs."""
    #webapp.template.register_template_library("filter")
    routes = [
        ('/rpc', CallApi),
        ('/xmlrpc\.php', CallApi),
        ('/rpc/view', View),
        ('/rpc/dellog', DeleteLog),
    ]
    application = webapp.WSGIApplication(routes, debug=True)
    wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
    main()
| Python |
# -*- coding: utf-8 -*-
import cgi, os,sys,math
import wsgiref.handlers
import google.appengine.api
# Google App Engine imports.
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp import template, \
WSGIApplication
from google.appengine.api import users
##import app.webapp as webapp2
from google.appengine.ext import db
# Force Django to reload its settings.
from datetime import datetime ,timedelta
import base64,random
from django.utils import simplejson
import filter as myfilter
from django.template.loader import *
##settings.configure(LANGUAGE_CODE = 'zh-cn')
# Must set this env var before importing any part of Django
from app.safecode import Image
from app.gmemsess import Session
from base import *
from model import *
##os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
##from django.utils.translation import activate
##from django.conf import settings
##settings._target = None
##activate(g_blog.language)
from google.appengine.ext import zipserve
def doRequestHandle(old_handler, new_handler, **args):
    """Re-dispatch a GET: run new_handler.get() against old_handler's
    request/response pair, returning whatever get() returns."""
    new_handler.initialize(old_handler.request, old_handler.response)
    return new_handler.get(**args)
def doRequestPostHandle(old_handler, new_handler, **args):
    """Re-dispatch a POST: run new_handler.post() against old_handler's
    request/response pair, returning whatever post() returns."""
    new_handler.initialize(old_handler.request, old_handler.response)
    return new_handler.post(**args)
class BasePublicPage(BaseRequestHandler):
    """Base handler for public pages; preloads sidebar/menu template data."""
    def initialize(self, request, response):
        BaseRequestHandler.initialize(self,request, response)
        # Data every public template expects: top-level published menu pages,
        # blogroll links, the 12 newest month archives, tags, categories and
        # the five most recent comments.
        m_pages=Entry.all().filter('entrytype =','page')\
            .filter('published =',True)\
            .filter('entry_parent =',0)\
            .order('menu_order')
        blogroll=Link.all().filter('linktype =','blogroll')
        archives=Archive.all().order('-year').order('-month').fetch(12)
        alltags=Tag.all()
        self.template_vals.update({
            'menu_pages':m_pages,
            'categories':Category.all(),
            'blogroll':blogroll,
            'archives':archives,
            'alltags':alltags,
            'recent_comments':Comment.all().order('-date').fetch(5)
        })
    def m_list_pages(self):
        """Render the page menu as <li> items, marking the current page."""
        menu_pages=None
        entry=None
        if self.template_vals.has_key('menu_pages'):
            menu_pages= self.template_vals['menu_pages']
        if self.template_vals.has_key('entry'):
            entry=self.template_vals['entry']
        ret=''
        current=''
        for page in menu_pages:
            if entry and entry.entrytype=='page' and entry.key()==page.key():
                current= 'current_page_item'
            else:
                current= 'page_item'
            #page is external page ,and page.slug is none.
            if page.is_external_page and not page.slug:
                ret+='<li class="%s"><a href="%s" target="%s" >%s</a></li>'%( current,page.link,page.target, page.title)
            else:
                ret+='<li class="%s"><a href="/%s" target="%s">%s</a></li>'%( current,page.link, page.target,page.title)
        return ret
    def sticky_entrys(self):
        """Query for published posts flagged sticky, newest first."""
        return Entry.all().filter('entrytype =','post')\
            .filter('published =',True)\
            .filter('sticky =',True)\
            .order('-date')
class MainPage(BasePublicPage):
    """Front page: paged post list, plus ?p=<id> permalink dispatch."""
    def get(self,page=1):
        # ?p=<id> is the WordPress-style permalink; delegate to SinglePost.
        postid=self.param('p')
        if postid:
            try:
                postid=int(postid)
                return doRequestHandle(self,SinglePost(),postid=postid) #singlepost.get(postid=postid)
            except:
                return self.error(404)
        self.doget(page)
    def post(self):
        # Comment submissions POSTed to ?p=<id> are forwarded to SinglePost.
        postid=self.param('p')
        if postid:
            try:
                postid=int(postid)
                return doRequestPostHandle(self,SinglePost(),postid=postid) #singlepost.get(postid=postid)
            except:
                return self.error(404)
    @cache()
    def doget(self,page):
        """Render page *page* (1-based) of the published post list."""
        page=int(page)
        entrycount=g_blog.postscount()
        # Ceiling division via the and/or idiom (Python 2 integer division).
        max_page = entrycount / g_blog.posts_per_page + ( entrycount % g_blog.posts_per_page and 1 or 0 )
        if page < 1 or page > max_page:
            return self.error(404)
        entries = Entry.all().filter('entrytype =','post').\
                filter("published =", True).order('-date').\
                fetch(self.blog.posts_per_page, offset = (page-1) * self.blog.posts_per_page)
        show_prev =entries and (not (page == 1))
        show_next =entries and (not (page == max_page))
        #print page,max_page,g_blog.entrycount,self.blog.posts_per_page
        return self.render('index',{'entries':entries,
                            'show_prev' : show_prev,
                            'show_next' : show_next,
                            'pageindex':page,
                            'ishome':True
                            })
class entriesByCategory(BasePublicPage):
    """Lists published entries in one category, 20 per page."""
    @cache()
    def get(self,slug=None):
        if not slug:
            self.error(404)
            return
        try:
            page_index=int(self.param('page'))
        except:
            # Missing/invalid ?page defaults to the first page.
            page_index=1
        slug=urldecode(slug)
        cats=Category.all().filter('slug =',slug).fetch(1)
        if cats:
            entries=Entry.all().filter("published =", True).filter('categorie_keys =',cats[0].key()).order("-date")
            entries,links=Pager(query=entries,items_per_page=20).fetch(page_index)
            self.render('category',{'entries':entries,'category':cats[0],'pager':links})
        else:
            # NOTE(review): 414 (Request-URI Too Long) looks like a typo for
            # 404 -- confirm before changing.
            self.error(414,slug)
class archive_by_month(BasePublicPage):
    """Lists published posts for one calendar month."""
    @cache()
    def get(self,year,month):
        try:
            page_index=int (self.param('page'))
        except:
            page_index=1
        # [firstday, lastday) spans the requested month, rolling the year
        # over for December.
        firstday=datetime(int(year),int(month),1)
        if int(month)!=12:
            lastday=datetime(int(year),int(month)+1,1)
        else:
            lastday=datetime(int(year)+1,1,1)
        entries=db.GqlQuery("SELECT * FROM Entry WHERE date > :1 AND date <:2 AND entrytype =:3 AND published = True ORDER BY date DESC",firstday,lastday,'post')
        entries,links=Pager(query=entries).fetch(page_index)
        self.render('month',{'entries':entries,'year':year,'month':month,'pager':links})
class entriesByTag(BasePublicPage):
    """Lists published posts carrying a given tag.

    *slug* is the URL-encoded tag text; results are paged 20 per page via the
    'page' query parameter (defaulting to 1 on any parse failure).
    """

    @cache()
    def get(self, slug=None):
        if not slug:
            self.error(404)
            return
        try:
            page_index = int(self.param('page'))
        except:
            page_index = 1
        # FIX: removed an unused function-local ``import urllib`` -- nothing
        # in this method referenced it (decoding is done by urldecode()).
        slug = urldecode(slug)
        entries = Entry.all().filter("published =", True).filter('tags =', slug).order("-date")
        entries, links = Pager(query=entries, items_per_page=20).fetch(page_index)
        self.render('tag', {'entries': entries, 'tag': slug, 'pager': links})
class SinglePost(BasePublicPage):
    """Displays a single post/page and accepts trackback pings for it.

    GET renders the entry -- looked up either by numeric ``post_id`` or by
    its link ``slug`` -- together with its comments.  POST implements the
    trackback protocol, storing accepted pings as COMMENT_TRACKBACK comments.
    """

    @cache()
    def get(self, slug=None, postid=None):
        if postid:
            entries = Entry.all().filter("published =", True).filter('post_id =', postid).fetch(1)
        else:
            slug = urldecode(slug)
            entries = Entry.all().filter("published =", True).filter('link =', slug).fetch(1)
        if not entries or len(entries) == 0:
            return self.error(404)
        mp = self.paramint("mp", 1)  # comment page number
        entry = entries[0]
        if entry.is_external_page:
            return self.redirect(entry.external_page_address, True)
        if g_blog.allow_pingback and entry.allow_trackback:
            self.response.headers['X-Pingback'] = "%s/rpc" % str(g_blog.baseurl)
        entry.readtimes += 1
        entry.put()
        self.entry = entry
        comments = entry.get_comments_by_page(mp, self.blog.comments_per_page)
        ## commentuser=self.request.cookies.get('comment_user', '')
        ## if commentuser:
        ##     commentuser=commentuser.split('#@#')
        ## else:
        commentuser = ['', '', '']
        comments_nav = self.get_comments_nav(mp, entry.comments().count())
        if entry.entrytype == 'post':
            self.render('single',
                        {
                         'entry': entry,
                         'relateposts': entry.relateposts,
                         'comments': comments,
                         'user_name': commentuser[0],
                         'user_email': commentuser[1],
                         'user_url': commentuser[2],
                         'checknum1': random.randint(1, 10),
                         'checknum2': random.randint(1, 10),
                         'comments_nav': comments_nav,
                        })
        else:
            self.render('page',
                        {'entry': entry,
                         'relateposts': entry.relateposts,
                         'comments': comments,
                         'user_name': commentuser[0],
                         'user_email': commentuser[1],
                         'user_url': commentuser[2],
                         'checknum1': random.randint(1, 10),
                         'checknum2': random.randint(1, 10),
                         'comments_nav': comments_nav,
                        })

    def post(self, slug=None, postid=None):
        '''Handle an incoming trackback ping and answer with protocol XML.'''
        error = '''
<?xml version="1.0" encoding="utf-8"?>
<response>
<error>1</error>
<message>%s</message>
</response>
'''
        success = '''
<?xml version="1.0" encoding="utf-8"?>
<response>
<error>0</error>
</response>
'''
        if not g_blog.allow_trackback:
            # BUGFIX: was ``self.error % ...`` -- self.error is a *method*,
            # so applying % to it raised TypeError instead of answering the
            # ping with the local XML error template.
            self.response.out.write(error % "Trackback denied.")
            return
        self.response.headers['Content-Type'] = "text/xml"
        if postid:
            entries = Entry.all().filter("published =", True).filter('post_id =', postid).fetch(1)
        else:
            slug = urldecode(slug)
            entries = Entry.all().filter("published =", True).filter('link =', slug).fetch(1)
        if not entries or len(entries) == 0:  # or (postid and not entries[0].link.endswith(g_blog.default_link_format%{'post_id':postid})):
            self.response.out.write(error % "empty slug/postid")
            return
        # check code, reject spam
        entry = entries[0]
        #key=self.param("code")
        #if (self.request.uri!=entry.trackbackurl) or entry.is_external_page or not entry.allow_trackback:
        import cgi
        param = cgi.parse_qs(self.request.uri)
        # BUGFIX: ``code`` was only bound when the request URI contained a
        # 'code' key; a ping without it raised NameError in the check below.
        code = ''
        if param.has_key('code'):
            code = param['code'][0]
        if (not str(entry.key()) == code) or entry.is_external_page or not entry.allow_trackback:
            self.response.out.write(error % "Invalid trackback url.")
            return
        coming_url = self.param('url')
        blog_name = myfilter.do_filter(self.param('blog_name'))
        excerpt = myfilter.do_filter(self.param('excerpt'))
        title = myfilter.do_filter(self.param('title'))
        if not coming_url or not blog_name or not excerpt or not title:
            self.response.out.write(error % "not enough post info")
            return
        import time
        # wait for half second in case otherside hasn't been published
        time.sleep(0.5)
        ## #also checking the coming url is valid and contains our link
        ## #this is not standard trackback behavior
        ## try:
        ##     result = urlfetch.fetch(coming_url)
        ##     if result.status_code != 200 :
        ##         #or ((g_blog.baseurl + '/' + slug) not in result.content.decode('ascii','ignore')):
        ##         self.response.out.write(error % "probably spam")
        ##         return
        ## except Exception, e:
        ##     logging.info("urlfetch error")
        ##     self.response.out.write(error % "urlfetch error")
        ##     return
        comment = Comment.all().filter("entry =", entry).filter("weburl =", coming_url).get()
        if comment:
            self.response.out.write(error % "has pinged before")
            return
        comment = Comment(author=blog_name,
                          content="<strong>" + title[:250] + "...</strong><br/>" +
                                  excerpt[:250] + '...',
                          weburl=coming_url,
                          entry=entry)
        comment.ip = self.request.remote_addr
        comment.ctype = COMMENT_TRACKBACK
        try:
            comment.save()
            memcache.delete("/" + entry.link)
            self.write(success)
            g_blog.tigger_action("pingback_post", comment)
        except:
            self.response.out.write(error % "unknow error")

    def get_comments_nav(self, pindex, count):
        """Build comment pagination data for *count* comments at page *pindex*.

        Returns '' when everything fits on one page, else a dict with the
        rendered 'nav' HTML plus 'current' and 'maxpage' numbers.
        """
        maxpage = count / g_blog.comments_per_page + (count % g_blog.comments_per_page and 1 or 0)
        if maxpage == 1:
            return ""
        result = ""
        if pindex > 1:
            result = "<a class='comment_prev' href='" + self.get_comments_pagenum_link(pindex - 1) + "'>«</a>"
        # Show a window of up to 3 pages either side of the current page.
        minr = max(pindex - 3, 1)
        maxr = min(pindex + 3, maxpage)
        if minr > 2:
            result += "<a class='comment_num' href='" + self.get_comments_pagenum_link(1) + "'>1</a>"
            result += "<span class='comment_dot' >...</span>"
        for n in range(minr, maxr + 1):
            if n == pindex:
                result += "<span class='comment_current'>" + str(n) + "</span>"
            else:
                result += "<a class='comment_num' href='" + self.get_comments_pagenum_link(n) + "'>" + str(n) + "</a>"
        if maxr < maxpage - 1:
            result += "<span class='comment_dot' >...</span>"
            result += "<a class='comment_num' href='" + self.get_comments_pagenum_link(maxpage) + "'>" + str(maxpage) + "</a>"
        if pindex < maxpage:
            result += "<a class='comment_next' href='" + self.get_comments_pagenum_link(pindex + 1) + "'>»</a>"
        return {'nav': result, 'current': pindex, 'maxpage': maxpage}

    def get_comments_pagenum_link(self, pindex):
        """URL for comment page *pindex* of the current entry (#comments anchor)."""
        url = str(self.entry.link)
        if url.find('?') >= 0:
            return "/" + url + "&mp=" + str(pindex) + "#comments"
        else:
            return "/" + url + "?mp=" + str(pindex) + "#comments"
class FeedHandler(BaseRequestHandler):
    """Atom feed of the ten newest published posts."""

    @cache(time=600)
    def get(self, tags=None):
        posts = Entry.all().filter('entrytype =', 'post').filter('published =', True).order('-date').fetch(10)
        if posts and posts[0]:
            last_updated = posts[0].date.strftime("%Y-%m-%dT%H:%M:%SZ")
        for post in posts:
            post.formatted_date = post.date.strftime("%Y-%m-%dT%H:%M:%SZ")
        self.response.headers['Content-Type'] = 'application/atom+xml'
        self.render2('views/atom.xml', {'entries': posts, 'last_updated': last_updated})
class CommentsFeedHandler(BaseRequestHandler):
    """Atom feed of the ten most recent comments."""

    @cache(time=600)
    def get(self, tags=None):
        recent = Comment.all().order('-date').fetch(10)
        if recent and recent[0]:
            last_updated = recent[0].date.strftime("%Y-%m-%dT%H:%M:%SZ")
        for c in recent:
            c.formatted_date = c.date.strftime("%Y-%m-%dT%H:%M:%SZ")
        self.response.headers['Content-Type'] = 'application/atom+xml'
        self.render2('views/comments.xml', {'comments': recent, 'last_updated': last_updated})
class SitemapHandler(BaseRequestHandler):
    """Renders sitemap.xml: home page, recent entries, and (optionally)
    the category and tag listing pages."""

    @cache(time=36000)
    def get(self, tags=None):
        urls = []

        def addurl(loc, lastmod=None, changefreq=None, priority=None):
            # Accumulate one sitemap <url> record.
            urls.append({
                'location': loc,
                'lastmod': lastmod,
                'changefreq': changefreq,
                'priority': priority,
            })

        addurl(g_blog.baseurl, changefreq='daily', priority=0.9)
        for item in Entry.all().filter('published =', True).order('-date').fetch(g_blog.sitemap_entries):
            addurl("%s/%s" % (g_blog.baseurl, item.link), item.mod_date or item.date, 'never', 0.6)
        if g_blog.sitemap_include_category:
            for cat in Category.all():
                addurl("%s/category/%s" % (g_blog.baseurl, cat.slug), None, 'weekly', 0.5)
        if g_blog.sitemap_include_tag:
            for tag in Tag.all():
                addurl("%s/tag/%s" % (g_blog.baseurl, urlencode(tag.tag)), None, 'weekly', 0.5)
        ## self.response.headers['Content-Type'] = 'application/atom+xml'
        self.render2('views/sitemap.xml', {'urlset': urls})
class Error404(BaseRequestHandler):
    """Catch-all route: anything dispatched here is answered with HTTP 404."""

    @cache(time=36000)
    def get(self, slug=None):
        self.error(404)
class Post_comment(BaseRequestHandler):
    """Receives comment form submissions (plain POST and AJAX)."""

    #@printinfo
    def post(self,slug=None):
        """Validate the CAPTCHA for anonymous users, then store the comment.

        Responds with a JSON tuple when useajax is set, otherwise redirects
        back to the entry page at the new comment's anchor.
        """
        useajax=self.param('useajax')=='1'
        name=self.param('author')
        email=self.param('email')
        url=self.param('url')
        key=self.param('key')  # datastore key of the Entry being commented on
        content=self.param('comment')
        parent_id=self.paramint('parentid',0)
        reply_notify_mail=self.parambool('reply_notify_mail')
        sess=Session(self,timeout=180)
        if not self.is_login:
            #if not (self.request.cookies.get('comment_user', '')):
            try:
                check_ret=True
                # Check types 1/2 compare against the numeric code stored in
                # the session; type 3 evaluates a client-supplied expression.
                if g_blog.comment_check_type in (1,2) :
                    checkret=self.param('checkret')
                    logging.info('______'+checkret)
                    check_ret=(int(checkret) == sess['code'])
                elif g_blog.comment_check_type ==3:
                    import app.gbtools as gb
                    checknum=self.param('checknum')
                    checkret=self.param('checkret')
                    # NOTE(review): eval() of a request parameter -- assumed
                    # to be a simple arithmetic expression; worth hardening.
                    check_ret=eval(checknum)==int(gb.stringQ2B( checkret))
                if not check_ret:
                    if useajax:
                        self.write(simplejson.dumps((False,-102,_('Your check code is invalid .'))))
                    else:
                        self.error(-102,_('Your check code is invalid .'))
                    return
            except:
                # Any parse failure is treated as a wrong captcha answer.
                if useajax:
                    self.write(simplejson.dumps((False,-102,_('Your check code is invalid .'))))
                else:
                    self.error(-102,_('Your check code is invalid .'))
                return
        sess.invalidate()
        # Normalise and sanitise the user-supplied fields.
        content=content.replace('\n','<br>')
        content=myfilter.do_filter(content)
        name=cgi.escape(name)[:20]
        url=cgi.escape(url)[:100]
        if not (name and email and content):
            if useajax:
                self.write(simplejson.dumps((False,-101,_('Please input name, email and comment .'))))
            else:
                self.error(-101,_('Please input name, email and comment .'))
        else:
            comment=Comment(author=name,
                            content=content,
                            email=email,
                            reply_notify_mail=reply_notify_mail,
                            entry=Entry.get(key))
            if url:
                try:
                    comment.weburl=url
                except:
                    # Invalid link value: keep the comment, drop the URL.
                    comment.weburl=None
            #name=name.decode('utf8').encode('gb2312')
            # Remember the commenter's identity in a long-lived cookie.
            info_str='#@#'.join([urlencode(name),urlencode(email),urlencode(url)])
            #info_str='#@#'.join([name,email,url.encode('utf8')])
            cookiestr='comment_user=%s;expires=%s;domain=%s;path=/'%( info_str,
                       (datetime.now()+timedelta(days=100)).strftime("%a, %d-%b-%Y %H:%M:%S GMT"),
                       ''
                       )
            comment.ip=self.request.remote_addr
            if parent_id:
                comment.parent=Comment.get_by_id(parent_id)
            try:
                comment.save()
                # Drop the cached entry page so the new comment shows up.
                memcache.delete("/"+comment.entry.link)
                self.response.headers.add_header( 'Set-Cookie', cookiestr)
                if useajax:
                    comment_c=self.get_render('comment',{'comment':comment})
                    self.write(simplejson.dumps((True,comment_c.decode('utf8'))))
                else:
                    self.redirect(self.referer+"#comment-"+str(comment.key().id()))
                comment.entry.removecache()
                memcache.delete("/feed/comments")
            except:
                if useajax:
                    self.write(simplejson.dumps((False,-102,_('Comment not allowed.'))))
                else:
                    self.error(-102,_('Comment not allowed .'))
class ChangeTheme(BaseRequestHandler):
    """Admin-only: switch the active theme via ?t=<name>, then go home."""

    @requires_admin
    def get(self, slug=None):
        g_blog.theme_name = self.param('t')
        g_blog.get_theme()
        self.redirect('/')
class do_action(BaseRequestHandler):
    """Dispatches /do/<slug> requests to a method named action_<slug>."""

    def get(self,slug=None):
        # Resolve the action method dynamically; unknown actions are 404s.
        try:
            func=getattr(self,'action_'+slug)
            if func and callable(func):
                func()
            else:
                self.error(404)
        except BaseException,e:
            self.error(404)

    def post(self,slug=None):
        # Same dynamic dispatch as GET.
        try:
            func=getattr(self,'action_'+slug)
            if func and callable(func):
                func()
            else:
                self.error(404)
        except:
            self.error(404)

    @ajaxonly
    def action_info_login(self):
        """JSON: whether the requester is logged in (plus admin flag/nickname)."""
        if self.login_user:
            self.write(simplejson.dumps({'islogin':True,
                                         'isadmin':self.is_admin,
                                         'name': self.login_user.nickname()}))
        else:
            self.write(simplejson.dumps({'islogin':False}))

    #@hostonly
    @cache()
    def action_proxy(self):
        """Fetch ?url=... server-side and relay the body with caching headers.

        NOTE(review): the target URL is not validated, making this an open
        proxy / SSRF risk; the @hostonly guard above is commented out --
        confirm whether it should be enabled.
        """
        result=urlfetch.fetch(self.param("url"), headers=self.request.headers)
        if result.status_code == 200:
            self.response.headers['Expires'] = 'Thu, 15 Apr 3010 20:00:00 GMT'
            self.response.headers['Cache-Control'] = 'max-age=3600,public'
            self.response.headers['Content-Type'] = result.headers['Content-Type']
            self.response.out.write(result.content)
        return

    def action_getcomments(self):
        """JSON: rendered comment list for the entry given by ?key=..."""
        key=self.param('key')
        entry=Entry.get(key)
        # NOTE(review): filters on the key *string* rather than the entry
        # instance -- verify this matches how Comment.entry is indexed.
        comments=Comment.all().filter("entry =",key)
        commentuser=self.request.cookies.get('comment_user', '')
        if commentuser:
            commentuser=commentuser.split('#@#')
        else:
            commentuser=['','','']
        vals={
              'entry':entry,
              'comments':comments,
              'user_name':commentuser[0],
              'user_email':commentuser[1],
              'user_url':commentuser[2],
              'checknum1':random.randint(1,10),
              'checknum2':random.randint(1,10),
              }
        html=self.get_render('comments',vals)
        self.write(simplejson.dumps(html.decode('utf8')))

    def action_test(self):
        # Simple i18n smoke test.
        self.write(settings.LANGUAGE_CODE)
        self.write(_("this is a test"))
class getMedia(webapp.RequestHandler):
    """Serves a stored Media blob by datastore key with long-lived caching;
    '?a=download' additionally bumps the download counter."""

    def get(self, slug):
        media = Media.get(slug)
        if not media:
            return
        self.response.headers['Expires'] = 'Thu, 15 Apr 3010 20:00:00 GMT'
        self.response.headers['Cache-Control'] = 'max-age=3600,public'
        self.response.headers['Content-Type'] = str(media.mtype)
        self.response.out.write(media.bits)
        action = self.request.get('a')
        if action and action.lower() == 'download':
            media.download += 1
            media.put()
class CheckImg(BaseRequestHandler):
    """Generates a CAPTCHA image and stores its code in a fresh session."""

    def get(self):
        img = Image()
        png = img.create()
        # Always write the code into a brand-new session.
        sess = Session(self, timeout=900)
        if not sess.is_new():
            sess.invalidate()
            sess = Session(self, timeout=900)
        sess['code'] = img.text
        sess.save()
        self.response.headers['Content-Type'] = "image/png"
        self.response.out.write(png)
class CheckCode(BaseRequestHandler):
    """Arithmetic CAPTCHA: emits 'a + b =' HTML, stores the sum in session."""

    def get(self):
        sess = Session(self, timeout=900)
        a = random.randint(1, 10)
        b = random.randint(1, 10)
        sess['code'] = a + b
        sess.save()
        #self.response.headers['Content-Type'] = "text/html"
        self.response.out.write("<span style='font-size:13px;color:red'>%d + %d =</span>" % (a, b))
class Other(BaseRequestHandler):
    """Fallback for /e/<slug>: delegates to plugin-registered URL handlers."""

    def get(self, slug=None):
        if not g_blog.tigger_urlmap(slug, page=self):
            self.error(404)

    def post(self, slug=None):
        body = g_blog.tigger_urlmap(slug, page=self)
        if body:
            self.write(body)
        else:
            self.error(404)
def getZipHandler(zippath='''D:\\work\\micolog\\plugins\\xheditor\\xheditor.zip''', **args):
    """Return the route mapping that serves the bundled xheditor from a zip.

    FIX: the archive path used to be hard-coded inside the body; it is now a
    parameter whose default is the old value, so existing callers keep
    working while deployments can pass a sensible location.
    NOTE(review): the default is an absolute Windows development path --
    almost certainly wrong outside the original author's machine; confirm
    and point it at the deployed plugin archive.
    """
    return ('/xheditor/(.*)', zipserve.make_zip_handler(zippath))
def main():
    """WSGI entry point: register template libraries, build the URL map and
    serve the application through the CGI adapter."""
    webapp.template.register_template_library('filter')
    webapp.template.register_template_library('app.recurse')
    # Route order matters: specific prefixes first, SinglePost as the
    # permalink catch-all, Error404 for everything else.
    urls= [('/media/([^/]*)/{0,1}.*',getMedia),
           ('/checkimg/', CheckImg),
           ('/checkcode/', CheckCode),
           ('/skin',ChangeTheme),
           ('/feed', FeedHandler),
           ('/feed/comments',CommentsFeedHandler),
           ('/sitemap', SitemapHandler),
           ('/post_comment',Post_comment),
           ('/page/(?P<page>\d+)', MainPage),
           ('/category/(.*)',entriesByCategory),
           ('/(\d{4})/(\d{2})',archive_by_month),
           ('/tag/(.*)',entriesByTag),
           #('/\?p=(?P<postid>\d+)',SinglePost),
           ('/', MainPage),
           ('/do/(\w+)', do_action),
           ('/e/(.*)',Other),
           ('/([\\w\\-\\./%]+)', SinglePost),
           ('.*',Error404),
           ]
    application = webapp.WSGIApplication(urls,debug=False)
    g_blog.application=application
    # Let plugins contribute additional handlers before serving.
    g_blog.plugins.register_handlerlist(application)
    wsgiref.handlers.CGIHandler().run(application)
if __name__ == "__main__":
    # CGI-style startup used by the Python 2.5 App Engine runtime.
    main()
# Wrapper for loading templates from zipfile.
import zipfile,logging,os
from django.template import TemplateDoesNotExist
from django.conf import settings
logging.debug("zipload imported")
# Cache of opened ZipFile objects; '' marks an archive that failed to open.
zipfile_cache={}
# Directory inside each archive that holds the templates.
_TEMPLATES_='templates'
def get_from_zipfile(zipfilename,name):
logging.debug("get_from_zipfile(%s,%s)"%(zipfilename,name))
zipfile_object = zipfile_cache.get(zipfilename)
if zipfile_object is None:
try:
zipfile_object = zipfile.ZipFile(zipfilename)
except (IOError, RuntimeError), err:
logging.error('Can\'t open zipfile %s: %s', zipfilename, err)
zipfile_object = ''
zipfile_cache[zipfilename] = zipfile_object
if zipfile_object == '':
return None
try:
data = zipfile_object.read(name)
return data
except (KeyError, RuntimeError), err:
return None
def get_template_sources(template_dirs=None):
    """Yield the '*.zip' entries of *template_dirs*.

    Falls back to settings.TEMPLATE_DIRS when no directories are given.
    """
    dirs = template_dirs or settings.TEMPLATE_DIRS
    for candidate in dirs:
        if candidate.endswith(".zip"):
            yield candidate  # os.path.join(template_dir, zip_name)
def load_template_source(template_name, template_dirs=None):
tried = []
logging.debug("zip_loader::load_template_source:"+template_name)
## spart= template_name.split('/')
## theme_name=spart[0]
##
## zipfile=theme_name+".zip"
## template_file=os.path.join(theme_name,*spart[1:])
template_file='/'.join((_TEMPLATES_, template_name))
for zipfile in get_template_sources(template_dirs):
try:
return (get_from_zipfile(zipfile,template_file), os.path.join(zipfile,template_file))
except IOError:
tried.append(zipfile)
if tried:
error_msg = "Tried %s" % tried
else:
error_msg = "Your TEMPLATE_DIRS setting is empty. Change it to point to at least one template directory."
raise TemplateDoesNotExist, error_msg
load_template_source.is_usable = True
| Python |
from django.template import Library
from django.template import Node, NodeList, Template, Context
from django.template import TemplateSyntaxError, VariableDoesNotExist, BLOCK_TAG_START, BLOCK_TAG_END, VARIABLE_TAG_START, VARIABLE_TAG_END, SINGLE_BRACE_START, SINGLE_BRACE_END, COMMENT_TAG_START, COMMENT_TAG_END
# Tag library instance that the {% recurse %} tag registers itself on.
register = Library()
class RecurseNode(Node):
    """Template node behind the {% recurse %} tag.

    For every item of ``sequence`` it renders ``nodelist_first`` (the part
    before {% yield %}), recurses into the item's ``children_name`` iterable,
    then renders ``nodelist_second``.  Any remaining kwargs have the form
    ``name: (initial, increment)`` and become context variables that grow
    linearly with the recursion depth.
    """

    def __init__(self, **kwargs):
        self.loopvar, self.sequence = kwargs['loopvar'], kwargs['sequence']
        self.children_name = kwargs['children_name']
        self.nodelist_first, self.nodelist_second = kwargs['nodelist_first'], kwargs['nodelist_second']
        del kwargs['nodelist_first'], kwargs['nodelist_second'], kwargs['sequence'], kwargs['children_name'], kwargs['loopvar']
        # Whatever is left are the per-depth (initial, increment) variables.
        self.kwargs = kwargs

    def __repr__(self):
        # BUGFIX: the old implementation referenced self.loopvars,
        # self.nodelist_loop and self.is_reversed -- attributes this class
        # never defines (it was copied from Django's ForNode) -- so repr()
        # raised AttributeError.  Use the attributes that actually exist.
        return "<Recurse Node: for %s in %s, tail_len: %d>" % \
               (self.loopvar, self.sequence, len(self.nodelist_first))

    def __iter__(self):
        for node in self.nodelist_first:
            yield node
        for node in self.nodelist_second:
            yield node

    def get_nodes_by_type(self, nodetype):
        nodes = []
        if isinstance(self, nodetype):
            nodes.append(self)
        nodes.extend(self.nodelist_first.get_nodes_by_type(nodetype))
        nodes.extend(self.nodelist_second.get_nodes_by_type(nodetype))
        return nodes

    def render(self, context, depth=0, values=False):
        nodelist = NodeList()
        if 'recurseloop' in context:
            parentloop = context['recurseloop']
        else:
            parentloop = {}
        context.push()
        # On the first (non-recursive) pass we have no values yet.
        if not values:
            try:
                values = self.sequence.resolve(context, True)
            except VariableDoesNotExist:
                values = []
        if values is None:
            values = []
        if not hasattr(values, '__len__'):
            values = list(values)
        len_values = len(values)
        # Create a recurseloop value in the context. We'll update counters on
        # each iteration just below.
        loop_dict = context['recurseloop'] = {'parent': parentloop}
        loop_dict['depth'] = depth + 1
        loop_dict['depth0'] = depth
        for i, item in enumerate(values):
            # Add the additional arguments to the context.
            # They come in the form of {'name': (initial, increment)}.
            # As for now only numbers are supported, but strings can also be
            # multiplied.
            for k, v in self.kwargs.iteritems():
                context[k] = v[0] + v[1] * depth
            # Shortcuts for current loop iteration number.
            loop_dict['counter0'] = i
            loop_dict['counter'] = i + 1
            # Boolean values designating first and last times through loop.
            loop_dict['first'] = (i == 0)
            loop_dict['last'] = (i == len_values - 1)
            context[self.loopvar] = item
            for node in self.nodelist_first:
                nodelist.append(node.render(context))
            if len(getattr(item, self.children_name)):
                # Recurse into the children where {% yield %} appeared.
                nodelist.append(self.render(context, depth + 1, getattr(item, self.children_name)))
            for node in self.nodelist_second:
                nodelist.append(node.render(context))
        context.pop()
        return nodelist.render(context)
#@register.tag(name="for")
def do_recurse(parser, token):
    """
    Recursively loops over each item in an array .
    It also increments passed variables on each recursion depth.
    For example, to display a list of comments with replies given ``comment_list``:
        {% recurse comment in comments children="replies" indent=(0,20) %}
            <div style="margin-left:{{indent}}px">{{ comment.text }}</div>
        {% endrecurse %}
    ``children`` is the name of the iterable that contains the children of the current element
    ``children`` needs to be a property of comment, and is required for the recurseloop to work
    You can pass additional parameters after children in the form of:
        var_name=(intial_value, increment)
    You need to take care of creating the tree structure on your own.
    As for now there should be no spaces between the equal ``=``
    signs when assigning children or additional variables
    In addition to the variables passed, the recurse loop sets a
    number of variables available within the loop:
        ==========================  ================================================
        Variable                    Description
        ==========================  ================================================
        ``recurseloop.depth``       The current depth of the loop (1 is the top level)
        ``recurseloop.depth0``      The current depth of the loop (0 is the top level)
        ``recurseloop.counter``     The current iteration of the current level(1-indexed)
        ``recurseloop.counter0``    The current iteration of the current level(0-indexed)
        ``recurseloop.first``       True if this is the first time through the current level
        ``recurseloop.last``        True if this is the last time through the current level
        ``recurseloop.parent``      This is the loop one level "above" the current one
        ==========================  ================================================
    You can also use the tag {% yield %} inside a recursion.
    The ``yield`` tag will output the same HTML that's between the recurse and endrecurse tags
    if the current element has children. If there are no children ``yield`` will output nothing
    You must not, however wrap the ``yield`` tag inside other tags, just like you must not wrap
    the ``else`` tag inside other tags when making if-else-endif
    """
    # We will be throwing this a lot
    def tError( contents ):
        raise TemplateSyntaxError(
            "'recurse' statements should use the format"
            "'{%% recurse x in y children=\"iterable_property_name\" "
            "arg1=(float,float) arg2=(\"str\",\"str\") %%}: %s" % contents )
    # Expected shape: recurse <var> in <seq> children="name" [extra=(a,b) ...]
    bits = token.contents.split()
    quotes = ["'","\""]
    lenbits = len(bits)
    if lenbits < 5:
        tError(token.contents)
    in_index = 2
    children_index = 4
    if bits[in_index] != 'in':
        tError(token.contents)
    children_token = bits[children_index].split("=")
    if len(children_token) != 2 or children_token[0] != 'children':
        tError(token.contents)
    # The children value must be quoted; strip the quote characters.
    f = children_token[1][0]
    l = children_token[1][-1]
    if f != l or f not in quotes:
        tError(token.contents)
    else:
        children_token[1] = children_token[1].replace(f,"")
    def convert(val):
        # A tuple element is either a float or a quoted string.
        try:
            val = float(val)
        except ValueError:
            f = val[0]
            l = val[-1]
            if f != l or f not in quotes:
                tError(token.contents)
            val = unicode( val.replace(f,"") )
        return val
    node_vars = {}
    if lenbits > 5:
        # Parse the optional name=(initial,increment) arguments.
        for bit in bits[5:]:
            arg = bit.split("=")
            if len(arg) != 2 :
                tError(token.contents)
            f = arg[1][0]
            l = arg[1][-1]
            if f != "(" or l != ")":
                tError(token.contents)
            try:
                argval = tuple([ convert(x) for x in arg[1].replace("(","").replace(")","").split(",") ])
            # Invalid float number, or missing comma
            except (IndexError, ValueError):
                tError(token.contents)
            node_vars[ str(arg[0]) ] = argval
    node_vars['children_name'] = children_token[1]
    node_vars['loopvar'] = bits[1]
    node_vars['sequence'] = parser.compile_filter(bits[3])
    # Everything up to {% yield %} (or {% endrecurse %}) is the first list.
    nodelist_first = parser.parse( ('yield', 'endrecurse',) )
    token = parser.next_token()
    if token.contents == 'yield':
        nodelist_second = parser.parse( ('endrecurse', ) )
        parser.delete_first_token()
    else:
        nodelist_second = NodeList()
    node_vars['nodelist_first'] = nodelist_first
    node_vars['nodelist_second'] = nodelist_second
    return RecurseNode(**node_vars)
do_recurse = register.tag("recurse", do_recurse)
| Python |
#!/usr/bin/env python
"""Simple PNG Canvas for Python"""
__version__ = "0.8"
__author__ = "Rui Carmo (http://the.taoofmac.com)"
__copyright__ = "CC Attribution-NonCommercial-NoDerivs 2.0 Rui Carmo"
__contributors__ = ["http://collaboa.weed.rbse.com/repository/file/branches/pgsql/lib/spark_pr.rb"], ["Eli Bendersky"]

import zlib, struct

# The fixed eight-byte PNG file signature.
signature = struct.pack("8B", 137, 80, 78, 71, 13, 10, 26, 10)
# Alpha-blend colour c2 over c1, using c2's alpha channel.
def blend(c1, c2):
    alpha = c2[3]
    return [(c1[i] * (0xFF - alpha) + c2[i] * alpha) >> 8 for i in range(3)]
# Scale a colour's alpha by intensity i (0-0xFF); RGB is left untouched.
def intensity(c, i):
    r, g, b, a = c[0], c[1], c[2], c[3]
    return [r, g, b, (a * i) >> 8]
# Perceptive grayscale value using the classic luma weights.
def grayscale(c):
    r, g, b = c[0], c[1], c[2]
    return int(r * 0.3 + g * 0.59 + b * 0.11)
# Linear interpolation between two RGBA colours in `steps` steps
# (returns steps+1 colours, endpoints included).
def gradientList(start, end, steps):
    deltas = [end[ch] - start[ch] for ch in range(4)]
    colors = []
    for step in range(steps + 1):
        colors.append([start[ch] + (deltas[ch] * step) / steps for ch in range(4)])
    return colors
class PNGCanvas:
    """In-memory RGB canvas with PNG (8-bit RGB, non-interlaced) dump/load.

    Python 2 implementation: pixel rows are lists of [r, g, b] lists and the
    serialised stream is a byte str.
    """

    def __init__(self, width, height, bgcolor=[0xff,0xff,0xff,0xff], color=[0,0,0,0xff]):
        # NOTE(review): mutable list defaults are shared between calls; this
        # is only safe while callers never mutate them in place.
        self.canvas = []
        self.width = width
        self.height = height
        self.color = color  #rgba drawing colour
        bgcolor = bgcolor[0:3]  # we don't need alpha for background
        for i in range(height):
            self.canvas.append([bgcolor] * width)

    def point(self, x, y, color=None):
        """Blend one pixel at (x, y); out-of-bounds points are ignored."""
        if x<0 or y<0 or x>self.width-1 or y>self.height-1: return
        if color == None: color = self.color
        self.canvas[y][x] = blend(self.canvas[y][x], color)

    def _rectHelper(self, x0, y0, x1, y1):
        """Normalise corners to ints with x0<=x1 and y0<=y1."""
        x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
        if x0 > x1: x0, x1 = x1, x0
        if y0 > y1: y0, y1 = y1, y0
        return [x0, y0, x1, y1]

    def verticalGradient(self, x0, y0, x1, y1, start, end):
        """Fill the rectangle with a top-to-bottom colour gradient."""
        x0, y0, x1, y1 = self._rectHelper(x0, y0, x1, y1)
        grad = gradientList(start, end, y1 - y0)
        for x in range(x0, x1 + 1):
            for y in range(y0, y1 + 1):
                self.point(x, y, grad[y - y0])

    def rectangle(self, x0, y0, x1, y1):
        """Outline the rectangle in the current colour."""
        x0, y0, x1, y1 = self._rectHelper(x0, y0, x1, y1)
        self.polyline([[x0, y0], [x1, y0], [x1, y1], [x0, y1], [x0, y0]])

    def filledRectangle(self, x0, y0, x1, y1):
        """Fill the rectangle with the current colour."""
        x0, y0, x1, y1 = self._rectHelper(x0, y0, x1, y1)
        for x in range(x0, x1 + 1):
            for y in range(y0, y1 + 1):
                self.point(x, y, self.color)

    def copyRect(self, x0, y0, x1, y1, dx, dy, destination):
        """Copy the source rectangle verbatim into *destination* at (dx, dy)."""
        x0, y0, x1, y1 = self._rectHelper(x0, y0, x1, y1)
        for x in range(x0, x1 + 1):
            for y in range(y0, y1 + 1):
                destination.canvas[dy + y - y0][dx + x - x0] = self.canvas[y][x]

    def blendRect(self, x0, y0, x1, y1, dx, dy, destination, alpha=0xff):
        """Alpha-blend the source rectangle into *destination* at (dx, dy)."""
        x0, y0, x1, y1 = self._rectHelper(x0, y0, x1, y1)
        for x in range(x0, x1 + 1):
            for y in range(y0, y1 + 1):
                rgba = self.canvas[y][x] + [alpha]
                destination.point(dx + x - x0, dy + y - y0, rgba)

    # draw a line using Xiaolin Wu's antialiasing technique
    def line(self, x0, y0, x1, y1):
        # clean params
        x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
        if y0 > y1:
            y0, y1, x0, x1 = y1, y0, x1, x0
        dx = x1 - x0
        if dx < 0:
            sx = -1
        else:
            sx = 1
        dx *= sx
        dy = y1 - y0
        # 'easy' cases
        if dy == 0:
            # horizontal line
            for x in range(x0, x1, sx):
                self.point(x, y0)
            return
        if dx == 0:
            # vertical line
            for y in range(y0, y1):
                self.point(x0, y)
            self.point(x1, y1)
            return
        if dx == dy:
            # perfect diagonal
            for x in range(x0, x1, sx):
                self.point(x, y0)
                y0 = y0 + 1
            return
        # main loop
        self.point(x0, y0)
        e_acc = 0
        if dy > dx:  # vertical displacement
            e = (dx << 16) / dy
            for i in range(y0, y1 - 1):
                e_acc_temp, e_acc = e_acc, (e_acc + e) & 0xFFFF
                if (e_acc <= e_acc_temp):
                    # error accumulator wrapped: step sideways
                    x0 = x0 + sx
                w = 0xFF - (e_acc >> 8)
                self.point(x0, y0, intensity(self.color, (w)))
                y0 = y0 + 1
                self.point(x0 + sx, y0, intensity(self.color, (0xFF - w)))
            self.point(x1, y1)
            return
        # horizontal displacement
        e = (dy << 16) / dx
        for i in range(x0, x1 - sx, sx):
            e_acc_temp, e_acc = e_acc, (e_acc + e) & 0xFFFF
            if (e_acc <= e_acc_temp):
                # error accumulator wrapped: step down
                y0 = y0 + 1
            w = 0xFF - (e_acc >> 8)
            self.point(x0, y0, intensity(self.color, (w)))
            x0 = x0 + sx
            self.point(x0, y0 + 1, intensity(self.color, (0xFF - w)))
        self.point(x1, y1)

    def polyline(self, arr):
        """Draw connected segments through the [x, y] points in *arr*."""
        for i in range(0, len(arr) - 1):
            self.line(arr[i][0], arr[i][1], arr[i + 1][0], arr[i + 1][1])

    def dump(self):
        """Serialise the canvas to a PNG byte string."""
        raw_list = []
        for y in range(self.height):
            raw_list.append(chr(0))  # filter type 0 (None)
            for x in range(self.width):
                raw_list.append(struct.pack("!3B", *self.canvas[y][x]))
        raw_data = ''.join(raw_list)
        # 8-bit image represented as RGB tuples
        # simple transparency, alpha is pure white
        return signature + \
               self.pack_chunk('IHDR', struct.pack("!2I5B", self.width, self.height, 8, 2, 0, 0, 0)) + \
               self.pack_chunk('tRNS', struct.pack("!6B", 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)) + \
               self.pack_chunk('IDAT', zlib.compress(raw_data, 9)) + \
               self.pack_chunk('IEND', '')

    def pack_chunk(self, tag, data):
        """Wrap *data* in a PNG chunk: length, tag, payload, CRC32."""
        to_check = tag + data
        return struct.pack("!I", len(data)) + to_check + struct.pack("!I", zlib.crc32(to_check) & 0xFFFFFFFF)

    def load(self, f):
        """Read an 8-bit RGB, non-interlaced PNG from file object *f*."""
        assert f.read(8) == signature
        self.canvas = []
        for tag, data in self.chunks(f):
            if tag == "IHDR":
                ( width,
                  height,
                  bitdepth,
                  colortype,
                  compression, filter, interlace ) = struct.unpack("!2I5B", data)
                self.width = width
                self.height = height
                if (bitdepth, colortype, compression, filter, interlace) != (8, 2, 0, 0, 0):
                    raise TypeError('Unsupported PNG format')
            # we ignore tRNS because we use pure white as alpha anyway
            elif tag == 'IDAT':
                raw_data = zlib.decompress(data)
                rows = []
                i = 0
                for y in range(height):
                    filtertype = ord(raw_data[i])
                    i = i + 1
                    cur = [ord(x) for x in raw_data[i:i + width * 3]]
                    # defilter() mutates and returns `cur`, so `prev` below
                    # holds the *reconstructed* previous scanline.
                    if y == 0:
                        rgb = self.defilter(cur, None, filtertype)
                    else:
                        rgb = self.defilter(cur, prev, filtertype)
                    prev = cur
                    i = i + width * 3
                    row = []
                    j = 0
                    for x in range(width):
                        pixel = rgb[j:j + 3]
                        row.append(pixel)
                        j = j + 3
                    self.canvas.append(row)

    def defilter(self, cur, prev, filtertype, bpp=3):
        """Undo one scanline's PNG filter in place; returns *cur*."""
        if filtertype == 0:  # No filter
            return cur
        elif filtertype == 1:  # Sub
            xp = 0
            for xc in range(bpp, len(cur)):
                cur[xc] = (cur[xc] + cur[xp]) % 256
                xp = xp + 1
        elif filtertype == 2:  # Up
            for xc in range(len(cur)):
                cur[xc] = (cur[xc] + prev[xc]) % 256
        elif filtertype == 3:  # Average
            xp = 0
            for xc in range(len(cur)):
                cur[xc] = (cur[xc] + (cur[xp] + prev[xc])/2) % 256
                xp = xp + 1
        elif filtertype == 4:  # Paeth
            xp = 0
            # First pixel has no left neighbour: only 'up' contributes.
            for i in range(bpp):
                cur[i] = (cur[i] + prev[i]) % 256
            for xc in range(bpp, len(cur)):
                a = cur[xp]
                b = prev[xc]
                c = prev[xp]
                p = a + b - c
                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)
                if pa <= pb and pa <= pc:
                    value = a
                elif pb <= pc:
                    value = b
                else:
                    value = c
                cur[xc] = (cur[xc] + value) % 256
                xp = xp + 1
        else:
            raise TypeError('Unrecognized scanline filter type')
        return cur

    def chunks(self, f):
        """Yield [tag, data] for every chunk in the stream, verifying CRCs."""
        while 1:
            try:
                length = struct.unpack("!I", f.read(4))[0]
                tag = f.read(4)
                data = f.read(length)
                crc = struct.unpack("!i", f.read(4))[0]
            except:
                # Truncated stream / EOF ends iteration.
                return
            if zlib.crc32(tag + data) != crc:
                raise IOError
            yield [tag, data]
if __name__ == '__main__':
    # Self-test / demo: exercise drawing, dump and a load round-trip.
    width = 128
    height = 64
    print "Creating Canvas..."
    c = PNGCanvas(width, height)
    c.color = [0xff, 0, 0, 0xff]
    c.rectangle(0, 0, width - 1, height - 1)
    print "Generating Gradient..."
    c.verticalGradient(1, 1, width - 2, height - 2, [0xff, 0, 0, 0xff], [0x20, 0, 0xff, 0x80])
    print "Drawing Lines..."
    c.color = [0, 0, 0, 0xff]
    c.line(0, 0, width - 1, height - 1)
    c.line(0, 0, width / 2, height - 1)
    c.line(0, 0, width - 1, height / 2)
    # Copy Rect to Self
    print "Copy Rect"
    c.copyRect(1, 1, width / 2 - 1, height / 2 - 1, 0, height / 2, c)
    # Blend Rect to Self
    print "Blend Rect"
    c.blendRect(1, 1, width / 2 - 1, height / 2 - 1, width / 2, 0, c)
    # Write test
    print "Writing to file..."
    f = open("test.png", "wb")
    f.write(c.dump())
    f.close()
    # Read test
    print "Reading from file..."
    f = open("test.png", "rb")
    c.load(f)
    f.close()
    # Write back
    print "Writing to new file..."
    f = open("recycle.png", "wb")
    f.write(c.dump())
    f.close()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright(C) 2008 SupDo.com
# Licensed under the GNU License, Version 3.0 (the "License");
#
# File: safecode.py
# Author: KuKei
# Create Date: 2008-07-16
# Description: generates the numeric CAPTCHA (verification-code) images.
# Modify Date: 2008-08-06
import hashlib
import md5
import random

from pngcanvas import PNGCanvas
class Image():
    """4-digit numeric CAPTCHA image rendered as line strokes on a PNGCanvas."""
    text = None     # the numeric code shown in the image
    md5Text = None  # md5 hex digest of text (filled by getMd5Text)
    img = None      # PNGCanvas instance (filled by create)
    width = 0
    height = 0
    # glyph cell geometry, in pixels
    textX = 10   # glyph width
    textY = 10   # glyph height
    beginX = 5   # left margin
    endX = 5     # right margin
    beginY = 5   # top margin
    endY = 5     # bottom margin
    spare = 4    # horizontal gap between glyphs
def __init__(self,text=None):
    """Use *text* as the code, or generate a random 4-digit one.

    The canvas size is derived from the digit count plus the margins.
    """
    if(text==None):
        self.text = self.getRandom()
    else:
        self.text = text
    #self.getMd5Text()
    self.width = len(str(self.text))*(self.spare+self.textX)+self.beginX+self.endX
    self.height = self.textY + self.beginY + self.endY
def create(self):
    """Render the code onto a fresh PNGCanvas and return the PNG bytes."""
    self.img = PNGCanvas(self.width,self.height)
    self.img.color = [0xff,0xff,0xff,0xff]
    #self.img.color = [0x39,0x9e,0xff,0xff]
    #self.img.verticalGradient(1,1,self.width-2, self.height-2,[0xff,0,0,0xff],[0x60,0,0xff,0x80])
    # red-to-orange background gradient
    self.img.verticalGradient(1,1,self.width-2, self.height-2,[0xff,0x45,0x45,0xff],[0xff,0xcb,0x44,0xff])
    # NOTE(review): assumes the code is exactly 4 characters -- true for
    # getRandom(), but a shorter explicit `text` would raise IndexError.
    for i in range(4):
        a = str(self.text)[i]
        self.writeText(a,i)
    return self.img.dump()
def getRandom(self):
    """Return a random 4-digit code in 1000..9999 inclusive.

    BUGFIX: randrange(1000, 9999) excludes its stop value, so 9999 could
    never be generated; the stop is now 10000 so all 4-digit codes occur.
    """
    return random.randrange(1000, 10000)
def getMd5Text(self):
m = md5.new()
m.update(str(self.text))
self.md5Text = m.hexdigest()
def writeText(self,text,pos=0):
if(text=="1"):
self.writeLine(pos, "avc")
elif(text=="2"):
self.writeLine(pos, "aht")
self.writeLine(pos, "hvtr")
self.writeLine(pos, "ahc")
self.writeLine(pos, "hvbl")
self.writeLine(pos, "ahb")
elif(text=="3"):
self.writeLine(pos, "aht")
self.writeLine(pos, "ahc")
self.writeLine(pos, "ahb")
self.writeLine(pos, "avr")
elif(text=="4"):
self.writeLine(pos, "hvtl")
self.writeLine(pos, "ahc")
self.writeLine(pos, "avc")
elif(text=="5"):
self.writeLine(pos, "aht")
self.writeLine(pos, "hvtl")
self.writeLine(pos, "ahc")
self.writeLine(pos, "hvbr")
self.writeLine(pos, "ahb")
elif(text=="6"):
self.writeLine(pos, "aht")
self.writeLine(pos, "avl")
self.writeLine(pos, "ahc")
self.writeLine(pos, "hvbr")
self.writeLine(pos, "ahb")
elif(text=="7"):
self.writeLine(pos, "aht")
self.writeLine(pos, "avr")
elif(text=="8"):
self.writeLine(pos, "aht")
self.writeLine(pos, "avl")
self.writeLine(pos, "ahc")
self.writeLine(pos, "avr")
self.writeLine(pos, "ahb")
elif(text=="9"):
self.writeLine(pos, "aht")
self.writeLine(pos, "avr")
self.writeLine(pos, "ahc")
self.writeLine(pos, "ahb")
self.writeLine(pos, "hvtl")
elif(text=="0"):
self.writeLine(pos, "aht")
self.writeLine(pos, "avl")
self.writeLine(pos, "avr")
self.writeLine(pos, "ahb")
'''
type解释
a:全部,部分上下
h:一半
h:横
v:竖
l:左,上
c:中间
r:右,下
t:上
b:下
'''
def writeLine(self,pos,type):
if(type=="avl"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos,
self.beginY,
self.beginX+(self.textX+self.spare)*pos,
self.beginY+self.textY
)
elif(type=="avc"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos+self.textX/2,
self.beginY,
self.beginX+(self.textX+self.spare)*pos+self.textX/2,
self.beginY+self.textY
)
elif(type=="avr"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos+self.textX,
self.beginY,
self.beginX+(self.textX+self.spare)*pos+self.textX,
self.beginY+self.textY
)
elif(type=="aht"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos,
self.beginY,
self.beginX+(self.textX+self.spare)*pos+self.textX,
self.beginY,
)
elif(type=="ahc"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos,
self.beginY+self.textY/2,
self.beginX+(self.textX+self.spare)*pos+self.textX,
self.beginY+self.textY/2
)
elif(type=="ahb"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos,
self.beginY+self.textY,
self.beginX+(self.textX+self.spare)*pos+self.textX,
self.beginY+self.textY
)
elif(type=="hvtl"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos,
self.beginY,
self.beginX+(self.textX+self.spare)*pos,
self.beginY+self.textY/2
)
elif(type=="hvtr"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos+self.textX,
self.beginY,
self.beginX+(self.textX+self.spare)*pos+self.textX,
self.beginY+self.textY/2
)
elif(type=="hvbl"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos,
self.beginY+self.textY/2,
self.beginX+(self.textX+self.spare)*pos,
self.beginY+self.textY
)
elif(type=="hvbr"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos+self.textX,
self.beginY+self.textY/2,
self.beginX+(self.textX+self.spare)*pos+self.textX,
self.beginY+self.textY
)
| Python |
"""tblib.py: A Trackback (client) implementation in Python
"""
__author__ = "Matt Croydon <matt@ooiio.com>"
__copyright__ = "Copyright 2003, Matt Croydon"
__license__ = "GPL"
__version__ = "0.1.0"
__history__ = """
0.1.0: 1/29/03 - Code cleanup, release. It can send pings, and autodiscover a URL to ping.
0.0.9: 1/29/03 - Basic error handling and autodiscovery works!
0.0.5: 1/29/03 - Internal development version. Working on autodiscovery and error handling.
0.0.4: 1/22/03 - First public release, code cleanup.
0.0.3: 1/22/03 - Removed hard coding that was used for testing.
0.0.2: 1/21/03 - First working version.
0.0.1: 1/21/03 - Initial version. Thanks to Mark Pilgrim for helping me figure some module basics out.
"""
import httplib, urllib, urlparse, re
from google.appengine.api import urlfetch
import logging
"""Everything I needed to know about trackback I learned from the trackback tech specs page
http://www.movabletype.org/docs/mttrackback.html. All arguments are optional. This allows us to create an empty TrackBack object,
then use autodiscovery to populate its attributes.
"""
class TrackBack:
    """Minimal TrackBack client: send pings and autodiscover ping URLs.

    Protocol reference: http://www.movabletype.org/docs/mttrackback.html
    All constructor arguments are optional so an empty object can be
    populated via autodiscover().
    """

    def __init__(self, tbUrl=None, title=None, excerpt=None, url=None, blog_name=None):
        self.tbUrl = tbUrl            # trackback ping endpoint
        self.title = title
        self.excerpt = excerpt
        self.url = url
        self.blog_name = blog_name
        self.tbErrorCode = None       # <error> value from the last ping, if any
        self.tbErrorMessage = None    # <message> value when the ping failed

    def ping(self):
        """POST this trackback to tbUrl; return 1 on success.

        On failure the server's <error>/<message> values are stored in
        tbErrorCode / tbErrorMessage.  Does nothing when tbUrl is unset.
        """
        # Only execute if a trackback url has been defined.
        if not self.tbUrl:
            return
        params = urllib.urlencode({'title': self.title, 'url': self.url,
                                   'excerpt': self.excerpt, 'blog_name': self.blog_name})
        headers = {"Content-type": "application/x-www-form-urlencoded",
                   "User-Agent": "micolog"}
        logging.info("ping...%s", params)
        response = urlfetch.fetch(self.tbUrl, method=urlfetch.POST,
                                  payload=params, headers=headers)
        self.httpResponse = response.status_code
        self.tbResponse = response.content
        logging.info("ping...%s", self.tbResponse)
        # The response is trivial XML, so a regex is enough to pull out
        # the <error> flag and the optional <message>.
        reg = re.search(r'<error>(.*?)</error>', self.tbResponse)
        if reg:
            self.tbErrorCode = reg.group(1)
            if int(self.tbErrorCode) == 1:
                reg2 = re.search(r'<message>(.*?)</message>', self.tbResponse)
                if reg2:
                    self.tbErrorMessage = reg2.group(1)
        else:
            return 1

    def autodiscover(self, urlToCheck):
        """Fetch urlToCheck and extract its trackback:ping URL, if any."""
        response = urlfetch.fetch(urlToCheck)
        # BUG FIX: urlfetch responses expose .content, not .read()
        # (ping() above already used .content).
        data = response.content
        reg = re.search(r'trackback:ping="(.*?)"', data)
        if reg:
            self.tbUrl = reg.group(1)
# -*- coding: utf-8 -*-
"""
A Python HTML filtering library - html_filter.py, v 1.15.4
Translated to Python by Samuel Adam <samuel.adam@gmail.com>
http://amisphere.com/contrib/python-html-filter/
Original PHP code ( lib_filter.php, v 1.15 ) by Cal Henderson <cal@iamcal.com>
http://iamcal.com/publish/articles/php/processing_html/
http://iamcal.com/publish/articles/php/processing_html_part_2/
This code is licensed under a Creative Commons Attribution-ShareAlike 2.5 License
http://creativecommons.org/licenses/by-sa/2.5/
"""
import re
from cgi import escape
from HTMLParser import HTMLParser
class html_filter:
"""
html_filter removes HTML tags that do not belong to a white list
closes open tags and fixes broken ones
removes javascript injections and black listed URLs
makes text URLs and emails clickable
adds rel="no-follow" to links except for white list
default settings are based on Flickr's "Some HTML is OK"
http://www.flickr.com/html.gne
HOWTO
1. Basic example
from html_filter import html_filter
filter = html_filter()
#change settings to meet your needs
filter.strip_comments = False
filter.allowed['br'] = ()
filter.no_close += 'br',
raw_html = '<p><strong><br><!-- Text to filter !!!<div></p>'
# go() is a shortcut to apply the most common methods
filtered_html = filter.go(raw_html)
# returns <strong><br /><!-- Text to filter !!!</strong>
2. You can only use one method at a time if you like
from html_filter import html_filter
filter = html_filter()
please_dont_scream_this_is_a_pop_contest = filter.fix_case('HARD ROCK ALELUYAH!!!')
# returns Hard rock aleluyah!!!
filter.break_words_longer_than = 30
wordwrap_text = filter.break_words('MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM...')
# adds html entity "​" (zero width space) each 30 characters
"""
def __init__(self):
### START Default Config ###
# tags and attributes that are allowed
self.allowed = {
'a': ('href', 'target'),
'b': (),
'blockquote': (),
'em': (),
'i': (),
'img': ('src', 'width', 'height', 'alt', 'title'),
'strong': (),
'u': (),
}
# tags which should always be self-closing (e.g. "<img />")
self.no_close = (
'img',
)
# tags which must always have seperate opening and closing tags (e.g. "<b></b>")
self.always_close = (
'a',
'b',
'blockquote',
'em',
'i',
'strong',
'u',
)
# tags which should be removed if they contain no content (e.g. "<b></b>" or "<b />")
self.remove_blanks = (
'a',
'b',
'blockquote',
'em',
'i',
'strong',
'u',
)
# attributes which should be checked for valid protocols
self.protocol_attributes = (
'src',
'href',
)
# protocols which are allowed
self.allowed_protocols = (
'http',
'https',
'ftp',
'mailto',
)
# forbidden urls ( regular expressions ) are replaced by #
self.forbidden_urls = (
r'^/delete-account',
r'^domain.ext/delete-account',
)
# should we make urls clickable ?
self.make_clickable_urls = True
# should we add a rel="nofollow" to the links ?
self.add_no_follow = True
# except for those domains
self.follow_for = (
'allowed-domain.ext',
)
# should we remove comments?
self.strip_comments = True
# should we removes blanks from beginning and end of data ?
self.strip_data = True
# should we try and make a b tag out of "b>"
self.always_make_tags = False
# entity control options
self.allow_numbered_entities = True
self.allowed_entities = (
'amp',
'gt',
'lt',
'quot',
)
# should we "break" words longer than x chars ( 0 means "No", minimum is 8 chars )
self.break_words_longer_than = 0
### END Default Config ###
# INIT
self.tag_counts = {}
# pre-compile some regexp patterns
self.pat_entities = re.compile(r'&([^&;]*)(?=(;|&|$))')
self.pat_quotes = re.compile(r'(>|^)([^<]+?)(<|$)', re.DOTALL|re.IGNORECASE)
self.pat_valid_entity = re.compile(r'^#([0-9]+)$', re.IGNORECASE)
self.pat_decode_entities_dec = re.compile(r'(&)#(\d+);?')
self.pat_decode_entities_hex = re.compile(r'(&)#x([0-9a-f]+);?', re.IGNORECASE)
self.pat_decode_entities_hex2 = re.compile(r'(%)([0-9a-f]{2});?', re.IGNORECASE)
self.pat_entities2 = re.compile(r'&([^&;]*);?', re.IGNORECASE)
self.pat_raw_url = re.compile('(('+'|'.join(self.allowed_protocols)+')://)(([a-z0-9](?:[a-z0-9\\-]*[a-z0-9])?\\.)+(com\\b|edu\\b|biz\\b|gov\\b|in(?:t|fo)\\b|mil\\b|net\\b|org\\b|[a-z][a-z]\\b)|((25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])))(:\\d+)?(/[-a-z0-9_:\\\\@&?=+,\\.!/~*\'%\\$]*)*(?<![.,?!])(?!((?!(?:<a )).)*?(?:</a>))(?!((?!(?:<!--)).)*?(?:-->))', re.IGNORECASE)
#
def go(self, data):
data = self.strip_whitespace(data)
data = self.escape_comments(data)
data = self.balance_html(data)
data = self.clickable_urls(data)
data = self.check_tags(data)
data = self.process_remove_blanks(data)
data = self.validate_entities(data)
data = self.break_words(data)
return data
#
def strip_whitespace(self, data):
if self.strip_data:
data = data.strip()
return data
#
def escape_comments(self, data):
pat = re.compile(r'<!--(.*?)-->', re.IGNORECASE)
data = re.sub(pat, self.f0, data)
return data
def f0(self, m):
return '<!--'+escape(m.group(1), True)+'-->'
#
def balance_html(self, data):
# try and form html
if self.always_make_tags:
data = re.sub(r'>>+', r'>', data)
data = re.sub(r'<<+', r'<', data)
data = re.sub(r'^>', r'', data)
data = re.sub(r'<([^>]*?)(?=<|$)', r'<\1>', data)
data = re.sub(r'(^|>)([^<]*?)(?=>)', r'\1<\2', data)
else:
data = data.replace('<>', '<>') # <> as text
data = self.re_sub_overlap(r'<([^>]*?)(?=<|$)', r'<\1', data)
data = self.re_sub_overlap(r'(^|>)([^<]*?)(?=>)', r'\1\2><', data)
data = re.sub(r'<(\s)+?', r'<\1', data) # consider "< a href" as "< a href"
# this filter introduces an error, so we correct it
data = data.replace('<>', '')
return data
# python re.sub() doesn't overlap matches
def re_sub_overlap(self, pat, repl, data, i=0):
data_temp = re.sub(pat, repl, data[i:])
if data_temp != data[i:]:
data = data[:i] + data_temp
i += 1
data = self.re_sub_overlap(pat, repl, data, i)
return data
#
def clickable_urls(self, data):
if self.make_clickable_urls:
# urls
# pat = re.compile('(('+'|'.join(self.allowed_protocols)+')://)(([a-z0-9](?:[a-z0-9\\-]*[a-z0-9])?\\.)+(com\\b|edu\\b|biz\\b|gov\\b|in(?:t|fo)\\b|mil\\b|net\\b|org\\b|[a-z][a-z]\\b)|((25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])))(:\\d+)?(/[-a-z0-9_:\\\\@&?=+,\\.!/~*\'%\\$]*)*(?<![.,?!])(?!((?!(?:<a )).)*?(?:</a>))(?!((?!(?:<!--)).)*?(?:-->))', re.IGNORECASE)
data = re.sub(self.pat_raw_url, self.f7, data)
# emails
if 'mailto' in self.allowed_protocols:
pat = re.compile(r'((([a-z]|[0-9]|!|#|$|%|&|\'|\*|\+|\-|/|=|\?|\^|_|`|\{|\||\}|~)+(\.([a-z]|[0-9]|!|#|$|%|&|\'|\*|\+|\-|/|=|\?|\^|_|`|\{|\||\}|~)+)*)@((((([a-z]|[0-9])([a-z]|[0-9]|\-){0,61}([a-z]|[0-9])\.))*([a-z]|[0-9])([a-z]|[0-9]|\-){0,61}([a-z]|[0-9])\.(com|edu|gov|int|mil|net|org|biz|info|name|pro|aero|coop|museum|arpa|[a-z]{2}))|(((([0-9]){1,3}\.){3}([0-9]){1,3}))|(\[((([0-9]){1,3}\.){3}([0-9]){1,3})\])))(?!((?!(?:<a )).)*?(?:</a>))(?!((?!(?:<!--)).)*?(?:-->))', re.IGNORECASE)
data = re.sub(pat, self.f8, data)
return data
def f7(self, m):
return '<a href="'+m.group(0)+'">'+m.group(0)+'</a>'
def f8(self, m):
return '<a href="mailto:'+m.group(0)+'">'+m.group(0)+'</a>'
#
def check_tags(self, data):
# compile loop regexps
self.pat_end_tag = re.compile(r'^/([a-z0-9]+)', re.DOTALL|re.IGNORECASE)
self.pat_start_tag = re.compile(r'^([a-z0-9]+)(.*?)(/?)$', re.DOTALL|re.IGNORECASE)
self.pat_matches_2 = re.compile(r'([a-z0-9]+)=(["\'])(.*?)\2', re.DOTALL|re.IGNORECASE) # <foo a="b" />
self.pat_matches_1 = re.compile(r'([a-z0-9]+)(=)([^"\s\']+)', re.DOTALL|re.IGNORECASE) # <foo a=b />
self.pat_matches_3 = re.compile(r'([a-z0-9]+)=(["\'])([^"\']*?)\s*$', re.DOTALL|re.IGNORECASE) # <foo a="b />
self.pat_comments = re.compile(r'^!--(.*)--$', re.DOTALL|re.IGNORECASE)
self.pat_param_protocol = re.compile(r'^([^:]+):', re.DOTALL|re.IGNORECASE)
pat = re.compile(r'<(.*?)>', re.DOTALL)
data = re.sub(pat, self.f1, data)
for tag in self.tag_counts:
count = self.tag_counts[tag]
for i in range(count):
data += '</'+tag+'>'
self.tag_counts = {}
return data
def f1(self, m):
return self.process_tag(m.group(1))
#
def process_tag(self, data):
# ending tags
m = re.match(self.pat_end_tag, data)
if m:
name = m.group(1).lower()
if name in self.allowed:
if name not in self.no_close:
if self.tag_counts.has_key(name):
self.tag_counts[name] -= 1
return '</' + name + '>'
else:
return ''
# starting tags
m = re.match(self.pat_start_tag, data)
if m:
name = m.group(1).lower()
body = m.group(2)
ending = m.group(3)
if name in self.allowed:
params = ''
matches_2 = re.findall(self.pat_matches_2, body) # <foo a="b" />
matches_1 = re.findall(self.pat_matches_1, body) # <foo a=b />
matches_3 = re.findall(self.pat_matches_3, body) # <foo a="b />
matches = {}
for match in matches_3:
matches[match[0].lower()] = match[2]
for match in matches_1:
matches[match[0].lower()] = match[2]
for match in matches_2:
matches[match[0].lower()] = match[2]
for pname in matches:
if pname in self.allowed[name]:
value = matches[pname]
if pname in self.protocol_attributes:
processed_value = self.process_param_protocol(value)
# add no_follow
if self.add_no_follow and name== 'a' and pname == 'href' and processed_value == value:
processed_value = re.sub(self.pat_raw_url, self.f9, processed_value)
value = processed_value
params += ' '+pname+'="'+value+'"'
if name in self.no_close:
ending = ' /'
if name in self.always_close:
ending = ''
if not ending:
if self.tag_counts.has_key(name):
self.tag_counts[name] += 1
else:
self.tag_counts[name] = 1
if ending:
ending = ' /'
return '<'+name+params+ending+'>'
else:
return ''
# comments
m = re.match(self.pat_comments, data)
if m:
if self.strip_comments:
return ''
else:
return '<'+data+'>'
# garbage, ignore it
return ''
def f9(self, m):
if m.group(3) not in self.follow_for:
return m.group()+'" rel="no-follow'
return m.group()
#
def process_param_protocol(self, data):
data = self.decode_entities(data)
m = re.match(self.pat_param_protocol, data)
if m:
if not m.group(1) in self.allowed_protocols:
start = len(m.group(1)) + 1
data = '#' + data[start:]
# remove forbidden urls
for pat in self.forbidden_urls:
m = re.search(pat, data)
if m:
data = '#'
return data
#
def process_remove_blanks(self, data):
for tag in self.remove_blanks:
data = re.sub(r'<'+tag+'(\s[^>]*)?></'+tag+'>', r'', data)
data = re.sub(r'<'+tag+'(\s[^>]*)?/>', r'', data)
return data
#
def strip_tags(self, html):
result = []
parser = HTMLParser()
parser.handle_data = result.append
parser.feed(html)
parser.close()
return ''.join(result)
def fix_case(self, data):
# compile loop regexps
self.pat_case_inner = re.compile(r'(^|[^\w\s\';,\\-])(\s*)([a-z])')
data_notags = self.strip_tags(data)
data_notags = re.sub(r'[^a-zA-Z]', r'', data_notags)
if len(data_notags) < 5:
return data
m = re.search(r'[a-z]', data_notags)
if m:
return data
pat = re.compile(r'(>|^)([^<]+?)(<|$)', re.DOTALL)
data = re.sub(pat, self.f2, data)
return data
def f2(self, m):
return m.group(1)+self.fix_case_inner(m.group(2))+m.group(3)
def fix_case_inner(self, data):
return re.sub(self.pat_case_inner, self.f3, data.lower())
def f3(self, m):
return m.group(1)+m.group(2)+m.group(3).upper()
#
def validate_entities(self, data):
# validate entities throughout the string
data = re.sub(self.pat_entities, self.f4, data)
# validate quotes outside of tags
data = re.sub(self.pat_quotes, self.f5, data)
return data
def f4(self, m):
return self.check_entity(m.group(1), m.group(2))
def f5(self, m):
return m.group(1)+m.group(2).replace('"', '"')+m.group(3)
#
def check_entity(self, preamble, term):
if term != ';':
return '&'+preamble
if self.is_valid_entity(preamble):
return '&'+preamble
return '&'+preamble
def is_valid_entity(self, entity):
m = re.match(self.pat_valid_entity, entity)
if m:
if int(m.group(1)) > 127:
return True
return self.allow_numbered_entities
if entity in self.allowed_entities:
return True
return False
#
# within attributes, we want to convert all hex/dec/url escape sequences into
# their raw characters so that we can check we don't get stray quotes/brackets
# inside strings
def decode_entities(self, data):
data = re.sub(self.pat_decode_entities_dec, self.decode_dec_entity, data)
data = re.sub(self.pat_decode_entities_hex, self.decode_hex_entity, data)
data = re.sub(self.pat_decode_entities_hex2, self.decode_hex_entity, data)
data = self.validate_entities(data)
return data
def decode_hex_entity(self, m):
return self.decode_num_entity(m.group(1), int(m.group(2), 16))
def decode_dec_entity(self, m):
return self.decode_num_entity(m.group(1), int(m.group(2)))
def decode_num_entity(self, orig_type, d):
if d < 0:
d = 32 # space
if d > 127:
if orig_type == '%':
return '%' + hex(d)[2:]
if orig_type == '&':
return '&#'+str(d)+';'
return escape(chr(d))
#
def break_words(self, data):
if self.break_words_longer_than > 0:
pat = re.compile(r'(>|^)([\s]*)([^<]+?)([\s]*)(<|$)', re.DOTALL)
data = re.sub(pat, self.f6, data)
return data
def f6(self, m):
return m.group(1)+m.group(2)+self.break_text(m.group(3))+m.group(4)+m.group(5)
def break_text(self, text):
ret = ''
entity_max_length = 8
if self.break_words_longer_than < entity_max_length:
width = entity_max_length
else:
width = self.break_words_longer_than
for word in text.split(' '):
if len(word) > width:
word = word.replace('​','')
m = re.search(self.pat_entities2, word[width-entity_max_length:width+entity_max_length])
if m:
width = width - entity_max_length + m.end()
ret += word[0:width] + '​' + self.break_text(word[width:]) # insert "Zero Width" Space - helps wordwrap
else:
ret += word + ' '
return ret.strip()
| Python |
from time import *
from calendar import timegm

# fix for mktime bug
# https://garage.maemo.org/tracker/index.php?func=detail&aid=4453&group_id=854&atid=3201
def mktime(time_tuple):
    """Replacement mktime(): seconds since the epoch for a local struct_time.

    timegm() interprets the tuple as UTC, so adding the local UTC offset
    (``timezone`` from the ``time`` module) reproduces mktime().

    BUG FIX: the original lambda called calendar.timegm(), but only the
    bare name ``timegm`` is imported above, so every call raised NameError.
    """
    return timegm(time_tuple) + timezone
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright(C) 2008 SupDo.com
# Licensed under the GNU General Public License, Version 3.0 (the "License");
#
# File: safecode.py
# Author: KuKei
# Create Date: 2008-07-16
# Description: Generates the verification code (CAPTCHA) image.
# Modify Date: 2008-08-06
import md5
import random
from pngcanvas import PNGCanvas
class Image():
    """A simple numeric CAPTCHA image.

    Each digit is drawn seven-segment style as straight line strokes on
    a PNGCanvas over a vertical gradient background.
    """
    text = None      # the verification code (int or string of digits)
    md5Text = None   # md5 hex digest of text, filled by getMd5Text()
    img = None       # PNGCanvas instance, created in create()
    width = 0        # computed image width in pixels
    height = 0       # computed image height in pixels
    # glyph cell geometry (pixels)
    textX = 10       # glyph width
    textY = 10       # glyph height
    beginX = 5       # left margin
    endX = 5         # right margin
    beginY = 5       # top margin
    endY = 5         # bottom margin
    spare = 4        # horizontal gap between glyphs

    def __init__(self, text=None):
        """Use the given code, or generate a random 4-digit one."""
        if text is None:
            self.text = self.getRandom()
        else:
            self.text = text
        self.width = len(str(self.text)) * (self.spare + self.textX) + self.beginX + self.endX
        self.height = self.textY + self.beginY + self.endY

    def create(self):
        """Render the code onto a fresh canvas and return the PNG bytes."""
        self.img = PNGCanvas(self.width, self.height)
        self.img.color = [0xff, 0xff, 0xff, 0xff]
        self.img.verticalGradient(1, 1, self.width - 2, self.height - 2,
                                  [0xff, 0x45, 0x45, 0xff], [0xff, 0xcb, 0x44, 0xff])
        # BUG FIX: was hard-coded to the first 4 characters; iterate the
        # whole text so codes of any length render completely.
        for i, digit in enumerate(str(self.text)):
            self.writeText(digit, i)
        return self.img.dump()

    def getRandom(self):
        """Return a random 4-digit integer (1000-9999 inclusive)."""
        # BUG FIX: randrange(1000, 9999) excluded 9999; randint includes
        # both endpoints.
        return random.randint(1000, 9999)

    def getMd5Text(self):
        """Store the md5 hex digest of the code (Python 2 ``md5`` module)."""
        m = md5.new()
        m.update(str(self.text))
        self.md5Text = m.hexdigest()

    def writeText(self, text, pos=0):
        """Draw one digit character at glyph slot *pos* as line strokes."""
        # seven-segment style stroke lists per digit; see writeLine for codes
        segments = {
            "1": ("avc",),
            "2": ("aht", "hvtr", "ahc", "hvbl", "ahb"),
            "3": ("aht", "ahc", "ahb", "avr"),
            "4": ("hvtl", "ahc", "avc"),
            "5": ("aht", "hvtl", "ahc", "hvbr", "ahb"),
            "6": ("aht", "avl", "ahc", "hvbr", "ahb"),
            "7": ("aht", "avr"),
            "8": ("aht", "avl", "ahc", "avr", "ahb"),
            "9": ("aht", "avr", "ahc", "ahb", "hvtl"),
            "0": ("aht", "avl", "avr", "ahb"),
        }
        for stroke in segments.get(text, ()):
            self.writeLine(pos, stroke)

    # stroke type codes:
    #   a: full length          h (leading): half length
    #   h: horizontal           v: vertical
    #   l: left/top             c: centre           r: right/bottom
    #   t: top                  b: bottom
    def writeLine(self, pos, type):
        """Draw a single stroke of the glyph at slot *pos*."""
        left = self.beginX + (self.textX + self.spare) * pos  # cell left edge
        right = left + self.textX
        centerX = left + self.textX / 2
        top = self.beginY
        bottom = top + self.textY
        middle = top + self.textY / 2
        if type == "avl":
            self.img.line(left, top, left, bottom)
        elif type == "avc":
            self.img.line(centerX, top, centerX, bottom)
        elif type == "avr":
            self.img.line(right, top, right, bottom)
        elif type == "aht":
            self.img.line(left, top, right, top)
        elif type == "ahc":
            self.img.line(left, middle, right, middle)
        elif type == "ahb":
            self.img.line(left, bottom, right, bottom)
        elif type == "hvtl":
            self.img.line(left, top, left, middle)
        elif type == "hvtr":
            self.img.line(right, top, right, middle)
        elif type == "hvbl":
            self.img.line(left, middle, left, bottom)
        elif type == "hvbr":
            self.img.line(right, middle, right, bottom)
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cgi,os
import StringIO
import logging
import re
import sys
import traceback
import urlparse
import webob
import wsgiref.headers
import wsgiref.util
from google.appengine.ext.webapp import *
class RequestHandler(RequestHandler):
    """webapp RequestHandler extended with __before__/__after__ hooks."""

    def __init__(self):
        # per-request template context, shared by subclasses
        self.template_vals = dict()

    def __before__(self, *args):
        """Hook run before the matching get/post/... method.

        Common setup code for every HTTP verb goes here.
        """
        pass

    def __after__(self, *args):
        """Hook run AFTER the response has been sent to the browser.

        Put follow-up work the client should not wait for here, such
        as sending emails.
        """
        pass
class WSGIApplication2(WSGIApplication):
    """WSGIApplication variant that wraps dispatch with __before__ and
    __after__ hooks.  __after__ fires only after the response has been
    written back to the browser, so it can carry out deferred work the
    client should not wait for.
    """

    # HTTP verb -> handler method name
    _METHODS = {
        'GET': 'get',
        'POST': 'post',
        'HEAD': 'head',
        'OPTIONS': 'options',
        'PUT': 'put',
        'DELETE': 'delete',
        'TRACE': 'trace',
    }

    def __init__(self, url_mapping, debug=False):
        """Initializes this application with the given URL mapping.

        Args:
            url_mapping: list of (URI, RequestHandler) pairs (e.g., [('/', ReqHan)])
            debug: if true, we send Python stack traces to the browser on errors
        """
        self._init_url_mappings(url_mapping)
        self.__debug = debug
        WSGIApplication.active_instance = self
        self.current_request_args = ()

    def __call__(self, environ, start_response):
        """WSGI entry point: route the request, run hooks, dispatch."""
        request = Request(environ)
        response = Response()
        WSGIApplication.active_instance = self
        handler = None
        groups = ()
        # first mapping whose regexp matches the path wins
        for pattern, handler_class in self._url_mapping:
            found = pattern.match(request.path)
            if found:
                handler = handler_class()
                handler.initialize(request, response)
                groups = found.groups()
                break
        self.current_request_args = groups
        if handler is None:
            response.set_status(404)
            response.wsgi_write(start_response)
            return ['']
        try:
            handler.__before__(*groups)
            action = self._METHODS.get(environ['REQUEST_METHOD'])
            if action is None:
                handler.error(501)
            else:
                getattr(handler, action)(*groups)
            response.wsgi_write(start_response)
            # response is already on the wire; safe place for slow work
            handler.__after__(*groups)
        except Exception as e:
            handler.handle_exception(e, self.__debug)
        return ['']
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cgi,os
import StringIO
import logging
import re
import sys
import traceback
import urlparse
import webob
import wsgiref.headers
import wsgiref.util
from google.appengine.ext.webapp import *
class RequestHandler(RequestHandler):
    """webapp RequestHandler extended with __before__/__after__ hooks."""

    def __init__(self):
        # per-request template context, shared by subclasses
        self.template_vals = dict()

    def __before__(self, *args):
        """Hook run before the matching get/post/... method.

        Common setup code for every HTTP verb goes here.
        """
        pass

    def __after__(self, *args):
        """Hook run AFTER the response has been sent to the browser.

        Put follow-up work the client should not wait for here, such
        as sending emails.
        """
        pass
class WSGIApplication2(WSGIApplication):
    """WSGIApplication variant that wraps dispatch with __before__ and
    __after__ hooks.  __after__ fires only after the response has been
    written back to the browser, so it can carry out deferred work the
    client should not wait for.
    """

    # HTTP verb -> handler method name
    _METHODS = {
        'GET': 'get',
        'POST': 'post',
        'HEAD': 'head',
        'OPTIONS': 'options',
        'PUT': 'put',
        'DELETE': 'delete',
        'TRACE': 'trace',
    }

    def __init__(self, url_mapping, debug=False):
        """Initializes this application with the given URL mapping.

        Args:
            url_mapping: list of (URI, RequestHandler) pairs (e.g., [('/', ReqHan)])
            debug: if true, we send Python stack traces to the browser on errors
        """
        self._init_url_mappings(url_mapping)
        self.__debug = debug
        WSGIApplication.active_instance = self
        self.current_request_args = ()

    def __call__(self, environ, start_response):
        """WSGI entry point: route the request, run hooks, dispatch."""
        request = Request(environ)
        response = Response()
        WSGIApplication.active_instance = self
        handler = None
        groups = ()
        # first mapping whose regexp matches the path wins
        for pattern, handler_class in self._url_mapping:
            found = pattern.match(request.path)
            if found:
                handler = handler_class()
                handler.initialize(request, response)
                groups = found.groups()
                break
        self.current_request_args = groups
        if handler is None:
            response.set_status(404)
            response.wsgi_write(start_response)
            return ['']
        try:
            handler.__before__(*groups)
            action = self._METHODS.get(environ['REQUEST_METHOD'])
            if action is None:
                handler.error(501)
            else:
                getattr(handler, action)(*groups)
            response.wsgi_write(start_response)
            # response is already on the wire; safe place for slow work
            handler.__after__(*groups)
        except Exception as e:
            handler.handle_exception(e, self.__debug)
        return ['']
| Python |
#!/usr/bin/env python
"""Simple PNG Canvas for Python"""
__version__ = "0.8"
__author__ = "Rui Carmo (http://the.taoofmac.com)"
__copyright__ = "CC Attribution-NonCommercial-NoDerivs 2.0 Rui Carmo"
__contributors__ = ["http://collaboa.weed.rbse.com/repository/file/branches/pgsql/lib/spark_pr.rb"], ["Eli Bendersky"]
import zlib, struct
# the 8-byte PNG file signature: \x89 'PNG' \r \n \x1a \n
signature = struct.pack("8B", 0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A)
# alpha blends two colors, using the alpha given by c2
def blend(c1, c2):
    """Return the RGB of c2 alpha-blended over c1 (c2[3] is the 0-255 weight)."""
    alpha = c2[3]
    inv = 0xFF - alpha
    return [(c1[chan] * inv + c2[chan] * alpha) >> 8 for chan in range(3)]
# calculate a new alpha given a 0-0xFF intensity
def intensity(c, i):
    """Return colour *c* with its alpha channel scaled by intensity *i*."""
    scaled_alpha = (c[3] * i) >> 8
    return [c[0], c[1], c[2], scaled_alpha]
# calculate perceptive grayscale value
def grayscale(c):
    """Perceptive luminance of an RGB colour, truncated to int."""
    weights = (0.3, 0.59, 0.11)
    return int(sum(channel * w for channel, w in zip(c, weights)))
# calculate gradient colors
def gradientList(start, end, steps):
    """Return steps+1 RGBA colours linearly interpolated from start to end."""
    span = [end[ch] - start[ch] for ch in range(4)]
    return [[start[ch] + (span[ch] * step) / steps for ch in range(4)]
            for step in range(steps + 1)]
class PNGCanvas:
    """Minimal in-memory RGB canvas that can be dumped to / loaded from PNG.

    The canvas is a height x width grid of [r, g, b] lists; drawing uses
    self.color (RGBA) and alpha-blends pixels via the module-level blend().
    NOTE(review): this is Python 2 code (byte strings, chr/ord on str,
    integer division); it is not Python 3 clean as written.
    """
    def __init__(self, width, height,bgcolor=[0xff,0xff,0xff,0xff],color=[0,0,0,0xff]):
        # NOTE(review): mutable default arguments are shared across calls;
        # harmless here only because they are never mutated in place.
        self.canvas = []
        self.width = width
        self.height = height
        self.color = color  #rgba
        bgcolor = bgcolor[0:3]  # we don't need alpha for background
        for i in range(height):
            self.canvas.append([bgcolor] * width)

    def point(self,x,y,color=None):
        """Plot one pixel, alpha-blending color (default self.color) onto it.

        Out-of-bounds coordinates are silently ignored.
        """
        if x<0 or y<0 or x>self.width-1 or y>self.height-1: return
        if color == None: color = self.color
        self.canvas[y][x] = blend(self.canvas[y][x],color)

    def _rectHelper(self,x0,y0,x1,y1):
        # Normalize a rectangle: ints, (x0,y0) top-left, (x1,y1) bottom-right.
        x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
        if x0 > x1: x0, x1 = x1, x0
        if y0 > y1: y0, y1 = y1, y0
        return [x0,y0,x1,y1]

    def verticalGradient(self,x0,y0,x1,y1,start,end):
        """Fill the rectangle with a top-to-bottom gradient from start to end."""
        x0, y0, x1, y1 = self._rectHelper(x0,y0,x1,y1)
        grad = gradientList(start,end,y1-y0)
        for x in range(x0, x1+1):
            for y in range(y0, y1+1):
                self.point(x,y,grad[y-y0])

    def rectangle(self,x0,y0,x1,y1):
        """Draw the outline of a rectangle in self.color."""
        x0, y0, x1, y1 = self._rectHelper(x0,y0,x1,y1)
        self.polyline([[x0,y0],[x1,y0],[x1,y1],[x0,y1],[x0,y0]])

    def filledRectangle(self,x0,y0,x1,y1):
        """Fill a rectangle with self.color (alpha-blended per pixel)."""
        x0, y0, x1, y1 = self._rectHelper(x0,y0,x1,y1)
        for x in range(x0, x1+1):
            for y in range(y0, y1+1):
                self.point(x,y,self.color)

    def copyRect(self,x0,y0,x1,y1,dx,dy,destination):
        """Copy the source rect verbatim into destination at (dx,dy).

        NOTE(review): writes destination.canvas directly (no clipping, no
        blending); out-of-range targets would raise IndexError.
        """
        x0, y0, x1, y1 = self._rectHelper(x0,y0,x1,y1)
        for x in range(x0, x1+1):
            for y in range(y0, y1+1):
                destination.canvas[dy+y-y0][dx+x-x0] = self.canvas[y][x]

    def blendRect(self,x0,y0,x1,y1,dx,dy,destination,alpha=0xff):
        """Blend the source rect onto destination at (dx,dy) with given alpha."""
        x0, y0, x1, y1 = self._rectHelper(x0,y0,x1,y1)
        for x in range(x0, x1+1):
            for y in range(y0, y1+1):
                rgba = self.canvas[y][x] + [alpha]
                destination.point(dx+x-x0,dy+y-y0,rgba)

    # draw a line using Xiaolin Wu's antialiasing technique
    def line(self,x0, y0, x1, y1):
        """Draw an antialiased line from (x0,y0) to (x1,y1) in self.color."""
        # clean params
        x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
        if y0>y1:
            # always draw top to bottom
            y0, y1, x0, x1 = y1, y0, x1, x0
        dx = x1-x0
        if dx < 0:
            sx = -1
        else:
            sx = 1
        dx *= sx
        dy = y1-y0
        # 'easy' cases
        if dy == 0:
            # horizontal line
            for x in range(x0,x1,sx):
                self.point(x, y0)
            return
        if dx == 0:
            # vertical line
            for y in range(y0,y1):
                self.point(x0, y)
            self.point(x1, y1)
            return
        if dx == dy:
            # perfect 45-degree diagonal
            for x in range(x0,x1,sx):
                self.point(x, y0)
                y0 = y0 + 1
            return
        # main loop
        self.point(x0, y0)
        e_acc = 0  # 16.16 fixed-point error accumulator
        if dy > dx:  # vertical displacement
            # NOTE(review): Python 2 integer division is assumed here;
            # under Python 3 '/' would yield a float and break the shifts.
            e = (dx << 16) / dy
            for i in range(y0,y1-1):
                e_acc_temp, e_acc = e_acc, (e_acc + e) & 0xFFFF
                if (e_acc <= e_acc_temp):
                    # accumulator wrapped: step one pixel in x
                    x0 = x0 + sx
                w = 0xFF-(e_acc >> 8)
                # split coverage between the two columns straddling the line
                self.point(x0, y0, intensity(self.color,(w)))
                y0 = y0 + 1
                self.point(x0 + sx, y0, intensity(self.color,(0xFF-w)))
            self.point(x1, y1)
            return
        # horizontal displacement
        e = (dy << 16) / dx
        for i in range(x0,x1-sx,sx):
            e_acc_temp, e_acc = e_acc, (e_acc + e) & 0xFFFF
            if (e_acc <= e_acc_temp):
                # accumulator wrapped: step one pixel in y
                y0 = y0 + 1
            w = 0xFF-(e_acc >> 8)
            self.point(x0, y0, intensity(self.color,(w)))
            x0 = x0 + sx
            self.point(x0, y0 + 1, intensity(self.color,(0xFF-w)))
        self.point(x1, y1)

    def polyline(self,arr):
        """Draw connected line segments through the [x, y] points in arr."""
        for i in range(0,len(arr)-1):
            self.line(arr[i][0],arr[i][1],arr[i+1][0], arr[i+1][1])

    def dump(self):
        """Serialize the canvas to a PNG byte string (8-bit RGB, filter 0)."""
        raw_list = []
        for y in range(self.height):
            raw_list.append(chr(0))  # filter type 0 (None)
            for x in range(self.width):
                raw_list.append(struct.pack("!3B",*self.canvas[y][x]))
        raw_data = ''.join(raw_list)
        # 8-bit image represented as RGB tuples
        # simple transparency, alpha is pure white
        return signature + \
            self.pack_chunk('IHDR', struct.pack("!2I5B",self.width,self.height,8,2,0,0,0)) + \
            self.pack_chunk('tRNS', struct.pack("!6B",0xFF,0xFF,0xFF,0xFF,0xFF,0xFF)) + \
            self.pack_chunk('IDAT', zlib.compress(raw_data,9)) + \
            self.pack_chunk('IEND', '')

    def pack_chunk(self,tag,data):
        # PNG chunk layout: 4-byte length, tag, data, CRC32 over tag+data.
        to_check = tag + data
        return struct.pack("!I",len(data)) + to_check + struct.pack("!I", zlib.crc32(to_check) & 0xFFFFFFFF)

    def load(self,f):
        """Load an 8-bit RGB PNG from file object f into self.canvas.

        Raises TypeError for unsupported PNG variants (bit depth, color
        type, interlace); asserts on a bad signature.
        """
        assert f.read(8) == signature
        self.canvas=[]
        for tag, data in self.chunks(f):
            if tag == "IHDR":
                ( width,
                  height,
                  bitdepth,
                  colortype,
                  compression, filter, interlace ) = struct.unpack("!2I5B",data)
                self.width = width
                self.height = height
                if (bitdepth,colortype,compression, filter, interlace) != (8,2,0,0,0):
                    raise TypeError('Unsupported PNG format')
            # we ignore tRNS because we use pure white as alpha anyway
            elif tag == 'IDAT':
                raw_data = zlib.decompress(data)
                rows = []
                i = 0
                for y in range(height):
                    filtertype = ord(raw_data[i])  # first byte of each scanline
                    i = i + 1
                    cur = [ord(x) for x in raw_data[i:i+width*3]]
                    if y == 0:
                        rgb = self.defilter(cur,None,filtertype)
                    else:
                        rgb = self.defilter(cur,prev,filtertype)
                    # defilter mutates cur in place, so cur IS the
                    # reconstructed row and is the correct prev for y+1
                    prev = cur
                    i = i+width*3
                    row = []
                    j = 0
                    for x in range(width):
                        pixel = rgb[j:j+3]
                        row.append(pixel)
                        j = j + 3
                    self.canvas.append(row)

    def defilter(self,cur,prev,filtertype,bpp=3):
        """Reverse one scanline's PNG filter in place and return it.

        cur  -- current scanline bytes (list of ints), modified in place
        prev -- previous reconstructed scanline (None for the first row)
        bpp  -- bytes per pixel (3 for RGB)
        """
        if filtertype == 0:  # No filter
            return cur
        elif filtertype == 1:  # Sub
            xp = 0
            for xc in range(bpp,len(cur)):
                cur[xc] = (cur[xc] + cur[xp]) % 256
                xp = xp + 1
        elif filtertype == 2:  # Up
            for xc in range(len(cur)):
                cur[xc] = (cur[xc] + prev[xc]) % 256
        elif filtertype == 3:  # Average
            xp = 0
            for xc in range(len(cur)):
                # NOTE(review): Python 2 integer division assumed for /2
                cur[xc] = (cur[xc] + (cur[xp] + prev[xc])/2) % 256
                xp = xp + 1
        elif filtertype == 4:  # Paeth
            xp = 0
            # first pixel has no left neighbor: predict from 'up' only
            for i in range(bpp):
                cur[i] = (cur[i] + prev[i]) % 256
            for xc in range(bpp,len(cur)):
                a = cur[xp]   # left
                b = prev[xc]  # up
                c = prev[xp]  # upper-left
                p = a + b - c
                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)
                # Paeth predictor: nearest of a, b, c (ties prefer a, then b)
                if pa <= pb and pa <= pc:
                    value = a
                elif pb <= pc:
                    value = b
                else:
                    value = c
                cur[xc] = (cur[xc] + value) % 256
                xp = xp + 1
        else:
            raise TypeError('Unrecognized scanline filter type')
        return cur

    def chunks(self,f):
        """Yield [tag, data] for each chunk in the stream; stop at EOF.

        Raises IOError on a CRC mismatch.
        """
        while 1:
            try:
                length = struct.unpack("!I",f.read(4))[0]
                tag = f.read(4)
                data = f.read(length)
                crc = struct.unpack("!i",f.read(4))[0]
            except:
                # short read at end of stream terminates the generator
                return
            if zlib.crc32(tag + data) != crc:
                raise IOError
            yield [tag,data]
if __name__ == '__main__':
    # Smoke test / demo: draw shapes and gradients, write a PNG, read it
    # back, and write it again (exercises both dump() and load()).
    width = 128
    height = 64
    print "Creating Canvas..."
    c = PNGCanvas(width,height)
    c.color = [0xff,0,0,0xff]
    c.rectangle(0,0,width-1,height-1)
    print "Generating Gradient..."
    c.verticalGradient(1,1,width-2, height-2,[0xff,0,0,0xff],[0x20,0,0xff,0x80])
    print "Drawing Lines..."
    c.color = [0,0,0,0xff]
    c.line(0,0,width-1,height-1)
    c.line(0,0,width/2,height-1)
    c.line(0,0,width-1,height/2)
    # Copy Rect to Self
    print "Copy Rect"
    c.copyRect(1,1,width/2-1,height/2-1,0,height/2,c)
    # Blend Rect to Self
    print "Blend Rect"
    c.blendRect(1,1,width/2-1,height/2-1,width/2,0,c)
    # Write test
    print "Writing to file..."
    f = open("test.png", "wb")
    f.write(c.dump())
    f.close()
    # Read test
    print "Reading from file..."
    f = open("test.png", "rb")
    c.load(f)
    f.close()
    # Write back
    print "Writing to new file..."
    f = open("recycle.png","wb")
    f.write(c.dump())
    f.close()
| Python |
# gmemsess.py - memcache-backed session Class for Google Appengine
# Version 1.4
# Copyright 2008 Greg Fawcett <greg@vig.co.nz>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import random
from google.appengine.api import memcache
# Alphabet for session ids: 52 chars, so an 8-char id has 52**8 ~ 5.3e13
# combinations (see the comment in Session.__init__).
_sidChars='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
_defaultTimeout=30*60 # 30 min
_defaultCookieName='gsid'
#----------------------------------------------------------------------
class Session(dict):
    """A secure lightweight memcache-backed session Class for Google Appengine."""

    def __init__(self, rh, name=_defaultCookieName, timeout=_defaultTimeout):
        """Create a session object.

        Keyword arguments:
        rh -- the parent's request handler (usually self)
        name -- the cookie name (defaults to "gsid")
        timeout -- the number of seconds the session will last between
                   requests (defaults to 1800 secs - 30 minutes)
        """
        dict.__init__(self)
        self.rh = rh  # request handler
        self._timeout = timeout
        self._name = name
        self._new = True
        self._invalid = False
        cookies = rh.request.str_cookies
        if name in cookies:
            self._sid = cookies[name]
            data = memcache.get(self._sid)
            if data != None:
                self.update(data)
                # memcache timeout is absolute, so we need to reset it on each access
                memcache.set(self._sid, data, self._timeout)
                self._new = False
                return
        # No (valid) session found: mint a fresh session id.
        # There are about 10^14 combinations, so guessing won't work.
        self._sid = ''.join([random.choice(_sidChars) for _ in range(8)])
        # path=/ so the session works on any path of the site
        self.rh.response.headers.add_header('Set-Cookie', '%s=%s; path=/;' % (name, self._sid))

    def save(self):
        """Save session data."""
        if self._invalid:
            return
        memcache.set(self._sid, self.copy(), self._timeout)

    def is_new(self):
        """Returns True if session was created during this request."""
        return self._new

    def get_id(self):
        """Returns session id string."""
        return self._sid

    def invalidate(self):
        """Delete session data and cookie."""
        self.rh.response.headers.add_header('Set-Cookie',
            '%s=; expires=Sat, 1-Jan-2000 00:00:00 GMT;' % (self._name))
        memcache.delete(self._sid)
        self.clear()
        self._invalid = True
| Python |
import os,logging,re
from model import OptionSet
from google.appengine.ext.webapp import template
from google.appengine.ext import zipserve
# Matches (non-greedy) parenthesized groups in a URL regexp; used to count
# the capture groups a handler pattern declares.
RE_FIND_GROUPS = re.compile('\(.*?\)')
class PluginIterator:
    """Iterates plugin modules found in a plugins directory.

    Yields (name, import_path) for every package sub-directory and every
    *.py file except __init__.py; anything else is skipped.  The directory
    is listed lazily on the first next() call.
    """

    def __init__(self, plugins_path='plugins'):
        self.iterating = False
        self.plugins_path = plugins_path
        self.list = []
        self.cursor = 0

    def __iter__(self):
        return self

    def next(self):
        if not self.iterating:
            # first call: snapshot the directory contents
            self.iterating = True
            self.list = os.listdir(self.plugins_path)
            self.cursor = 0
        while self.cursor < len(self.list):
            entry = self.list[self.cursor]
            self.cursor += 1
            if os.path.isdir(os.path.join(self.plugins_path, entry)):
                # package plugin: plugins.<name>.<name>
                return (entry, '%s.%s.%s' % (self.plugins_path, entry, entry))
            if entry.endswith('.py') and entry != '__init__.py':
                # module plugin: plugins.<name>
                stem = entry[:-3]
                return (stem, '%s.%s' % (self.plugins_path, stem))
            # non-plugin entry: keep scanning
        self.iterating = False
        raise StopIteration
class Plugins:
    """Loads, activates and dispatches Micolog plugins.

    self.list maps an internal plugin name to its instance; the set of
    active plugin names is persisted through OptionSet("PluginActive").
    Filter/action/url dispatch tables are built lazily and cached.
    """

    def __init__(self, blog=None):
        self.blog = blog
        self.list = {}               # iname -> plugin instance
        self._filter_plugins = {}    # filter name -> [callables]
        self._action_plugins = {}    # action name -> [callables]
        self._urlmap = {}
        self._handlerlist = {}
        self._setupmenu = []
        pi = PluginIterator()
        self.active_list = OptionSet.getValue("PluginActive", [])
        for v, m in pi:
            try:
                # import the plugin module and instantiate the class that
                # shares the module's name
                mod = __import__(m, globals(), locals(), [v])
                plugin = getattr(mod, v)()
                plugin.iname = v  # internal name
                plugin.active = v in self.active_list
                plugin.blog = self.blog
                self.list[v] = plugin
            except Exception:
                # a broken plugin must not take the blog down, but log it
                # instead of the original silent bare except/pass
                logging.exception('failed to load plugin %s', v)

    def add_urlhandler(self, plugin, application):
        """Register the plugin's URL handlers with the webapp application."""
        for regexp, handler in plugin._handlerlist.items():
            try:
                application._handler_map[handler.__name__] = handler
                if not regexp.startswith('^'):
                    regexp = '^' + regexp
                if not regexp.endswith('$'):
                    regexp += '$'
                compiled = re.compile(regexp)
                # insert before the trailing catch-all entries
                application._url_mapping.insert(-2, (compiled, handler))
                num_groups = len(RE_FIND_GROUPS.findall(regexp))
                handler_patterns = application._pattern_map.setdefault(handler, [])
                handler_patterns.insert(-2, (compiled, num_groups))
            except Exception:
                logging.exception('failed to add url handler %s', regexp)

    def remove_urlhandler(self, plugin, application):
        """Unregister the plugin's URL handlers from the application.

        Bug fix: the original looped with 'del um' / 'del pm', which only
        unbinds the loop variable and never removes anything from the
        application's tables; the entries are now removed for real.
        """
        for regexp, handler in plugin._handlerlist.items():
            try:
                if handler.__name__ in application._handler_map:
                    del application._handler_map[handler.__name__]
                application._url_mapping[:] = [
                    um for um in application._url_mapping
                    if getattr(um[1], '__name__', None) != handler.__name__]
                if handler in application._pattern_map:
                    del application._pattern_map[handler]
            except Exception:
                logging.exception('failed to remove url handler %s', regexp)

    def register_handlerlist(self, application):
        """Register the handlers of every active plugin that declares any."""
        for name, item in self.list.items():
            if item.active and item._handlerlist:
                self.add_urlhandler(item, application)

    def reload(self):
        pass

    def __getitem__(self, index):
        # list(...) works with both Python 2 lists and Python 3 views and
        # raises IndexError past the end, which also drives old-style
        # 'for item in self' iteration.
        return list(self.list.values())[index]

    def getPluginByName(self, iname):
        """Return the plugin registered under iname, or None."""
        if iname in self.list:
            return self.list[iname]
        else:
            return None

    def activate(self, iname, active):
        """(De)activate the plugin iname and update the dispatch caches."""
        if active:
            plugin = self.getPluginByName(iname)
            if plugin:
                if (iname not in self.active_list):
                    self.active_list.append(iname)
                    OptionSet.setValue("PluginActive", self.active_list)
                plugin.active = active
                # add filters
                for k, v in plugin._filter.items():
                    if k in self._filter_plugins:
                        if not v in self._filter_plugins[k]:
                            self._filter_plugins[k].append(v)
                # add actions
                for k, v in plugin._action.items():
                    if k in self._action_plugins:
                        if not v in self._action_plugins[k]:
                            self._action_plugins[k].append(v)
                if self.blog.application:
                    self.add_urlhandler(plugin, self.blog.application)
        else:
            plugin = self.getPluginByName(iname)
            if plugin:
                if (iname in self.active_list):
                    self.active_list.remove(iname)
                    OptionSet.setValue("PluginActive", self.active_list)
                plugin.active = active
                # remove filters
                for k, v in plugin._filter.items():
                    if k in self._filter_plugins:
                        if v in self._filter_plugins[k]:
                            self._filter_plugins[k].remove(v)
                # remove actions
                for k, v in plugin._action.items():
                    if k in self._action_plugins:
                        if v in self._action_plugins[k]:
                            self._action_plugins[k].remove(v)
                if self.blog.application:
                    self.remove_urlhandler(plugin, self.blog.application)
        # force lazy rebuild of the url map and admin setup menu
        self._urlmap = {}
        self._setupmenu = []

    def filter(self, attr, value):
        """Return active plugins whose attribute attr equals value."""
        rlist = []
        for item in self:
            if item.active and hasattr(item, attr) and getattr(item, attr) == value:
                rlist.append(item)
        return rlist

    def get_filter_plugins(self, name):
        """Return the tuple of filter callables registered for name (cached)."""
        if name not in self._filter_plugins:
            for item in self:
                if item.active and hasattr(item, "_filter"):
                    if name in item._filter:
                        if name in self._filter_plugins:
                            self._filter_plugins[name].append(item._filter[name])
                        else:
                            self._filter_plugins[name] = [item._filter[name]]
        if name in self._filter_plugins:
            return tuple(self._filter_plugins[name])
        else:
            return ()

    def get_action_plugins(self, name):
        """Return the tuple of action callables registered for name (cached)."""
        if name not in self._action_plugins:
            for item in self:
                if item.active and hasattr(item, "_action"):
                    if name in item._action:
                        if name in self._action_plugins:
                            self._action_plugins[name].append(item._action[name])
                        else:
                            self._action_plugins[name] = [item._action[name]]
        if name in self._action_plugins:
            return tuple(self._action_plugins[name])
        else:
            return ()

    def get_urlmap_func(self, url):
        """Return the function mapped to url by an active plugin, or None."""
        if not self._urlmap:
            for item in self:
                if item.active:
                    self._urlmap.update(item._urlmap)
        if url in self._urlmap:
            return self._urlmap[url]
        else:
            return None

    def get_setupmenu(self):
        # Get menu list for admin setup page (collected lazily).
        if not self._setupmenu:
            for item in self:
                if item.active:
                    self._setupmenu += item._setupmenu
        return self._setupmenu

    def get_handlerlist(self, url):
        """Return the handler registered for url, or an empty dict."""
        if not self._handlerlist:
            for item in self:
                if item.active:
                    self._handlerlist.update(item._handlerlist)
        if url in self._handlerlist:
            return self._handlerlist[url]
        else:
            return {}

    def tigger_filter(self, name, content, *arg1, **arg2):
        # ("tigger" [sic] kept: it is the public API used elsewhere.)
        for func in self.get_filter_plugins(name):
            content = func(content, *arg1, **arg2)
        return content

    def tigger_action(self, name, *arg1, **arg2):
        for func in self.get_action_plugins(name):
            func(*arg1, **arg2)

    def tigger_urlmap(self, url, *arg1, **arg2):
        func = self.get_urlmap_func(url)
        if func:
            func(*arg1, **arg2)
            return True
        else:
            return None
class Plugin:
    """Base class for Micolog plugins.

    Subclasses register filters/actions/url handlers via the register_*
    helpers; the Plugins registry reads the resulting _filter/_action/
    _urlmap/_handlerlist/_setupmenu tables.
    """

    def __init__(self, pfile=__file__):
        # pfile: the plugin module's __file__, used to locate its templates
        self.name = "Unnamed"
        self.author = ""
        self.description = ""
        self.uri = ""
        self.version = ""
        self.authoruri = ""
        self.template_vals = {}
        self.dir = os.path.dirname(pfile)
        self._filter = {}       # filter name -> callable
        self._action = {}       # action name -> callable
        self._urlmap = {}       # url -> callable
        self._handlerlist = {}  # url regexp -> handler class
        self._urlhandler = {}
        self._setupmenu = []

    def get(self, page):
        """Default admin-page content: the plugin's name and description."""
        return "<h3>%s</h3><p>%s</p>" % (self.name, self.description)

    def render_content(self, template_file, template_vals=None):
        """
        Helper method to render the appropriate template.

        Fix: the mutable default argument ({}) was replaced with None;
        behavior for all callers is unchanged.
        """
        if template_vals:
            self.template_vals.update(template_vals)
        path = os.path.join(self.dir, template_file)
        return template.render(path, self.template_vals)

    def error(self, msg=""):
        """Format an error heading for the admin UI."""
        return "<h3>Error:%s</h3>" % msg

    def register_filter(self, name, func):
        self._filter[name] = func

    def register_action(self, name, func):
        self._action[name] = func

    def register_urlmap(self, url, func):
        self._urlmap[url] = func

    def register_urlhandler(self, url, handler):
        self._handlerlist[url] = handler

    def register_urlzip(self, name, zipfile):
        # serve static content straight out of a zip in the plugin dir
        zipfile = os.path.join(self.dir, zipfile)
        self._handlerlist[name] = zipserve.make_zip_handler(zipfile)

    def register_setupmenu(self, m_id, title, url):
        # Add menu to admin setup page.
        # m_id is a flag used to highlight the current page.
        self._setupmenu.append({'m_id': m_id, 'title': title, 'url': url})
class Plugin_importbase(Plugin):
    """Base class for content-import plugins (e.g. WordPress importers)."""

    def __init__(self, pfile, name, description=""):
        # pfile: the plugin module's __file__; name/description describe
        # the import source shown in the admin UI.
        Plugin.__init__(self, pfile)
        self.is_import_plugin = True  # marks this plugin as an importer
        self.import_name = name
        self.import_description = description

    def post(self):
        # Subclasses override this to handle the import form POST; no-op here.
        pass
| Python |
# -*- coding: utf-8 -*-
import os,logging
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext.db import Model as DBModel
from google.appengine.api import memcache
from google.appengine.api import mail
from google.appengine.api import urlfetch
from datetime import datetime
import urllib, hashlib,urlparse
import zipfile,re,pickle,uuid
#from base import *
# Logged on every (re)load so warm-up/reload cycles are visible in the logs.
logging.info('module base reloaded')
# Absolute directory of this module; themes/views are resolved relative to it.
rootpath=os.path.dirname(__file__)
def vcache(key="", time=3600):
    """Decorator factory: memoize a function's result in memcache.

    key  -- the memcache key the result is stored under (one key per
            decorated function, arguments are NOT part of the key)
    time -- cache lifetime in seconds

    When g_blog.enable_memcache is off, calls pass straight through.

    Bug fix: the original only ever wrote to memcache and never read it,
    so the cache had no effect; cached values are now returned.  A cached
    value of None is indistinguishable from a miss and is recomputed.
    """
    def _decorate(method):
        def _wrapper(*args, **kwargs):
            if not g_blog.enable_memcache:
                return method(*args, **kwargs)
            result = memcache.get(key)
            if result is None:
                result = method(*args, **kwargs)
                memcache.set(key, result, time)
            return result
        return _wrapper
    return _decorate
class Theme:
    """Resolves a theme's on-disk location and maps template names to paths.

    Attribute access (e.g. theme.index) yields the relative path
    '<name>/templates/index.html'; results are memoized in mapping_cache.
    A theme without an unpacked directory is assumed to live in
    '<name>.zip' (isZip is set accordingly).
    """

    def __init__(self, name='default'):
        self.name = name
        self.mapping_cache = {}  # template name -> resolved path
        self.dir = '/themes/%s' % name  # URL path of the theme
        self.viewdir = os.path.join(rootpath, 'view')
        self.server_dir = os.path.join(rootpath, 'themes', self.name)
        if os.path.exists(self.server_dir):
            self.isZip = False
        else:
            # no unpacked theme directory: assume a zipped theme
            self.isZip = True
            self.server_dir = self.server_dir + ".zip"
        #self.server_dir=os.path.join(self.server_dir,"templates")
        logging.debug('server_dir:%s' % self.server_dir)

    def __getattr__(self, name):
        # Only invoked for attributes not found normally, i.e. template
        # names; returns (and caches) the template's path.
        if self.mapping_cache.has_key(name):
            return self.mapping_cache[name]
        else:
            path = "/".join((self.name, 'templates', name + '.html'))
            logging.debug('path:%s' % path)
            ## if not os.path.exists(path):
            ##     path = os.path.join(rootpath, 'themes', 'default', 'templates', name + '.html')
            ##     if not os.path.exists(path):
            ##         path = None
            self.mapping_cache[name] = path
            return path
class ThemeIterator:
    """Iterates theme names under theme_path.

    Every directory entry is yielded; a trailing '.zip' is stripped so
    zipped and unpacked themes read the same.  The directory is listed
    lazily on the first next() call.
    """

    def __init__(self, theme_path='themes'):
        self.iterating = False
        self.theme_path = theme_path
        self.list = []

    def __iter__(self):
        return self

    def next(self):
        if not self.iterating:
            # first call: snapshot the directory contents
            self.iterating = True
            self.list = os.listdir(self.theme_path)
            self.cursor = 0
        if self.cursor >= len(self.list):
            self.iterating = False
            raise StopIteration
        name = self.list[self.cursor]
        self.cursor += 1
        if name.endswith('.zip'):
            return name[:-4]
        return name
class LangIterator:
    """Iterates available locales under `path`, yielding {'code','lang'} dicts.

    A locale is any sub-directory that contains LC_MESSAGES; its display
    name comes from the first line of an optional 'language' file, falling
    back to the locale code itself.
    """

    def __init__(self, path='locale'):
        self.iterating = False
        self.path = path
        self.list = []
        # eager pre-scan of the locale directory
        for value in os.listdir(self.path):
            if os.path.isdir(os.path.join(self.path, value)):
                if os.path.exists(os.path.join(self.path, value, 'LC_MESSAGES')):
                    try:
                        lang = open(os.path.join(self.path, value, 'language')).readline()
                        self.list.append({'code': value, 'lang': lang})
                    except:
                        # no readable 'language' file: use the code as label
                        self.list.append({'code': value, 'lang': value})

    def __iter__(self):
        return self

    def next(self):
        if not self.iterating:
            self.iterating = True
            self.cursor = 0
        if self.cursor >= len(self.list):
            self.iterating = False
            raise StopIteration
        else:
            value = self.list[self.cursor]
            self.cursor += 1
            return value

    def getlang(self, language):
        """Best match for `language` (e.g. 'en-us'), defaulting to en_US.

        Matches either the raw code or Django's locale form (en-us -> en_US).
        """
        from django.utils.translation import to_locale
        for item in self.list:
            if item['code'] == language or item['code'] == to_locale(language):
                return item
        return {'code': 'en_US', 'lang': 'English'}
class BaseModel(db.Model):
    """Datastore model that tracks a "dirty" flag and fires per-property
    `<name>_onchange(old, new)` hooks when a property value changes."""

    def __init__(self, parent=None, key_name=None, _app=None, **kwds):
        self.__isdirty = False
        # Bug fix: the original passed hard-coded None for parent/key_name/
        # _app, silently discarding whatever the caller supplied; forward
        # the actual arguments instead.
        DBModel.__init__(self, parent=parent, key_name=key_name, _app=_app, **kwds)

    def __setattr__(self, attrname, value):
        """
        DataStore api stores all prop values say "email" is stored in "_email" so
        we intercept the set attribute, see if it has changed, then check for an
        onchanged method for that property to call
        """
        if (attrname.find('_') != 0):
            # public attribute: compare against the stored "_<name>" value
            if hasattr(self, '_' + attrname):
                curval = getattr(self, '_' + attrname)
                if curval != value:
                    self.__isdirty = True
                    if hasattr(self, attrname + '_onchange'):
                        getattr(self, attrname + '_onchange')(curval, value)
        DBModel.__setattr__(self, attrname, value)
class Cache(db.Model):
    """Datastore-backed cache entry: rendered content stored under a key.

    NOTE(review): nothing in this file reads or writes Cache — presumably
    used by other modules; confirm before removing.
    """
    cachekey = db.StringProperty(multiline=False)
    content = db.TextProperty()
class Blog(db.Model):
    """Singleton-style blog configuration/state entity.

    Holds site-wide settings (titles, URLs, comment policy, sitemap
    options) plus non-persisted class attributes (theme, langs,
    application, plugins) that are populated at runtime.
    """
    owner = db.UserProperty()
    author=db.StringProperty(default='admin')
    rpcuser=db.StringProperty(default='admin')
    rpcpassword=db.StringProperty(default='')
    description = db.TextProperty()
    baseurl = db.StringProperty(multiline=False,default=None)
    urlpath = db.StringProperty(multiline=False)
    title = db.StringProperty(multiline=False,default='Micolog')
    subtitle = db.StringProperty(multiline=False,default='This is a micro blog.')
    entrycount = db.IntegerProperty(default=0)
    posts_per_page= db.IntegerProperty(default=10)
    feedurl = db.StringProperty(multiline=False,default='/feed')
    blogversion = db.StringProperty(multiline=False,default='0.30')
    theme_name = db.StringProperty(multiline=False,default='default')
    enable_memcache = db.BooleanProperty(default = False)
    link_format=db.StringProperty(multiline=False,default='%(year)s/%(month)s/%(day)s/%(postname)s.html')
    comment_notify_mail=db.BooleanProperty(default=True)
    # comment display order (0 = oldest first, nonzero = newest first; see Entry.comments)
    comments_order=db.IntegerProperty(default=0)
    # number of comments per page
    comments_per_page=db.IntegerProperty(default=20)
    # comment check type: 0-none 1-arithmetic 2-captcha 3-client-side computation
    comment_check_type=db.IntegerProperty(default=1)
    blognotice=db.TextProperty(default='')
    domain=db.StringProperty()
    show_excerpt=db.BooleanProperty(default=True)
    # plain class attribute, not a datastore property: code version marker
    version=0.713
    timedelta=db.FloatProperty(default=8.0)# hours offset from UTC
    language=db.StringProperty(default="en-us")
    sitemap_entries=db.IntegerProperty(default=30)
    sitemap_include_category=db.BooleanProperty(default=False)
    sitemap_include_tag=db.BooleanProperty(default=False)
    sitemap_ping=db.BooleanProperty(default=False)
    default_link_format=db.StringProperty(multiline=False,default='?p=%(post_id)s')
    # runtime-only class attributes (not persisted)
    default_theme=Theme("default")
    allow_pingback=db.BooleanProperty(default=False)
    allow_trackback=db.BooleanProperty(default=False)
    theme=None
    langs=None
    application=None

    def __init__(self,
                 parent=None,
                 key_name=None,
                 _app=None,
                 _from_entity=False,
                 **kwds):
        # imported here to avoid a circular import at module load time
        from micolog_plugin import Plugins
        self.plugins = Plugins(self)
        db.Model.__init__(self, parent, key_name, _app, _from_entity, **kwds)

    def tigger_filter(self,name,content,*arg1,**arg2):
        # delegate to the plugin system ("tigger" [sic] is the public API name)
        return self.plugins.tigger_filter(name,content,blog=self,*arg1,**arg2)

    def tigger_action(self,name,*arg1,**arg2):
        return self.plugins.tigger_action(name,blog=self,*arg1,**arg2)

    def tigger_urlmap(self,url,*arg1,**arg2):
        return self.plugins.tigger_urlmap(url,blog=self,*arg1,**arg2)

    def get_ziplist(self):
        # NOTE(review): Plugins defines no get_ziplist in this file —
        # confirm it exists elsewhere or this is dead code.
        return self.plugins.get_ziplist();

    def save(self):
        self.put()

    def initialsetup(self):
        # first-run defaults shown until the admin edits them
        self.title = 'Your Blog Title'
        self.subtitle = 'Your Blog Subtitle'

    def get_theme(self):
        self.theme= Theme(self.theme_name);
        return self.theme

    def get_langs(self):
        self.langs=LangIterator()
        return self.langs

    def cur_language(self):
        # {'code','lang'} dict for the configured language
        return self.get_langs().getlang(self.language)

    def rootpath(self):
        return rootpath

    @vcache("blog.hotposts")
    def hotposts(self):
        # eight most-read published posts
        return Entry.all().filter('entrytype =','post').filter("published =", True).order('-readtimes').fetch(8)

    @vcache("blog.recentposts")
    def recentposts(self):
        # eight newest published posts
        return Entry.all().filter('entrytype =','post').filter("published =", True).order('-date').fetch(8)

    @vcache("blog.postscount")
    def postscount(self):
        # total number of published posts
        return Entry.all().filter('entrytype =','post').filter("published =", True).order('-date').count()
class Category(db.Model):
    """Post category; supports a hierarchy via parent_cat and a
    WordPress-compatible numeric id (uid) for legacy entities."""
    uid=db.IntegerProperty()
    name=db.StringProperty(multiline=False)
    slug=db.StringProperty(multiline=False)
    parent_cat=db.SelfReferenceProperty()

    @property
    def posts(self):
        # published posts assigned to this category
        return Entry.all().filter('entrytype =','post').filter("published =", True).filter('categorie_keys =',self)

    @property
    def count(self):
        return self.posts.count()

    def put(self):
        db.Model.put(self)
        g_blog.tigger_action("save_category",self)

    def delete(self):
        # detach this category from every entry before deleting it
        for entry in Entry.all().filter('categorie_keys =',self):
            entry.categorie_keys.remove(self.key())
            entry.put()
        db.Model.delete(self)
        g_blog.tigger_action("delete_category",self)

    def ID(self):
        """Return a stable numeric id: datastore id, stored uid, or a
        freshly generated unique random uid."""
        try:
            id=self.key().id()
            if id:
                return id
        except:
            pass
        if self.uid :
            return self.uid
        else:
            # Legacy Category entities have no ID; generate a unique random
            # uid for WordPress compatibility (translated from the Chinese
            # original).
            from random import randint
            uid=randint(0,99999999)
            cate=Category.all().filter('uid =',uid).get()
            while cate:
                uid=randint(0,99999999)
                cate=Category.all().filter('uid =',uid).get()
            self.uid=uid
            print uid
            self.put()
            return uid

    @classmethod
    def get_from_id(cls,id):
        # look up by datastore id first, then by legacy uid
        cate=Category.get_by_id(id)
        if cate:
            return cate
        else:
            cate=Category.all().filter('uid =',id).get()
            return cate

    @property
    def children(self):
        key=self.key()
        return [c for c in Category.all().filter('parent_cat =',self)]

    @classmethod
    def allTops(self):
        # NOTE(review): declared @classmethod but first parameter is named
        # 'self'; works, but the name is misleading.
        return [c for c in Category.all() if not c.parent_cat]
class Archive(db.Model):
    """Per-month archive bucket (e.g. "September 2008") with a post count;
    maintained by Entry.update_archive()."""
    monthyear = db.StringProperty(multiline=False)
    year = db.StringProperty(multiline=False)
    month = db.StringProperty(multiline=False)
    entrycount = db.IntegerProperty(default=0)
    date = db.DateTimeProperty(auto_now_add=True)
class Tag(db.Model):
    """A post tag with a reference count, keyed by the tag text."""
    tag = db.StringProperty(multiline=False)
    tagcount = db.IntegerProperty(default=0)

    @property
    def posts(self):
        """Published posts carrying this tag.

        Bug fix: the original called Entry.all('entrytype =','post'),
        passing filter arguments to Query's constructor (a TypeError);
        use .filter() as the sibling Category.posts does.
        """
        return Entry.all().filter('entrytype =', 'post').filter("published =", True).filter('tags =', self)

    @classmethod
    def add(cls, value):
        """Increment (creating if necessary) the tag named value; return it.

        Returns None for a falsy value.
        """
        if value:
            tag = Tag.get_by_key_name(value)
            if not tag:
                tag = Tag(key_name=value)
                tag.tag = value
            tag.tagcount += 1
            tag.put()
            return tag
        else:
            return None

    @classmethod
    def remove(cls, value):
        """Decrement the tag's count, deleting it once it reaches zero.

        Bug fix: the original decremented tagcount without calling put(),
        so the new count was never persisted.
        """
        if value:
            tag = Tag.get_by_key_name(value)
            if tag:
                if tag.tagcount > 1:
                    tag.tagcount -= 1
                    tag.put()
                else:
                    tag.delete()
class Link(db.Model):
    """A blogroll (sidebar) link."""
    href = db.StringProperty(multiline=False,default='')
    linktype = db.StringProperty(multiline=False,default='blogroll')
    linktext = db.StringProperty(multiline=False,default='')
    linkcomment = db.StringProperty(multiline=False,default='')
    createdate=db.DateTimeProperty(auto_now=True)

    @property
    def get_icon_url(self):
        "Return the favicon URL of the linked website (site root + /favicon.ico)."
        ico_path = '/favicon.ico'
        # everything up to the first '/' after the scheme is the site root
        ix = self.href.find('/',len('http://') )
        return (ix>0 and self.href[:ix] or self.href ) + ico_path

    def put(self):
        db.Model.put(self)
        g_blog.tigger_action("save_link",self)

    def delete(self):
        db.Model.delete(self)
        g_blog.tigger_action("delete_link",self)
class Entry(BaseModel):
author = db.UserProperty()
author_name = db.StringProperty()
published = db.BooleanProperty(default=False)
content = db.TextProperty(default='')
readtimes = db.IntegerProperty(default=0)
title = db.StringProperty(multiline=False,default='')
date = db.DateTimeProperty(auto_now_add=True)
mod_date = db.DateTimeProperty(auto_now_add=True)
tags = db.StringListProperty()
categorie_keys=db.ListProperty(db.Key)
slug = db.StringProperty(multiline=False,default='')
link= db.StringProperty(multiline=False,default='')
monthyear = db.StringProperty(multiline=False)
entrytype = db.StringProperty(multiline=False,default='post',choices=[
'post','page'])
entry_parent=db.IntegerProperty(default=0)#When level=0 show on main menu.
menu_order=db.IntegerProperty(default=0)
commentcount = db.IntegerProperty(default=0)
allow_comment = db.BooleanProperty(default=True) #allow comment
#allow_pingback=db.BooleanProperty(default=False)
allow_trackback=db.BooleanProperty(default=True)
password=db.StringProperty()
#compatible with wordpress
is_wp=db.BooleanProperty(default=False)
post_id= db.IntegerProperty()
excerpt=db.StringProperty(multiline=True)
#external page
is_external_page=db.BooleanProperty(default=False)
target=db.StringProperty(default="_self")
external_page_address=db.StringProperty()
#keep in top
sticky=db.BooleanProperty(default=False)
postname=''
_relatepost=None
@property
def content_excerpt(self):
return self.get_content_excerpt(_('..more').decode('utf8'))
def get_author_user(self):
if not self.author:
self.author=g_blog.owner
return User.all().filter('email =',self.author.email()).get()
def get_content_excerpt(self,more='..more'):
if g_blog.show_excerpt:
if self.excerpt:
return self.excerpt+' <a href="/%s">%s</a>'%(self.link,more)
else:
sc=self.content.split('<!--more-->')
if len(sc)>1:
return sc[0]+u' <a href="/%s">%s</a>'%(self.link,more)
else:
return sc[0]
else:
return self.content
def slug_onchange(self,curval,newval):
if not (curval==newval):
self.setpostname(newval)
def setpostname(self,newval):
#check and fix double slug
if newval:
slugcount=Entry.all()\
.filter('entrytype',self.entrytype)\
.filter('date <',self.date)\
.filter('slug =',newval)\
.filter('published',True)\
.count()
if slugcount>0:
self.postname=newval+str(slugcount)
else:
self.postname=newval
else:
self.postname=""
@property
def fullurl(self):
return g_blog.baseurl+'/'+self.link;
@property
def categories(self):
try:
return db.get(self.categorie_keys)
except:
return []
@property
def post_status(self):
return self.published and 'publish' or 'draft'
def settags(self,values):
if not values:tags=[]
if type(values)==type([]):
tags=values
else:
tags=values.split(',')
if not self.tags:
removelist=[]
addlist=tags
else:
#search different tags
removelist=[n for n in self.tags if n not in tags]
addlist=[n for n in tags if n not in self.tags]
for v in removelist:
Tag.remove(v)
for v in addlist:
Tag.add(v)
self.tags=tags
def get_comments_by_page(self,index,psize):
return self.comments().fetch(psize,offset = (index-1) * psize)
@property
def strtags(self):
return ','.join(self.tags)
@property
def edit_url(self):
return '/admin/%s?key=%s&action=edit'%(self.entrytype,self.key())
def comments(self):
if g_blog.comments_order:
return Comment.all().filter('entry =',self).order('-date')
else:
return Comment.all().filter('entry =',self).order('date')
def commentsTops(self):
return [c for c in self.comments() if c.parent_key()==None]
def delete_comments(self):
cmts = Comment.all().filter('entry =',self)
for comment in cmts:
comment.delete()
self.commentcount = 0
def update_archive(self,cnt=1):
"""Checks to see if there is a month-year entry for the
month of current blog, if not creates it and increments count"""
my = self.date.strftime('%B %Y') # September-2008
sy = self.date.strftime('%Y') #2008
sm = self.date.strftime('%m') #09
archive = Archive.all().filter('monthyear',my).get()
if self.entrytype == 'post':
if not archive:
archive = Archive(monthyear=my,year=sy,month=sm,entrycount=1)
self.monthyear = my
archive.put()
else:
# ratchet up the count
archive.entrycount += cnt
archive.put()
g_blog.entrycount+=cnt
g_blog.put()
def save(self,is_publish=False):
"""
Use this instead of self.put(), as we do some other work here
@is_pub:Check if need publish id
"""
g_blog.tigger_action("pre_save_post",self,is_publish)
my = self.date.strftime('%B %Y') # September 2008
self.monthyear = my
old_publish=self.published
self.mod_date=datetime.now()
if is_publish:
if not self.is_wp:
self.put()
self.post_id=self.key().id()
#fix for old version
if not self.postname:
self.setpostname(self.slug)
vals={'year':self.date.year,'month':str(self.date.month).zfill(2),'day':self.date.day,
'postname':self.postname,'post_id':self.post_id}
if self.entrytype=='page':
if self.slug:
self.link=self.postname
else:
#use external page address as link
if self.is_external_page:
self.link=self.external_page_address
else:
self.link=g_blog.default_link_format%vals
else:
if g_blog.link_format and self.postname:
self.link=g_blog.link_format.strip()%vals
else:
self.link=g_blog.default_link_format%vals
self.published=is_publish
self.put()
if is_publish:
if g_blog.sitemap_ping:
Sitemap_NotifySearch()
if old_publish and not is_publish:
self.update_archive(-1)
if not old_publish and is_publish:
self.update_archive(1)
self.removecache()
self.put()
g_blog.tigger_action("save_post",self,is_publish)
def removecache(self):
memcache.delete('/')
memcache.delete('/'+self.link)
memcache.delete('/sitemap')
memcache.delete('blog.postcount')
g_blog.tigger_action("clean_post_cache",self)
@property
def next(self):
return Entry.all().filter('entrytype =','post').filter("published =", True).order('post_id').filter('post_id >',self.post_id).fetch(1)
@property
def prev(self):
return Entry.all().filter('entrytype =','post').filter("published =", True).order('-post_id').filter('post_id <',self.post_id).fetch(1)
@property
def relateposts(self):
if self._relatepost:
return self._relatepost
else:
if self.tags:
self._relatepost= Entry.gql("WHERE published=True and tags IN :1 and post_id!=:2 order by post_id desc ",self.tags,self.post_id).fetch(5)
else:
self._relatepost= []
return self._relatepost
@property
def trackbackurl(self):
if self.link.find("?")>-1:
return g_blog.baseurl+"/"+self.link+"&code="+str(self.key())
else:
return g_blog.baseurl+"/"+self.link+"?code="+str(self.key())
    def getbylink(self):
        # Placeholder: lookup-by-link was never implemented.
        pass
    def delete(self):
        """Delete the entry, its comments, and its archive count; fire plugin hooks."""
        g_blog.tigger_action("pre_delete_post",self)
        if self.published:
            # Only published entries were counted in the monthly archive.
            self.update_archive(-1)
        self.delete_comments()
        db.Model.delete(self)
        g_blog.tigger_action("delete_post",self)
class User(db.Model):
    """A blog author/admin account, optionally linked to a Google account."""
    user = db.UserProperty(required = False)
    dispname = db.StringProperty()
    email=db.StringProperty()
    website = db.LinkProperty()
    isadmin=db.BooleanProperty(default=False)
    isAuthor=db.BooleanProperty(default=True)
    #rpcpwd=db.StringProperty()
    def __unicode__(self):
        # NOTE(review): returns None if dispname was never set — TODO confirm
        # callers always populate dispname.
        #if self.dispname:
        return self.dispname
        #else:
        #    return self.user.nickname()
    def __str__(self):
        return self.__unicode__().encode('utf-8')
# Comment.ctype values: how the comment arrived.
COMMENT_NORMAL=0        # posted via the site's comment form
COMMENT_TRACKBACK=1     # received via the Trackback protocol
COMMENT_PINGBACK=2      # received via XML-RPC pingback
class Comment(db.Model):
    """A comment, trackback, or pingback attached to an Entry.

    Threaded replies are stored with their parent comment as the datastore
    ancestor (see `children`).
    """
    entry = db.ReferenceProperty(Entry)
    date = db.DateTimeProperty(auto_now_add=True)
    content = db.TextProperty(required=True)
    author=db.StringProperty()
    email=db.EmailProperty()
    weburl=db.URLProperty()
    status=db.IntegerProperty(default=0)
    reply_notify_mail=db.BooleanProperty(default=False)
    ip=db.StringProperty()
    ctype=db.IntegerProperty(default=COMMENT_NORMAL)
    @property
    def shortcontent(self,length=20):
        """First `length` chars of the comment, tags stripped, HTML-escaped.

        `length` was previously named `len`, shadowing the builtin; as a
        property the argument can never be supplied, so only the default is
        ever used.
        """
        scontent=self.content
        scontent=re.sub(r'<br\s*/>',' ',scontent)
        scontent=re.sub(r'<[^>]+>','',scontent)
        scontent=re.sub(r'(@[\S]+)-\d+[:]',r'\1:',scontent)
        # Escape angle brackets so the snippet is safe to embed in markup.
        # (The previous replace('<','<')/replace('>','>') was a no-op — the
        # HTML entities had been lost in transit.)
        return scontent[:length].replace('<','&lt;').replace('>','&gt;')
    def gravatar_url(self):
        """Return the commenter's gravatar image URL, or a local default."""
        # Set your variables here
        default = g_blog.baseurl+'/static/images/homsar.jpeg'
        if not self.email:
            return default
        size = 50
        try:
            # construct the url
            imgurl = "http://www.gravatar.com/avatar/"
            imgurl +=hashlib.md5(self.email).hexdigest()+"?"+ urllib.urlencode({
                'd':default, 's':str(size),'r':'G'})
            return imgurl
        except Exception:
            # Best-effort: any failure (e.g. non-ascii email) falls back.
            return default
    def save(self):
        """Persist the comment and bump the entry's comment count and cache."""
        self.put()
        self.entry.commentcount+=1
        self.entry.put()
        memcache.delete("/"+self.entry.link)
    def delit(self):
        """Delete the comment and decrement the entry's comment count."""
        self.entry.commentcount-=1
        self.entry.put()
        self.delete()
    def put(self):
        # Wrap datastore put with plugin hooks.
        g_blog.tigger_action("pre_comment",self)
        db.Model.put(self)
        g_blog.tigger_action("save_comment",self)
    def delete(self):
        db.Model.delete(self)
        g_blog.tigger_action("delete_comment",self)
    @property
    def children(self):
        # Direct replies only: ancestor() returns the whole subtree, so keep
        # just the comments whose immediate parent is this one.
        key=self.key()
        comments=Comment.all().ancestor(self)
        return [c for c in comments if c.parent_key()==key]
class Media(db.Model):
    """An uploaded file (image/attachment) stored as a blob in the datastore."""
    name =db.StringProperty()
    mtype=db.StringProperty()
    bits=db.BlobProperty()
    date=db.DateTimeProperty(auto_now_add=True)
    download=db.IntegerProperty(default=0)
    @property
    def size(self):
        # Size in bytes of the stored blob.
        return len(self.bits)
class OptionSet(db.Model):
    """Named, pickled settings stored one entity per option, keyed by name."""
    name=db.StringProperty()
    value=db.TextProperty()
    #blobValue=db.BlobProperty()
    #isBlob=db.BooleanProperty()
    @classmethod
    def getValue(cls,name,default=None):
        # Best-effort read: a missing entity or unpicklable value yields
        # `default`. NOTE(review): pickle.loads on stored text — safe only
        # while the datastore contents are trusted.
        try:
            opt=OptionSet.get_by_key_name(name)
            return pickle.loads(str(opt.value))
        except:
            return default
    @classmethod
    def setValue(cls,name,value):
        # get_or_insert uses `name` as the entity key_name, so getValue and
        # setValue address the same entity.
        opt=OptionSet.get_or_insert(name)
        opt.name=name
        opt.value=pickle.dumps(value)
        opt.put()
    @classmethod
    def remove(cls,name):
        opt= OptionSet.get_by_key_name(name)
        if opt:
            opt.delete()
# Search-engine ping targets as (scheme, host, path, query-dict, fragment,
# sitemap-query-attr) tuples; Sitemap_NotifySearch fills the named query
# attribute with the sitemap URL before pinging.
NOTIFICATION_SITES = [
    ('http', 'www.google.com', 'webmasters/sitemaps/ping', {}, '', 'sitemap')
    ]
def Sitemap_NotifySearch():
    """ Send notification of the new Sitemap(s) to the search engines. """
    sitemap_url = g_blog.baseurl + "/sitemap"
    # Each target describes one ping endpoint; see NOTIFICATION_SITES.
    for scheme, host, path, query_map, fragment, query_attr in NOTIFICATION_SITES:
        query_map[query_attr] = sitemap_url
        query = urllib.urlencode(query_map)
        ping_url = urlparse.urlunsplit((scheme, host, path, query, fragment))
        try:
            urlfetch.fetch(ping_url)
        except:
            logging.error('Cannot contact: %s' % host)
def InitBlogData():
    """One-time bootstrap: create the default Blog entity, first post and link.

    Called by gblog_init() when no Blog entity named 'default' exists yet.
    Side effects: writes datastore entities and activates a Django locale.
    """
    global g_blog
    OptionSet.setValue('PluginActive',[u'googleAnalytics', u'wordpress', u'sys_plugin'])
    g_blog = Blog(key_name = 'default')
    g_blog.domain=os.environ['HTTP_HOST']
    g_blog.baseurl="http://"+g_blog.domain
    g_blog.feedurl=g_blog.baseurl+"/feed"
    os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
    lang="zh-cn"
    if os.environ.has_key('HTTP_ACCEPT_LANGUAGE'):
        # Use the visitor's first preferred language as the blog default.
        lang=os.environ['HTTP_ACCEPT_LANGUAGE'].split(',')[0]
    from django.utils.translation import activate,to_locale
    g_blog.language=to_locale(lang)
    from django.conf import settings
    # Force Django to re-read settings before activating the locale.
    settings._target = None
    activate(g_blog.language)
    g_blog.save()
    entry=Entry(title=_("Hello world!").decode('utf8'))
    entry.content=_('<p>Welcome to micolog. This is your first post. Edit or delete it, then start blogging!</p>').decode('utf8')
    entry.save(True)
    link=Link(href='http://xuming.net',linktext=_("Xuming's blog").decode('utf8'))
    link.put()
    return g_blog
def gblog_init():
    """Return the module-global Blog singleton, creating it on first run."""
    global g_blog
    try:
        # Fast path: already initialised in this interpreter instance.
        if g_blog :
            return g_blog
    except:
        # NameError: g_blog not yet defined in this process.
        pass
    g_blog = Blog.get_by_key_name('default')
    if not g_blog:
        g_blog=InitBlogData()
    g_blog.get_theme()
    g_blog.rootdir=os.path.dirname(__file__)
    return g_blog
# Import-time side effect: initialise the blog singleton and activate its
# language. All failures are swallowed so importing this module never breaks
# (e.g. on first deploy before the datastore is populated).
try:
    g_blog=gblog_init()
    os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
    from django.utils.translation import activate
    from django.conf import settings
    settings._target = None
    activate(g_blog.language)
except:
    pass
| Python |
# -*- coding: utf-8 -*-
import cgi, os,sys,traceback
import wsgiref.handlers
##os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
##from django.conf import settings
##settings._target = None
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from django.utils.translation import check_for_language, activate, to_locale, get_language
from django.conf import settings
settings._target = None
from google.appengine.ext.webapp import template, \
WSGIApplication
from google.appengine.api import users
#import app.webapp as webapp2
from google.appengine.ext import db
from google.appengine.ext import zipserve
from google.appengine.api import urlfetch
from google.appengine.api import memcache
from google.appengine.api.labs import taskqueue
from datetime import datetime ,timedelta
import base64,random,math,zipfile
from django.utils import simplejson
import pickle
from base import *
from model import *
from app.trackback import TrackBack
import xmlrpclib
from xmlrpclib import Fault
class Error404(BaseRequestHandler):
    """Catch-all route: renders the admin 404 page."""
    #@printinfo
    def get(self,slug=None):
        self.render2('views/admin/404.html')
class setlanguage(BaseRequestHandler):
    """Switch the blog UI language and bounce back to the referring page."""
    def get(self):
        lang_code = self.param('language')
        next = self.param('next')
        if (not next) and os.environ.has_key('HTTP_REFERER'):
            # Fall back to the referrer, then to the site root.
            next = os.environ['HTTP_REFERER']
        if not next:
            next = '/'
        os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
        from django.utils.translation import check_for_language, activate, to_locale, get_language
        from django.conf import settings
        # Reset Django's lazy settings so the new locale takes effect.
        settings._target = None
        if lang_code and check_for_language(lang_code):
            g_blog.language=lang_code
            activate(g_blog.language)
            g_blog.save()
        self.redirect(next)
##        if hasattr(request, 'session'):
##            request.session['django_language'] = lang_code
##        else:
##            cookiestr='django_language=%s;expires=%s;domain=%s;path=/'%( lang_code,
##                       (datetime.now()+timedelta(days=100)).strftime("%a, %d-%b-%Y %H:%M:%S GMT"),
##                       ''
##                       )
##            self.write(cookiestr)
            #self.response.headers.add_header('Set-Cookie', cookiestr)
class admin_do_action(BaseRequestHandler):
@requires_admin
def get(self,slug=None):
try:
func=getattr(self,'action_'+slug)
if func and callable(func):
func()
else:
self.render2('views/admin/error.html',{'message':_('This operate has not defined!')})
except:
self.render2('views/admin/error.html',{'message':_('This operate has not defined!')})
@requires_admin
def post(self,slug=None):
try:
func=getattr(self,'action_'+slug)
if func and callable(func):
func()
else:
self.render2('views/admin/error.html',{'message':_('This operate has not defined!')})
except:
self.render2('views/admin/error.html',{'message':_('This operate has not defined!')})
def action_test(self):
self.write(os.environ)
def action_cacheclear(self):
memcache.flush_all()
self.write(_('"Cache cleared successful"'))
def action_updatecomments(self):
for entry in Entry.all():
cnt=entry.comments().count()
if cnt<>entry.commentcount:
entry.commentcount=cnt
entry.put()
self.write(_('"All comments updated"'))
def action_updatelink(self):
link_format=self.param('linkfmt')
if link_format:
link_format=link_format.strip()
g_blog.link_format=link_format
g_blog.save()
for entry in Entry.all():
vals={'year':entry.date.year,'month':str(entry.date.month).zfill(2),'day':entry.date.day,
'postname':entry.slug,'post_id':entry.post_id}
if entry.slug:
newlink=link_format%vals
else:
newlink=g_blog.default_link_format%vals
if entry.link<>newlink:
entry.link=newlink
entry.put()
self.write(_('"Link formated succeed"'))
else:
self.write(_('"Please input url format."'))
def action_init_blog(self,slug=None):
for com in Comment.all():
com.delete()
for entry in Entry.all():
entry.delete()
g_blog.entrycount=0
self.write(_('"Init has succeed."'))
def action_update_tags(self,slug=None):
for tag in Tag.all():
tag.delete()
for entry in Entry.all().filter('entrytype =','post'):
if entry.tags:
for t in entry.tags:
try:
Tag.add(t)
except:
traceback.print_exc()
self.write(_('"All tags for entry have been updated."'))
def action_update_archives(self,slug=None):
for archive in Archive.all():
archive.delete()
entries=Entry.all().filter('entrytype =','post')
archives={}
for entry in entries:
my = entry.date.strftime('%B %Y') # September-2008
sy = entry.date.strftime('%Y') #2008
sm = entry.date.strftime('%m') #09
if archives.has_key(my):
archive=archives[my]
archive.entrycount+=1
else:
archive = Archive(monthyear=my,year=sy,month=sm,entrycount=1)
archives[my]=archive
for ar in archives.values():
ar.put()
self.write(_('"All entries have been updated."'))
def action_trackback_ping(self):
tbUrl=self.param('tbUrl')
title=self.param('title')
excerpt=self.param('excerpt')
url=self.param('url')
blog_name=self.param('blog_name')
tb=TrackBack(tbUrl,title,excerpt,url,blog_name)
tb.ping()
def action_pingback_ping(self):
"""Try to notify the server behind `target_uri` that `source_uri`
points to `target_uri`. If that fails an `PingbackError` is raised.
"""
source_uri=self.param('source')
target_uri=self.param('target')
try:
response =urlfetch.fetch(target_uri)
except:
raise PingbackError(32)
try:
pingback_uri = response.headers['X-Pingback']
except KeyError:
_pingback_re = re.compile(r'<link rel="pingback" href="([^"]+)" ?/?>(?i)')
match = _pingback_re.search(response.data)
if match is None:
raise PingbackError(33)
pingback_uri =urldecode(match.group(1))
rpc = xmlrpclib.ServerProxy(pingback_uri)
try:
return rpc.pingback.ping(source_uri, target_uri)
except Fault, e:
raise PingbackError(e.faultCode)
except:
raise PingbackError(32)
class admin_tools(BaseRequestHandler):
    """Static admin 'tools' page."""
    def __init__(self):
        # Highlights the 'config' tab in the admin navigation.
        self.current="config"
    @requires_admin
    def get(self,slug=None):
        self.render2('views/admin/tools.html')
class admin_sitemap(BaseRequestHandler):
    """Sitemap settings page; POST bulk-applies typed option groups to g_blog."""
    def __init__(self):
        self.current="config"
    @requires_admin
    def get(self,slug=None):
        self.render2('views/admin/sitemap.html')
    @requires_admin
    def post(self):
        # The form enumerates its own field names per type in hidden inputs
        # (str_options/bool_options/int_options/float_options); each named
        # field is coerced and copied onto the blog singleton.
        str_options= self.param('str_options').split(',')
        for name in str_options:
            value=self.param(name)
            setattr(g_blog,name,value)
        bool_options= self.param('bool_options').split(',')
        for name in bool_options:
            # Checkboxes submit 'on' when checked.
            value=self.param(name)=='on'
            setattr(g_blog,name,value)
        int_options= self.param('int_options').split(',')
        for name in int_options:
            try:
                value=int( self.param(name))
                setattr(g_blog,name,value)
            except:
                # Unparseable numbers leave the old value untouched.
                pass
        float_options= self.param('float_options').split(',')
        for name in float_options:
            try:
                value=float( self.param(name))
                setattr(g_blog,name,value)
            except:
                pass
        g_blog.save()
        self.render2('views/admin/sitemap.html',{})
class admin_import(BaseRequestHandler):
    """Import page: lists plugins that declare themselves import providers."""
    def __init__(self):
        self.current='config'
    @requires_admin
    def get(self,slug=None):
        gblog_init()
        self.render2('views/admin/import.html',{'importitems':
                self.blog.plugins.filter('is_import_plugin',True)})
    # The old task-queue based WordPress importer below was retired in favour
    # of plugin-provided importers; kept for reference.
##    def post(self):
##        try:
##            queue=taskqueue.Queue("import")
##            wpfile=self.param('wpfile')
##            #global imt
##            imt=import_wordpress(wpfile)
##            imt.parse()
##            memcache.set("imt",imt)
##
####            import_data=OptionSet.get_or_insert(key_name="import_data")
####            import_data.name="import_data"
####            import_data.bigvalue=pickle.dumps(imt)
####            import_data.put()
##
##            queue.add(taskqueue.Task( url="/admin/import_next"))
##            self.render2('views/admin/import.html',
##                         {'postback':True})
##            return
##            memcache.set("import_info",{'count':len(imt.entries),'msg':'Begin import...','index':1})
##            #self.blog.import_info={'count':len(imt.entries),'msg':'Begin import...','index':1}
##            if imt.categories:
##                queue.add(taskqueue.Task( url="/admin/import_next",params={'cats': pickle.dumps(imt.categories),'index':1}))
##
##                return
##            index=0
##            if imt.entries:
##                for entry in imt.entries :
##                    try:
##                        index=index+1
##                        queue.add(taskqueue.Task(url="/admin/import_next",params={'entry':pickle.dumps(entry),'index':index}))
##                    except:
##                        pass
##
##        except:
##            self.render2('views/admin/import.html',{'error':'import faiure.'})
class admin_setup(BaseRequestHandler):
    """Main blog configuration page; POST applies typed option groups."""
    def __init__(self):
        self.current='config'
    @requires_admin
    def get(self,slug=None):
        vals={'themes':ThemeIterator()}
        self.render2('views/admin/setup.html',vals)
    @requires_admin
    def post(self):
        # Same typed-option scheme as admin_sitemap: the form lists its field
        # names per type; each is coerced and set on the blog singleton.
        old_theme=g_blog.theme_name
        str_options= self.param('str_options').split(',')
        for name in str_options:
            value=self.param(name)
            setattr(g_blog,name,value)
        bool_options= self.param('bool_options').split(',')
        for name in bool_options:
            value=self.param(name)=='on'
            setattr(g_blog,name,value)
        int_options= self.param('int_options').split(',')
        for name in int_options:
            try:
                value=int( self.param(name))
                setattr(g_blog,name,value)
            except:
                pass
        float_options= self.param('float_options').split(',')
        for name in float_options:
            try:
                value=float( self.param(name))
                setattr(g_blog,name,value)
            except:
                pass
        if old_theme !=g_blog.theme_name:
            # Theme changed: reload theme resources.
            g_blog.get_theme()
        g_blog.owner=self.login_user
        g_blog.author=g_blog.owner.nickname()
        g_blog.save()
        gblog_init()
        vals={'themes':ThemeIterator()}
        memcache.flush_all()
        self.render2('views/admin/setup.html',vals)
class admin_entry(BaseRequestHandler):
    """Write/edit form for a post or page (`slug` selects the entry type)."""
    def __init__(self):
        self.current='write'
    @requires_admin
    def get(self,slug='post'):
        """Render the entry form, pre-filled when action=edit."""
        action=self.param("action")
        entry=None
        cats=Category.all()
        if action and action=='edit':
            try:
                key=self.param('key')
                entry=Entry.get(key)
            except:
                # Bad/missing key: fall through with an empty form.
                pass
        else:
            action='add'
        def mapit(cat):
            # Mark categories already attached to the entry as selected.
            return {'name':cat.name,'slug':cat.slug,'select':entry and cat.key() in entry.categorie_keys}
        vals={'action':action,'entry':entry,'entrytype':slug,'cats':map(mapit,cats)}
        self.render2('views/admin/entry.html',vals)
    @requires_admin
    def post(self,slug='post'):
        """Create or update an entry from the submitted form."""
        action=self.param("action")
        title=self.param("post_title")
        content=self.param('content')
        tags=self.param("tags")
        cats=self.request.get_all('cats')
        key=self.param('key')
        # Which submit button was pressed decides the publish state; plain
        # save keeps the previous state carried in the hidden field.
        if self.param('publish')!='':
            published=True
        elif self.param('unpublish')!='':
            published=False
        else:
            published=self.param('published')=='True'
        allow_comment=self.parambool('allow_comment')
        allow_trackback=self.parambool('allow_trackback')
        entry_slug=self.param('slug')
        entry_parent=self.paramint('entry_parent')
        menu_order=self.paramint('menu_order')
        entry_excerpt=self.param('excerpt').replace('\n','<br>')
        password=self.param('password')
        sticky=self.parambool('sticky')
        is_external_page=self.parambool('is_external_page')
        target=self.param('target')
        external_page_address=self.param('external_page_address')
        def mapit(cat):
            return {'name':cat.name,'slug':cat.slug,'select':cat.slug in cats}
        # vals echoes the form back on error and feeds the success render.
        vals={'action':action,'postback':True,'cats':Category.all(),'entrytype':slug,
              'cats':map(mapit,Category.all()),
              'entry':{ 'title':title,'content':content,'strtags':tags,'key':key,'published':published,
                       'allow_comment':allow_comment,
                       'allow_trackback':allow_trackback,
                       'slug':entry_slug,
                       'entry_parent':entry_parent,
                       'excerpt':entry_excerpt,
                       'menu_order':menu_order,
                       'is_external_page':is_external_page,
                       'target':target,
                       'external_page_address':external_page_address,
                       'password':password,
                       'sticky':sticky}
              }
        if not (title and (content or (is_external_page and external_page_address))):
            vals.update({'result':False, 'msg':_('Please input title and content.')})
            self.render2('views/admin/entry.html',vals)
        else:
            if action=='add':
                entry= Entry(title=title,content=content)
                entry.settags(tags)
                entry.entrytype=slug
                entry.slug=entry_slug.replace(" ","-")
                entry.entry_parent=entry_parent
                entry.menu_order=menu_order
                entry.excerpt=entry_excerpt
                entry.is_external_page=is_external_page
                entry.target=target
                entry.external_page_address=external_page_address
                newcates=[]
                entry.allow_comment=allow_comment
                entry.allow_trackback=allow_trackback
                entry.author=self.author.user
                entry.author_name=self.author.dispname
                entry.password=password
                entry.sticky=sticky
                if cats:
                    # Map submitted category slugs back to datastore keys.
                    for cate in cats:
                        c=Category.all().filter('slug =',cate)
                        if c:
                            newcates.append(c[0].key())
                entry.categorie_keys=newcates;
                entry.save(published)
                if published:
                    smsg=_('Saved ok. <a href="/%(link)s" target="_blank">View it now!</a>')
                else:
                    smsg=_('Saved ok.')
                vals.update({'action':'edit','result':True,'msg':smsg%{'link':str(entry.link)},'entry':entry})
                self.render2('views/admin/entry.html',vals)
            elif action=='edit':
                try:
                    entry=Entry.get(key)
                    entry.title=title
                    entry.content=content
                    entry.slug=entry_slug.replace(' ','-')
                    entry.entry_parent=entry_parent
                    entry.menu_order=menu_order
                    entry.excerpt=entry_excerpt
                    entry.is_external_page=is_external_page
                    entry.target=target
                    entry.external_page_address=external_page_address
                    entry.settags(tags)
                    entry.author=self.author.user
                    entry.author_name=self.author.dispname
                    entry.password=password
                    entry.sticky=sticky
                    newcates=[]
                    if cats:
                        for cate in cats:
                            c=Category.all().filter('slug =',cate)
                            if c:
                                newcates.append(c[0].key())
                    entry.categorie_keys=newcates;
                    entry.allow_comment=allow_comment
                    entry.allow_trackback=allow_trackback
                    entry.save(published)
                    if published:
                        smsg=_('Saved ok. <a href="/%(link)s" target="_blank">View it now!</a>')
                    else:
                        smsg=_('Saved ok.')
                    vals.update({'result':True,'msg':smsg%{'link':str(entry.link)},'entry':entry})
                    self.render2('views/admin/entry.html',vals)
                except:
                    # NOTE(review): the `''` inside the literal is adjacent
                    # string concatenation, so this renders as "cant".
                    vals.update({'result':False,'msg':_('Error:Entry can''t been saved.')})
                    self.render2('views/admin/entry.html',vals)
class admin_entries(BaseRequestHandler):
    """Paged listing of posts or pages; POST batch-deletes checked entries."""
    @requires_admin
    def get(self,slug='post'):
        try:
            page_index=int(self.param('page'))
        except:
            page_index=1
        entries=Entry.all().filter('entrytype =',slug).order('-date')
        entries,links=Pager(query=entries,items_per_page=15).fetch(page_index)
        self.render2('views/admin/'+slug+'s.html',
                     {
                      'current':slug+'s',
                      'entries':entries,
                      'pager':links
                      }
                     )
    @requires_admin
    def post(self,slug='post'):
        try:
            linkcheck= self.request.get_all('checks')
            for id in linkcheck:
                kid=int(id)
                entry=Entry.get_by_id(kid)
                #delete it's comments
                #entry.delete_comments()
                entry.delete()
                # NOTE(review): entrycount is decremented but g_blog is not
                # saved here — presumably persisted elsewhere; verify.
                g_blog.entrycount-=1
        finally:
            self.redirect('/admin/entries/'+slug)
class admin_categories(BaseRequestHandler):
    """Category listing; POST batch-deletes checked categories."""
    @requires_admin
    def get(self,slug=None):
        try:
            page_index=int(self.param('page'))
        except:
            page_index=1
        cats=Category.allTops()
        # Pager is evaluated for its links; the template receives the full
        # top-level category list.
        entries,pager=Pager(query=cats,items_per_page=15).fetch(page_index)
        self.render2('views/admin/categories.html',
                     {
                      'current':'categories',
                      'cats':cats,
                      'pager':pager
                      }
                     )
    @requires_admin
    def post(self,slug=None):
        try:
            linkcheck= self.request.get_all('checks')
            for key in linkcheck:
                cat=Category.get(key)
                cat.delete()
        finally:
            self.redirect('/admin/categories')
class admin_comments(BaseRequestHandler):
    """Paged comment list with optional field filter; POST batch-deletes."""
    @requires_admin
    def get(self,slug=None):
        try:
            page_index=int(self.param('page'))
        except:
            page_index=1
        # cq/cv form an optional "<field> = <value>" filter from the UI.
        cq=self.param('cq')
        cv=self.param('cv')
        if cq and cv:
            query=Comment.all().filter(cq+' =',cv).order('-date')
        else:
            query=Comment.all().order('-date')
        comments,pager=Pager(query=query,items_per_page=15).fetch(page_index)
        self.render2('views/admin/comments.html',
                     {
                      'current':'comments',
                      'comments':comments,
                      'pager':pager,
                      'cq':cq,
                      'cv':cv
                      }
                     )
    @requires_admin
    def post(self,slug=None):
        try:
            linkcheck= self.request.get_all('checks')
            for key in linkcheck:
                comment=Comment.get(key)
                # delit() also decrements the entry's comment count.
                comment.delit()
        finally:
            self.redirect(self.request.uri)
class admin_links(BaseRequestHandler):
    """Blogroll listing; POST batch-deletes checked links."""
    @requires_admin
    def get(self,slug=None):
        self.render2('views/admin/links.html',
                     {
                      'current':'links',
                      'links':Link.all().filter('linktype =','blogroll')#.order('-createdate')
                      }
                     )
    @requires_admin
    def post(self):
        linkcheck= self.request.get_all('linkcheck')
        for link_id in linkcheck:
            kid=int(link_id)
            link=Link.get_by_id(kid)
            link.delete()
        self.redirect('/admin/links')
class admin_link(BaseRequestHandler):
    """Add/edit form for a single blogroll link."""
    @requires_admin
    def get(self,slug=None):
        action=self.param("action")
        vals={'current':'links'}
        if action and action=='edit':
            try:
                action_id=int(self.param('id'))
                link=Link.get_by_id(action_id)
                vals.update({'link':link})
            except:
                # Bad/missing id: fall through with an empty form.
                pass
        else:
            action='add'
        vals.update({'action':action})
        self.render2('views/admin/link.html',vals)
    @requires_admin
    def post(self):
        """Create or update a link from the submitted form."""
        action=self.param("action")
        name=self.param("link_name")
        url=self.param("link_url")
        comment = self.param("link_comment")
        vals={'action':action,'postback':True,'current':'links'}
        if not (name and url):
            vals.update({'result':False,'msg':_('Please input name and url.')})
            self.render2('views/admin/link.html',vals)
        else:
            if action=='add':
                link= Link(linktext=name,href=url,linkcomment=comment)
                link.put()
                vals.update({'result':True,'msg':'Saved ok'})
                self.render2('views/admin/link.html',vals)
            elif action=='edit':
                try:
                    action_id=int(self.param('id'))
                    link=Link.get_by_id(action_id)
                    link.linktext=name
                    link.href=url
                    link.linkcomment = comment
                    link.put()
                    #goto link manage page
                    self.redirect('/admin/links')
                except:
                    # Fixed message: the old literal used '' (adjacent-string
                    # concatenation), which rendered as "cant".
                    vals.update({'result':False,'msg':_("Error:Link can't been saved.")})
                    self.render2('views/admin/link.html',vals)
class admin_category(BaseRequestHandler):
def __init__(self):
self.current='categories'
@requires_admin
def get(self,slug=None):
action=self.param("action")
key=self.param('key')
category=None
if action and action=='edit':
try:
category=Category.get(key)
except:
pass
else:
action='add'
vals={'action':action,'category':category,'key':key,'categories':[c for c in Category.all() if not category or c.key()!=category.key()]}
self.render2('views/admin/category.html',vals)
@requires_admin
def post(self):
def check(cate):
parent=cate.parent_cat
skey=cate.key()
while parent:
if parent.key()==skey:
return False
parent=parent.parent_cat
return True
action=self.param("action")
name=self.param("name")
slug=self.param("slug")
parentkey=self.param('parentkey')
key=self.param('key')
vals={'action':action,'postback':True}
try:
if action=='add':
cat= Category(name=name,slug=slug)
if not (name and slug):
raise Exception(_('Please input name and slug.'))
if parentkey:
cat.parent_cat=Category.get(parentkey)
cat.put()
self.redirect('/admin/categories')
#vals.update({'result':True,'msg':_('Saved ok')})
#self.render2('views/admin/category.html',vals)
elif action=='edit':
cat=Category.get(key)
cat.name=name
cat.slug=slug
if not (name and slug):
raise Exception(_('Please input name and slug.'))
if parentkey:
cat.parent_cat=Category.get(parentkey)
if not check(cat):
raise Exception(_('A circle declaration found.'))
else:
cat.parent_cat=None
cat.put()
self.redirect('/admin/categories')
except Exception ,e :
if cat.is_saved():
cates=[c for c in Category.all() if c.key()!=cat.key()]
else:
cates= Category.all()
vals.update({'result':False,'msg':e.message,'category':cat,'key':key,'categories':cates})
self.render2('views/admin/category.html',vals)
class admin_status(BaseRequestHandler):
    """Diagnostics page: memcache stats plus the CGI environment."""
    @requires_admin
    def get(self):
        self.render2('views/admin/status.html',{'cache':memcache.get_stats(),'current':'status','environ':os.environ})
class admin_authors(BaseRequestHandler):
    """Author listing; POST batch-deletes checked authors."""
    @requires_admin
    def get(self):
        try:
            page_index=int(self.param('page'))
        except:
            page_index=1
        authors=User.all().filter('isAuthor =',True)
        # NOTE(review): the pager is computed but the template receives the
        # unpaged `authors` query — verify this is intentional.
        entries,pager=Pager(query=authors,items_per_page=15).fetch(page_index)
        self.render2('views/admin/authors.html',
                     {
                      'current':'authors',
                      'authors':authors,
                      'pager':pager
                      }
                     )
    @requires_admin
    def post(self,slug=None):
        try:
            linkcheck= self.request.get_all('checks')
            for key in linkcheck:
                author=User.get(key)
                author.delete()
        finally:
            self.redirect('/admin/authors')
class admin_author(BaseRequestHandler):
    """Add/edit form for a single author account."""
    def __init__(self):
        self.current='authors'
    @requires_admin
    def get(self,slug=None):
        action=self.param("action")
        author=None
        if action and action=='edit':
            try:
                key=self.param('key')
                author=User.get(key)
            except:
                pass
        else:
            action='add'
        vals={'action':action,'author':author}
        self.render2('views/admin/author.html',vals)
    @requires_admin
    def post(self):
        action=self.param("action")
        name=self.param("name")
        # `slug` actually holds the author's email address here.
        slug=self.param("email")
        vals={'action':action,'postback':True}
        if not (name and slug):
            vals.update({'result':False,'msg':_('Please input dispname and email.')})
            self.render2('views/admin/author.html',vals)
        else:
            if action=='add':
                author= User(dispname=name,email=slug )
                # NOTE(review): relies on the GAE db module re-exporting
                # `users`; presumably equivalent to users.User(email).
                author.user=db.users.User(slug)
                author.put()
                vals.update({'result':True,'msg':'Saved ok'})
                self.render2('views/admin/author.html',vals)
            elif action=='edit':
                try:
                    key=self.param('key')
                    author=User.get(key)
                    author.dispname=name
                    author.email=slug
                    author.user=db.users.User(slug)
                    author.put()
                    if author.isadmin:
                        # Keep the blog's display author in sync with its admin.
                        g_blog.author=name
                    self.redirect('/admin/authors')
                except:
                    # NOTE(review): '' in the literal is adjacent-string
                    # concatenation, so this renders as "cant".
                    vals.update({'result':False,'msg':_('Error:Author can''t been saved.')})
                    self.render2('views/admin/author.html',vals)
class admin_plugins(BaseRequestHandler):
    """Plugin listing; POST toggles a plugin's active state."""
    def __init__(self):
        self.current='plugins'
    @requires_admin
    def get(self,slug=None):
        vals={'plugins':self.blog.plugins}
        self.render2('views/admin/plugins.html',vals)
    @requires_admin
    def post(self):
        action=self.param("action")
        name=self.param("plugin")
        # Optional redirect target so a plugin page can bounce back to itself.
        ret=self.param("return")
        self.blog.plugins.activate(name,action=="activate")
        if ret:
            self.redirect(ret)
        else:
            vals={'plugins':self.blog.plugins}
            self.render2('views/admin/plugins.html',vals)
class admin_plugins_action(BaseRequestHandler):
    """Per-plugin settings page: delegates GET/POST to the plugin itself."""
    def __init__(self):
        self.current='plugins'
    @requires_admin
    def get(self,slug=None):
        plugin=self.blog.plugins.getPluginByName(slug)
        if not plugin :
            self.error(404)
            return
        plugins=self.blog.plugins.filter('active',True)
        if not plugin.active:
            # Inactive plugin: show an activate form instead of its page.
            pcontent=_('''<div>Plugin '%(name)s' havn't actived!</div><br><form method="post" action="/admin/plugins?action=activate&plugin=%(iname)s&return=/admin/plugins/%(iname)s"><input type="submit" value="Activate Now"/></form>''')%{'name':plugin.name,'iname':plugin.iname}
            plugins.insert(0,plugin)
        else:
            pcontent=plugin.get(self)
        vals={'plugins':plugins,
              'plugin':plugin,
              'pcontent':pcontent}
        self.render2('views/admin/plugin_action.html',vals)
    @requires_admin
    def post(self,slug=None):
        plugin=self.blog.plugins.getPluginByName(slug)
        if not plugin :
            self.error(404)
            return
        plugins=self.blog.plugins.filter('active',True)
        if not plugin.active:
            pcontent=_('''<div>Plugin '%(name)s' havn't actived!</div><br><form method="post" action="/admin/plugins?action=activate&plugin=%(iname)s&return=/admin/plugins/%(iname)s"><input type="submit" value="Activate Now"/></form>''')%{'name':plugin.name,'iname':plugin.iname}
            plugins.insert(0,plugin)
        else:
            pcontent=plugin.post(self)
        vals={'plugins':plugins,
              'plugin':plugin,
              'pcontent':pcontent}
        self.render2('views/admin/plugin_action.html',vals)
class WpHandler(BaseRequestHandler):
    """Export entries as a WordPress (WXR) XML file."""
    @requires_admin
    def get(self,tags=None):
        try:
            all=self.param('all')
        except:
            all=False
        if(all):
            entries = Entry.all().order('-date')
        else:
            # Without 'all', export only entries inside [date_begin, date_end).
            str_date_begin=self.param('date_begin')
            str_date_end=self.param('date_end')
            try:
                date_begin=datetime.strptime(str_date_begin,"%Y-%m-%d")
                date_end=datetime.strptime(str_date_end,"%Y-%m-%d")
                entries = Entry.all().filter('date >=',date_begin).filter('date <',date_end).order('-date')
            except:
                self.render2('views/admin/404.html')
                return
        cates=Category.all()
        tags=Tag.all()
        # Force a download rather than inline display.
        self.response.headers['Content-Type'] = 'binary/octet-stream'#'application/atom+xml'
        self.render2('views/wordpress.xml',{'entries':entries,'cates':cates,'tags':tags})
class Upload(BaseRequestHandler):
    """Legacy upload endpoint used by the file-manager form."""
    @requires_admin
    def post(self):
        name = self.param('filename')
        mtype = self.param('fileext')
        bits = self.param('upfile')
        Media(name = name, mtype = mtype, bits = bits).put()
        self.redirect('/admin/filemanager')
class UploadEx(BaseRequestHandler):
    """AJAX upload endpoint: GET lists files by extension, POST stores one."""
    @requires_admin
    def get(self):
        # `ext` is a '|'-separated extension filter, or '*' for everything.
        extstr=self.param('ext')
        ext=extstr.split('|')
        files=Media.all()
        if extstr!='*':
            files=files.filter('mtype IN',ext)
        self.render2('views/admin/upload.html',{'ext':extstr,'files':files})
    @requires_admin
    def post(self):
        ufile=self.request.params['userfile']
        #if ufile:
        name=ufile.filename
        # Derive the media type from the file extension (without the dot).
        mtype =os.path.splitext(name)[1][1:]
        bits = self.param('userfile')
        media=Media(name = name, mtype = mtype, bits = bits)
        media.put()
        # Respond with JSON for the uploader widget.
        self.write(simplejson.dumps({'name':media.name,'size':media.size,'id':str(media.key())}))
class FileManager(BaseRequestHandler):
    """Admin media browser: paged listing plus batch delete."""
    def __init__(self):
        self.current='files'
    @requires_admin
    def get(self):
        try:
            page_index=int(self.param('page'))
        except:
            # Missing/invalid page parameter: show the first page.
            page_index=1
        files = Media.all().order('-date')
        files,links=Pager(query=files,items_per_page=15).fetch(page_index)
        self.render2('views/admin/filemanager.html',{'files' : files,'pager':links})
    @requires_admin
    def post(self): # delete files
        # Loop locals renamed: the originals (`id`, `file`) shadowed builtins.
        delids = self.request.POST.getall('del')
        if delids:
            for media_id in delids:
                media = Media.get_by_id(int(media_id))
                media.delete()
        self.redirect('/admin/filemanager')
class admin_main(BaseRequestHandler):
    """Admin landing page: admins go to setup, authors to their post list."""
    @requires_admin
    def get(self,slug=None):
        target = '/admin/setup' if self.is_admin else '/admin/entries/post'
        self.redirect(target)
class admin_ThemeEdit(BaseRequestHandler):
    """List the files inside a packaged theme zip (read-only stub)."""
    @requires_admin
    def get(self,slug):
        zfile=zipfile.ZipFile(os.path.join(rootpath,"themes",slug+".zip"))
        # Removed the unused `newfile=zipfile.ZipFile('')`: opening an empty
        # filename raises IOError, which made this handler fail every time.
        for item in zfile.infolist():
            self.write(item.filename+"<br>")
def main():
    """CGI entry point: build the URL map and run the admin WSGI app."""
    webapp.template.register_template_library('filter')
    webapp.template.register_template_library('app.recurse')
    # Routes are matched top-down; Error404 is the final catch-all.
    application = webapp.WSGIApplication(
                                         [
                                          ('/admin/{0,1}',admin_main),
                                          ('/admin/setup',admin_setup),
                                          ('/admin/entries/(post|page)',admin_entries),
                                          ('/admin/links',admin_links),
                                          ('/admin/categories',admin_categories),
                                          ('/admin/comments',admin_comments),
                                          ('/admin/link',admin_link),
                                          ('/admin/category',admin_category),
                                          ('/admin/(post|page)',admin_entry),
                                          ('/admin/status',admin_status),
                                          ('/admin/authors',admin_authors),
                                          ('/admin/author',admin_author),
                                          ('/admin/import',admin_import),
                                          ('/admin/tools',admin_tools),
                                          ('/admin/plugins',admin_plugins),
                                          ('/admin/plugins/(\w+)',admin_plugins_action),
                                          ('/admin/sitemap',admin_sitemap),
                                          ('/admin/export/micolog.xml',WpHandler),
                                          ('/admin/do/(\w+)',admin_do_action),
                                          ('/admin/lang',setlanguage),
                                          ('/admin/theme/edit/(\w+)',admin_ThemeEdit),
                                          ('/admin/upload', Upload),
                                          ('/admin/filemanager', FileManager),
                                          ('/admin/uploadex', UploadEx),
                                          ('.*',Error404),
                                          ],debug=True)
    # Let plugins register their own routes before serving.
    g_blog.application=application
    g_blog.plugins.register_handlerlist(application)
    wsgiref.handlers.CGIHandler().run(application)
if __name__ == "__main__":
main() | Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A simple wrapper for Django templates.
The main purpose of this module is to hide all of the package import pain
you normally have to go through to get Django to work. We expose the Django
Template and Context classes from this module, handling the import nonsense
on behalf of clients.
Typical usage:
from google.appengine.ext.webapp import template
print template.render('templates/index.html', {'foo': 'bar'})
Django uses a global setting for the directory in which it looks for templates.
This is not natural in the context of the webapp module, so our load method
takes in a complete template path, and we set these settings on the fly
automatically. Because we have to set and use a global setting on every
method call, this module is not thread safe, though that is not an issue
for applications.
Django template documentation is available at:
http://www.djangoproject.com/documentation/templates/
"""
import md5
import os,logging
# Import the SDK-bundled Django 0.96 if available; fall back to whatever
# django is on sys.path otherwise.
try:
    from django import v0_96
except ImportError:
    pass
import django
import django.conf
# Configure Django's global settings once, at import time.  configure()
# raises if settings are already configured; both exception types are
# safe to ignore here because they mean a usable configuration exists.
try:
    django.conf.settings.configure(
        DEBUG=False,
        TEMPLATE_DEBUG=False,
        TEMPLATE_LOADERS=(
            'django.template.loaders.filesystem.load_template_source',
        ),
    )
except (EnvironmentError, RuntimeError):
    pass
import django.template
import django.template.loader
from google.appengine.ext import webapp
def render(theme, template_file, template_dict, debug=False):
    """Render the given theme's template file with a dict of values.

    Example usage:
      render(theme, "index.html", {"name": "Bret", "values": [1, 2, 3]})

    Args:
      theme: theme object describing where template files live
      template_file: path to a Django template, relative to the theme
      template_dict: dictionary of values to apply to the template
      debug: when True, bypass the template cache
    """
    compiled = load(theme, template_file, debug)
    return compiled.render(Context(template_dict))
# Cache of compiled templates, keyed by absolute template path.
# Only populated when debug is False.
template_cache = {}
def load(theme,template_file, debug=False):
    """Loads the Django template from the given path.
    It is better to use this function than to construct a Template using the
    class below because Django requires you to load the template with a method
    if you want imports and extends to work in the template.
    """
    #template_file=os.path.join("templates",template_file)
    # Zip-packaged themes keep templates at their root; directory themes
    # keep them under a templates/ subdirectory.
    if theme.isZip:
        theme_path=theme.server_dir
    else:
        theme_path=os.path.join( theme.server_dir,"templates")
    abspath =os.path.join( theme_path,template_file)
    logging.debug("theme_path:%s,abspath:%s"%(theme_path,abspath))
    if not debug:
        template = template_cache.get(abspath, None)
    else:
        template = None
    if not template:
        #file_name = os.path.split(abspath)
        # Django looks templates up through the global TEMPLATE_DIRS
        # setting, so swap it in just for the duration of the load.
        new_settings = {
            'TEMPLATE_DIRS': (theme_path,),
            'TEMPLATE_DEBUG': debug,
            'DEBUG': debug,
        }
        old_settings = _swap_settings(new_settings)
        try:
            template = django.template.loader.get_template(template_file)
        finally:
            _swap_settings(old_settings)
        if not debug:
            template_cache[abspath] = template
        # Wrap template.render so that each render temporarily swaps the
        # settings back in and replaces {% url %} handling with the
        # webapp-aware implementation below.
        def wrap_render(context, orig_render=template.render):
            URLNode = django.template.defaulttags.URLNode
            save_urlnode_render = URLNode.render
            old_settings = _swap_settings(new_settings)
            try:
                URLNode.render = _urlnode_render_replacement
                return orig_render(context)
            finally:
                _swap_settings(old_settings)
                URLNode.render = save_urlnode_render
        template.render = wrap_render
    return template
def _swap_settings(new):
    """Install selected Django settings, returning the previous values.

    Example:
      save = _swap_settings({'X': 1, 'Y': 2})
      try:
        ...new settings for X and Y are in effect here...
      finally:
        _swap_settings(save)

    Args:
      new: a dict mapping setting names to the values to install.

    Returns:
      A dict of the same shape holding the prior values.  Settings that
      were never set at all come back as None and will be restored as
      None by the 'finally' clause in the example above.  That is
      harmless: we cannot delete settings given as None anyway, since
      None is also a legitimate value for some settings, and a separate
      'unset' sentinel seems overkill with no known use case.
    """
    settings = django.conf.settings
    # snapshot the current values before overwriting anything
    old = dict((key, getattr(settings, key, None)) for key in new)
    for key, value in new.iteritems():
        setattr(settings, key, value)
    return old
def create_template_register():
    """Return a fresh Django template Library for custom filters and tags.

    Create a module-level variable named "register" holding the returned
    library and attach all custom filters/tags to it, as described at
    http://www.djangoproject.com/documentation/templates_python/
    #extending-the-template-system:

      templatefilters.py
      ==================
      register = webapp.template.create_template_register()

      def cut(value, arg):
        return value.replace(arg, '')
      register.filter(cut)

    Then register the custom template module with the
    register_template_library function below in your application module:

      myapp.py
      ========
      webapp.template.register_template_library('templatefilters')
    """
    return django.template.Library()
def register_template_library(package_name):
    """Registers a template extension module to make it usable in templates.
    See the documentation for create_template_register for more information."""
    # add_to_builtins makes the library available to every template without
    # an explicit {% load %}; the guard avoids registering a module twice.
    if not django.template.libraries.get(package_name, None):
        django.template.add_to_builtins(package_name)
# Re-export Django's Template and Context so clients need only import
# this module (see module docstring).
Template = django.template.Template
Context = django.template.Context
def _urlnode_render_replacement(self, context):
    """Replacement for django's {% url %} block.
    This version uses WSGIApplication's url mapping to create urls.
    Examples:
      <a href="{% url MyPageHandler "overview" %}">
      {% url MyPageHandler implicit_args=False %}
      {% url MyPageHandler "calendar" %}
      {% url MyPageHandler "jsmith","calendar" %}
    """
    # resolve template variable expressions to concrete values first
    args = [arg.resolve(context) for arg in self.args]
    try:
        app = webapp.WSGIApplication.active_instance
        handler = app.get_registered_handler_by_name(self.view_name)
        return handler.get_url(implicit_args=True, *args)
    except webapp.NoUrlFoundError:
        # mirror Django's url tag behavior: render nothing on failure
        return ''
| Python |
import cgi
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
class MainPage(webapp.RequestHandler):
    # Serves the guestbook signing form; the form posts to /sign below.
    def get(self):
        self.response.out.write("""
          <html>
            <body>
              <form action="/sign" method="post">
                <div><textarea name="content" rows="3" cols="60"></textarea></div>
                <div><input type="submit" value="Sign Guestbook"></div>
              </form>
            </body>
          </html>""")
class Guestbook(webapp.RequestHandler):
    """Handles POSTs to /sign: greets a signed-in user, or sends anonymous
    users to the login page."""

    def post(self):
        """Write a plain-text greeting, or redirect to login.

        Bug fix: the original unconditionally wrote a stray debug string
        ('aaaa') after both branches -- including after issuing a
        redirect, which appends garbage to the redirect response body.
        """
        user = users.get_current_user()
        if user:
            self.response.headers['Content-Type'] = 'text/plain'
            self.response.out.write('Hello, ' + user.nickname())
        else:
            self.redirect(users.create_login_url(self.request.uri))
# URL routing for the guestbook sample application.
application = webapp.WSGIApplication(
    [('/', MainPage),('/sign',Guestbook)],
    debug=True)
def main():
    # CGI entry point: hand the WSGI app to App Engine's runner.
    run_wsgi_app(application)
if __name__ == "__main__":
    main()
from . import CompoundElement
import bs4
import six
"""The purpose of this module is to provide classes corresponding to
most elements (except <style>, <script> and similar non-document
content elements) and core attributes (except style and the %events
attributes) of HTML4.01 and HTML5. It is not totally compliant with
the HTML4.01 and HTML5 standards, but is enough to model most
real-world HTML. It contains no provisions to ensure that elements of
a particular kind only contain allowed sub-elements."""
# abstract class -- common base for all concrete HTML element classes below
class HTMLElement(CompoundElement): pass
class HTML(HTMLElement): pass
class Head(HTMLElement): pass
# a title cannot contain subelements -- derive from UnicodeElement instead?
class Title(HTMLElement): pass
class Body(HTMLElement):
    def as_xhtml(self, uri):
        # RDFa: mark the body with an 'about' attribute naming the document URI
        element = super(Body, self).as_xhtml(uri)
        element.set('about', uri)
        return element
# The % group names below refer to HTML 4.01 DTD content-model entities.
# %block
class P(HTMLElement): pass
# %heading
class H1(HTMLElement): pass
class H2(HTMLElement): pass
class H3(HTMLElement): pass
class H4(HTMLElement): pass
class H5(HTMLElement): pass
class H6(HTMLElement): pass
# %list
class UL(HTMLElement): pass
class OL(HTMLElement): pass
class LI(HTMLElement): pass
# %preformatted
class Pre(HTMLElement): pass
# other
class DL(HTMLElement): pass
class DT(HTMLElement): pass
class DD(HTMLElement): pass
class Div(HTMLElement): pass
class Blockquote(HTMLElement): pass
class Form(HTMLElement): pass
class HR(HTMLElement): pass
class Table(HTMLElement): pass
class Fieldset(HTMLElement): pass
class Address(HTMLElement): pass
# %fontstyle
class TT (HTMLElement): pass
class I (HTMLElement): pass
class B (HTMLElement): pass
class U (HTMLElement): pass
class Big (HTMLElement): pass
class Small(HTMLElement): pass
# %phrase
class Em (HTMLElement): pass
class Strong (HTMLElement): pass
class Dfn (HTMLElement): pass
class Code (HTMLElement): pass
class Samp (HTMLElement): pass
class Kbd (HTMLElement): pass
class Var (HTMLElement): pass
class Cite (HTMLElement): pass
class Abbr (HTMLElement): pass
class Acronym(HTMLElement): pass
# %special
class A (HTMLElement): pass
class Img (HTMLElement): pass
class Object (HTMLElement): pass
class Br (HTMLElement): pass
class Q (HTMLElement): pass
class Sub (HTMLElement): pass
class Sup (HTMLElement): pass
class Span (HTMLElement): pass
class BDO(HTMLElement): pass
# %form
class Input(HTMLElement): pass
class Select(HTMLElement): pass
class Textarea(HTMLElement): pass
class Label(HTMLElement): pass
class Button(HTMLElement): pass
# table
class Caption(HTMLElement): pass
class Thead(HTMLElement): pass
class Tfoot(HTMLElement): pass
class Tbody(HTMLElement): pass
class Colgroup(HTMLElement): pass
class Col(HTMLElement): pass
class TR(HTMLElement): pass
class TH(HTMLElement): pass
class TD(HTMLElement): pass
# very special?
class Ins(HTMLElement): pass
class Del(HTMLElement): pass
# new in HTML5 -- cannot be simply expressed in XHTML 1.1. Instead
# they're expressed as eg. '<div class="section">'
class HTML5Element(HTMLElement):
    # serialized as a <div>, with @class carrying the real element name
    tagname = "div"
    def _get_classname(self):
        return self.__class__.__name__.lower()
    classname = property(_get_classname)
# Concrete HTML5 elements, all serialized as classed <div>s (see above).
class Article(HTML5Element): pass
class Aside(HTML5Element): pass
class Bdi(HTML5Element): pass
class Details(HTML5Element): pass
class Dialog(HTML5Element): pass
class Summary(HTML5Element): pass
class Figure(HTML5Element): pass
class Figcaption(HTML5Element): pass
class Footer(HTML5Element): pass
class Header(HTML5Element): pass
class Hgroup(HTML5Element): pass
class Mark(HTML5Element): pass
class Meter(HTML5Element): pass
class Nav(HTML5Element): pass
class Progress(HTML5Element): pass
class Ruby(HTML5Element): pass
class Rt(HTML5Element): pass
class Rp(HTML5Element): pass
class Section(HTML5Element): pass
class Time(HTML5Element): pass
class Wbr(HTML5Element): pass
# audio, video, embed, canvas and similar non-structural/semantic
# elements are not included
# For use by elements_from_soup: maps lowercased tag names to their
# element classes.  Built dynamically (resolving the old FIXME): every
# concrete HTMLElement subclass above -- including the HTML5 ones --
# uses a tag name equal to its class name lowercased, which is exactly
# the convention that _get_tagname/_get_classname encode, so the
# explicit 90-entry literal dict is redundant and drift-prone.
_tagmap = dict((cls.__name__.lower(), cls)
               for cls in globals().values()
               if isinstance(cls, type)
               and issubclass(cls, HTMLElement)
               and cls not in (HTMLElement, HTML5Element))
def elements_from_soup(soup,
                       remove_tags=('script', 'style', 'font', 'map', 'center'),
                       keep_attributes=('class', 'id', 'dir', 'lang', 'src', 'href', 'name', 'alt')):
    """Returns a tree of ferenda.html elements from a BeautifulSoup
    tree, removing some tags and attributes in the process.

    Args:
        soup: a bs4 Tag (or the BeautifulSoup document itself)
        remove_tags: tags to drop entirely, subtrees included
        keep_attributes: the only attributes copied onto created elements

    Returns:
        the root element of the converted tree, or None if soup's tag
        is removed or unknown.

    (Fixes: default arguments are now immutable tuples instead of
    shared mutable lists, and the None comparison uses ``is not None``.)
    """
    if soup.name in remove_tags:
        return None
    if soup.name not in _tagmap:
        # self.log.warning("Can't render %s" % soup.name)
        # FIXME: Get ahold of the log object somehow
        print("WARNING: Can't render %s" % soup.name)
        return None
    attrs = {}
    for attr in keep_attributes:
        if attr in soup.attrs:
            # bs4 presents multi-valued attributes (e.g. class) as lists
            if isinstance(soup[attr], list):
                attrs[attr] = " ".join(soup[attr])
            else:
                attrs[attr] = soup[attr]
    element = _tagmap[soup.name](**attrs)
    for child in soup.children:
        if isinstance(child, bs4.element.Comment):
            pass  # comments are dropped
        elif isinstance(child, bs4.NavigableString):
            # ignore pure whitespace between tags; convert NavigableString
            # to a plain unicode string so we don't keep the soup alive
            if six.text_type(child).strip() != "":
                element.append(six.text_type(child))
        else:
            subelement = elements_from_soup(child, remove_tags, keep_attributes)
            if subelement is not None:
                element.append(subelement)
    return element
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""Several base datatypes that inherit from native types
(unicode,list,dict, etc) or python defined types (datetime), but adds
support for general attributes (properties). The attributes are set when
instaniated (in the constructor call, as named arguments). Once an
object has been instansiated, you cannot add any more attributes."""
import datetime
import re
import sys
import logging
import xml.etree.cElementTree as ET
from lxml.builder import ElementMaker
from operator import itemgetter
import six
from six import text_type as str
from rdflib import Graph, Namespace, Literal, URIRef
try:
import pyparsing
pyparsing_available = True
except ImportError:
pyparsing_available = False
from ferenda import util
DCT = Namespace(util.ns['dct'])
RDF = Namespace(util.ns['rdf'])
XML_LANG = "{http://www.w3.org/XML/1998/namespace}lang"
log = logging.getLogger(__name__)
E = ElementMaker(namespace="http://www.w3.org/1999/xhtml")
class AbstractElement(object):
    """Base class. You should only inherit from this if you define new
    types directly based on python types."""
    def __new__(cls):
        obj = super(AbstractElement, cls).__new__(cls)
        # '__initialized' guards __setattr__ below: while False, new
        # attributes may be created freely (construction phase)
        object.__setattr__(obj, '__initialized', False)
        return obj
    def __init__(self, *args, **kwargs):
        # every keyword argument becomes an instance attribute
        for (key, val) in list(kwargs.items()):
            object.__setattr__(self, key, val)
        # Declare this instance ready for usage. Note that derived
        # objects must do their own initialization first, before
        # calling the superclass constructor (i.e. this function),
        # since this effectively "seals" the instance.
        #
        # (we need to call object.__setattr__ directly to bypass our
        # own __setattr__ implementation)
        object.__setattr__(self, '__initialized', True)
    def __setattr__(self, name, value):
        if object.__getattribute__(self, '__initialized'):
            # initialization phase is over -- no new attributes should
            # be created. Check to see if the attribute exists -- if it
            # doesn't, we raise an AttributeError (with a sensible
            # error message)
            try:
                object.__getattribute__(self, name)
                object.__setattr__(self, name, value)
            except AttributeError:
                raise AttributeError("Can't set attribute '%s' on object '%s' after initialization" % (name, self.__class__.__name__))
        else:
            # Still in initialization phase -- ok to create new
            # attributes
            object.__setattr__(self, name, value)
    def _get_tagname(self):
        # default XHTML tag name: the lowercased class name
        return self.__class__.__name__.lower()
    # NOTE(review): property()'s second positional argument is fset, not
    # doc -- "Docstring here" would be *called* if tagname were ever
    # assigned.  Probably meant property(_get_tagname, doc="...");
    # confirm before changing, since assignment currently never happens.
    tagname = property(_get_tagname, "Docstring here")
    classname = None
    """Docstring here"""
    def as_xhtml(self, uri=None):
        # Render as an lxml element via ElementMaker E, copying a
        # whitelist of standard (X)HTML attributes from the instance.
        attrs = {}
        for stdattr in ('class', 'id', 'dir', 'lang', 'src', 'href', 'name', 'alt', 'role'):
            if hasattr(self,stdattr):
                attrs[stdattr] = getattr(self,stdattr)
        return E(self.tagname, attrs, str(self))
class UnicodeElement(AbstractElement, six.text_type):
    """Contains a string value of some kind, but can also have other
    properties (such as ordinal label, date of enactment, etc)."""
    # immutable objects (like strings, unicode, etc) must provide a __new__ method
    def __new__(cls, arg='', *args, **kwargs):
        if not isinstance(arg, six.text_type):
            # phrase the error using the runtime's native text type name
            if sys.version_info < (3,0,0):
                raise TypeError("%r is not unicode" % arg)
            else:
                raise TypeError("%r is not str" % arg)
        # obj = str.__new__(cls, arg)
        obj = six.text_type.__new__(cls,arg)
        # attribute creation stays open until AbstractElement.__init__ runs
        object.__setattr__(obj, '__initialized', False)
        return obj
class IntElement(AbstractElement, int):
    """Contains an integer, but can also have other properties (such as ...)."""
    # immutable objects must provide a __new__ method
    def __new__(cls, arg=0, *args, **kwargs):
        if not isinstance(arg, int):
            raise TypeError("%r is not int" % arg)
        obj = int.__new__(cls, arg)
        # attribute creation stays open until AbstractElement.__init__ runs
        object.__setattr__(obj, '__initialized', False)
        return obj
class DateElement(AbstractElement, datetime.date):
    """Contains a :py:class:`datetime.date`, but can also have other properties."""
    # immutable objects must provide a __new__ method
    def __new__(cls, arg=None, *args, **kwargs):
        # Bug fix: the default used to be ``arg=datetime.date.today()``,
        # which Python evaluates once at import time, silently freezing
        # the default at whatever day the module was first loaded.
        # Resolve "today" at call time instead.
        if arg is None:
            arg = datetime.date.today()
        if not isinstance(arg, datetime.date):
            raise TypeError("%r is not datetime.date" % arg)
        obj = datetime.date.__new__(cls, arg.year, arg.month, arg.day)
        # attribute creation stays open until AbstractElement.__init__ runs
        object.__setattr__(obj, '__initialized', False)
        return obj
class CompoundElement(AbstractElement, list):
    """Works like a list of other :py:class:`AbstractElement` objects, but can also have properties of it's own."""
    def __new__(cls, arg=[], *args, **kwargs):
        # ideally, we'd like to do just "obj = list.__new__(cls,arg)"
        # but that doesn't seem to work
        # (the shared default [] is safe: it is only read via extend)
        obj = list.__new__(cls)
        obj.extend(arg)
        object.__setattr__(obj, '__initialized', False)
        return obj
    def as_xhtml(self, uri=None):
        """Returns a lxml.etree object (with children)"""
        children = []
        # start by handling all children recursively
        for subpart in self:
            if (isinstance(subpart, AbstractElement) or hasattr(subpart, 'as_xhtml')):
                node = subpart.as_xhtml(uri)
                if node is not None:
                    children.append(node)
            elif isinstance(subpart, str):
                children.append(subpart)
            else:
                log.warning("as_xhtml: Can't render %s instance" %
                            subpart.__class__.__name__)
                # this is a reasonable attempt
                children.append(str(subpart))
        # Then massage a list of attributes for the main node
        attrs = {}
        if self.classname is not None:
            attrs['class'] = self.classname
        # copy (a subset of) standard xhtml attributes
        for stdattr in ('class', 'id', 'dir', 'lang', 'src', 'href', 'name', 'alt', 'role', 'typeof'):
            if hasattr(self,stdattr):
                attrs[stdattr] = getattr(self,stdattr)
        # create extra attributes depending on circumstances
        if hasattr(self,'uri'):
            attrs['about'] = self.uri
        # when both a uri and an RDF graph are attached, express the
        # graph's triples about that uri as RDFa on this element
        if hasattr(self,'uri') and hasattr(self,'meta'):
            assert isinstance(self.meta,Graph), "self.meta is %r, not rdflib.Graph" % type(self.meta)
            # we sort to get a predictable order (by predicate)
            for (s,p,o) in sorted(self.meta, key=itemgetter(1)):
                if s != URIRef(self.uri):
                    continue
                if p == RDF.type:
                    attrs['typeof'] = self.meta.qname(o)
                    # attrs['rev'] = self.meta.qname(DCT.isPartOf)
                elif p == DCT.title:
                    attrs['property'] = self.meta.qname(p)
                    attrs['content'] = o.toPython()
                else:
                    # all other triples become leading <span> children
                    children.insert(0, self._span(s,p,o,self.meta))
        return E(self.tagname, attrs, *children)
    def _span(self, subj, pred, obj, graph):
        """Returns any triple as a span element with rdfa attributes. Object
        can be a uriref or literal, subject must be a
        uriref. Bnodes not supported. Recursively creates sub-span
        elements with for each uriref object that is the subject in
        another triple in graph.
        """
        children = []
        if isinstance(obj,Literal):
            o_python = obj.toPython()
            if isinstance(o_python, datetime.date):
                o_python = o_python.isoformat()
            attrs = {
                # 'about':self.uri,
                'property':self.meta.qname(pred),
                'content': o_python
            }
            if obj.datatype:
                attrs['datatype'] = self.meta.qname(obj.datatype)
            else:
                # only datatype-less literals can have language
                attrs[XML_LANG] = obj.language if obj.language else ''
        elif isinstance(obj,URIRef):
            attrs = {
                # 'about':self.uri,
                # 'about': str(obj),
                'rel':self.meta.qname(pred),
                'href':str(obj)
            }
            # recurse into triples about the object uri itself
            for sub_pred, sub_obj in graph.predicate_objects(subject=obj):
                children.append(self._span(obj, sub_pred, sub_obj, graph))
        else:
            raise ValueError("Type %s not supported as object" % type(obj))
        return E('span', attrs, *children)
class MapElement(AbstractElement, dict):
    """A MapElement is an adjusted dict"""
    def __new__(cls, arg={}, *args, **kwargs):
        # ideally, we'd like to do just "obj = dict.__new__(cls,arg)"
        # but that doesn't seem to work
        # (the shared default {} is safe: it is only read, never mutated)
        obj = dict.__new__(cls, arg)
        obj.update(arg)
        object.__setattr__(obj, '__initialized', False)
        return obj
# Abstract classes intended for use with multiple inheritance, which
# adds common properties
class TemporalElement(object):
    """A TemporalElement has a number of temporal properties
    (entryintoforce, expires) which states the temporal frame of the
    object"""
    def __init__(self):
        self.entryintoforce = None
        self.expires = None

    def in_effect(self, date=None):
        """Return whether this object is in effect on *date* (default:
        today).  Requires entryintoforce and expires to have been set."""
        if date is None:
            date = datetime.date.today()
        return self.entryintoforce <= date <= self.expires
class OrdinalElement(object):
    """An OrdinalElement has an explicit ordinal number. The ordinal does
    not need to be strictly numerical, but can be eg. '6 a' (which is
    larger than 6, but smaller than 7)."""
    def __init__(self):
        self.ordinal = None
    # FIXME: do a proper mostly-numerical comparison using util.numcmp
    def __lt__(self, other):
        return self.ordinal < other.ordinal
    def __le__(self, other):
        return self.ordinal <= other.ordinal
    def __eq__(self, other):
        return self.ordinal == other.ordinal
    def __ne__(self, other):
        return self.ordinal != other.ordinal
    def __gt__(self, other):
        return self.ordinal > other.ordinal
    def __ge__(self, other):
        # Bug fix: this used to compare with == (copy/paste from __eq__),
        # so e.g. ordinal 2 >= ordinal 1 evaluated to False.
        return self.ordinal >= other.ordinal
from ferenda import util
class PredicateType(object):
    """Inheriting from this gives the subclass a predicate attribute,
    which describes the RDF predicate to which the class is the RDF
    subject (eg. if you want to model the title of a document, you
    would inherit from UnicodeElement and this, and then set
    .predicate to
    rdflib.URIRef('http://purl.org/dc/elements/1.1/title')"""
    def __init__(self, *args, **kwargs):
        if 'predicate' in kwargs:
            self.predicate = kwargs['predicate']
            # switch the full uriref
            # (http://rinfo.lagrummet...#paragraf) to one using a
            # namespace prefix, if we know of one:
            shorten = False
            for (prefix, ns) in list(util.ns.items()):
                if kwargs['predicate'].startswith(ns):
                    predicateuri = kwargs['predicate']
                    kwargs['predicate'] = kwargs[
                        'predicate'].replace(ns, prefix + ":")
                    # print "Shorten predicate %s to: %s" % (predicateuri, kwargs['predicate'])
                    shorten = True
            #if not shorten:
            #    print "Couldn't shorten predicate: %s" % self.predicate
            # NOTE(review): the shortened form in kwargs is re-applied by
            # a later __init__ in the MRO (AbstractElement copies kwargs
            # onto the instance), overwriting the full URI set above --
            # confirm that is the intended final value of .predicate.
        else:
            # From the RDF Schema spec: 'This is the class of
            # everything. All other classes are subclasses of this
            # class.'
            from rdflib import RDFS
            self.predicate = RDFS.Resource
        super(PredicateType, self).__init__(*args, **kwargs)
class Link(UnicodeElement): # just a unicode string with a .uri attribute
    tagname = 'a'
    def __repr__(self):
        return 'Link(\'%s\',uri=%r)' % (six.text_type.__repr__(self), self.uri)
    def as_xhtml(self, uri):
        # render as <a href=self.uri> when a uri was set at construction
        element = super(Link, self).as_xhtml(uri)
        if hasattr(self,'uri'):
            element.set('href', self.uri)
        return element
class LinkSubject(PredicateType, Link):
    pass # A RDFish link
def serialize(root):
    """Serialize an element tree built from this module's classes to an
    indented XML string (the inverse of deserialize)."""
    t = __serializeNode(root)
    __indentTree(t)
    return ET.tostring(t, 'utf-8').decode('utf-8') + "\n"
# http://infix.se/2007/02/06/gentlemen-indent-your-xml
def __indentTree(elem, level=0):
    # In-place prettyprinter used by serialize(): inserts newline plus
    # two spaces per nesting level into text/tail fields.
    # NOTE(review): recursion is delegated to the module-level indent()
    # below rather than to __indentTree itself, and the two functions
    # set child tails slightly differently (i + "  " here vs i there) --
    # confirm whether that divergence is intentional before merging them.
    i = "\n" + level * "  "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + "  "
        for e in elem:
            indent(e, level + 1)
            if not e.tail or not e.tail.strip():
                e.tail = i + "  "
        # the last child's tail closes the parent at the parent's level
        if not e.tail or not e.tail.strip():
            e.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
def __serializeNode(node, serialize_hidden_attrs=False):
    # Convert one element-tree node (or native value) to an ET.Element
    # whose tag is the value's class name, attributes are its public
    # instance attributes, and text/children encode its value.
    # print "serializing: %r" % node
    # Special handling of pyparsing.ParseResults -- deserializing of
    # these won't work (easily)
    if pyparsing_available and isinstance(node, pyparsing.ParseResults):
        return ET.XML(node.asXML())
    # We use type() instead of isinstance() because we want to
    # serialize str derived types using their correct class names
    if type(node) == six.text_type:
        nodename = "str"
    elif type(node) == six.binary_type:
        nodename = "bytes"
    else:
        nodename = node.__class__.__name__
    e = ET.Element(nodename)
    if hasattr(node, '__dict__'):
        # public attributes (underscore-prefixed ones only on request)
        for key in [x for x in list(node.__dict__.keys()) if serialize_hidden_attrs or not x.startswith('_')]:
            val = node.__dict__[key]
            if (isinstance(val, (six.text_type,six.binary_type))):
                e.set(key, val)
            else:
                e.set(key, repr(val))
    if isinstance(node, (six.text_type,six.binary_type)):
        # NOTE(review): empty strings leave e.text unset (None), which the
        # deserializer must special-case -- confirm round-trip of ''
        if node:
            e.text = node
    elif isinstance(node, int):
        e.text = str(node)
    elif isinstance(node, list):
        for x in node:
            e.append(__serializeNode(x))
    elif isinstance(node, dict):
        for x in list(node.keys()):
            k = ET.Element("Key")
            k.append(__serializeNode(x))
            e.append(k)
            v = ET.Element("Value")
            v.append(__serializeNode(node[x]))
            e.append(v)
    else:
        # fallback: repr(), relied upon for dates (parsed by regex on load)
        e.text = repr(node)
        # raise TypeError("Can't serialize %r (%r)" % (type(node), node))
    return e
def deserialize(xmlstr, caller_globals):
    """Deserialize an XML string produced by serialize() back into
    element objects.

    This function is highly insecure -- use only with trusted data:
    arbitrary class names from the XML are looked up in caller_globals
    and instantiated.

    Args:
        xmlstr: the XML document (text or utf-8 bytes)
        caller_globals: namespace used to resolve element class names

    Returns:
        the reconstructed object tree.
    """
    if isinstance(xmlstr, str):
        xmlstr = xmlstr.encode('utf-8')
    t = ET.fromstring(xmlstr)
    return __deserializeNode(t, caller_globals)
def __deserializeNode(elem, caller_globals):
    # Special-case native types first -- their class objects can't be
    # looked up through caller_globals below.
    if elem.tag == 'int':
        classobj = int
    elif elem.tag == 'str':
        classobj = str
    # flake8 craps out on byte literals?!
    # elif elem.tag == 'bytes':
    #     classobj = bytes
    elif elem.tag == 'unicode':
        raise ValueError("Cannot deserialize 'unicode' (should be str?)")
    else:
        classobj = caller_globals[elem.tag]
    # probe instance, used only for the isinstance dispatch below
    testclass = classobj(**elem.attrib)
    if isinstance(testclass, str):
        # Bug fixes: the original had two isinstance(testclass, str)
        # branches (the second unreachable), and the live one did
        # str(elem.text) unconditionally -- turning the None text of an
        # empty string into the literal 'None' and breaking round-trips.
        if elem.text:
            c = classobj(str(elem.text), **elem.attrib)
        else:
            c = classobj(**elem.attrib)
    elif isinstance(testclass, int):
        c = classobj(int(elem.text), **elem.attrib)
    elif isinstance(testclass, datetime.date):
        # dates were serialized via repr(), e.g. "datetime.date(2008, 3, 15)"
        m = re.match(r'\w+\((\d+), (\d+), (\d+)\)', elem.text)
        basedate = datetime.date(
            int(m.group(1)), int(m.group(2)), int(m.group(3)))
        c = classobj(basedate, **elem.attrib)
    elif isinstance(testclass, dict):
        c = classobj(**elem.attrib)
        # FIXME: implement deserialization of dict contents (Key/Value)
    else:
        c = classobj(**elem.attrib)
    for subelem in elem:
        c.append(__deserializeNode(subelem, caller_globals))
    return c
# in-place prettyprint formatter
def indent(elem, level=0):
    # Inserts newline + two-spaces-per-level whitespace into text/tail
    # fields.  NOTE(review): the loop variable shadows the 'elem'
    # argument, and child tails are set to the *parent* level's indent
    # (i, not i + "  ") unlike __indentTree above, which compensates for
    # this at its own level -- confirm before changing either function.
    i = "\n" + level * "  "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + "  "
        for elem in elem:
            indent(elem, level + 1)
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
#----------------------------------------------------------------
# Examples of other mixins and inherited classes
class EvenMixin():
    def iseven(self):
        # assumes the mixed-in class sets self.keyword -- TODO confirm
        return (len(self.keyword) % 2 == 0)
class DerivedUnicode(UnicodeElement, EvenMixin):
    # an example on how to customize object initialization, while still
    # letting the base class do it's initialization
    def __init__(self, *args, **kwargs):
        # NOTE(review): raises KeyError when 'keyword' is absent, and a
        # falsy keyword is passed through to the base class unchanged --
        # confirm both are intended for this example class.
        if kwargs['keyword']:
            self.keyword = kwargs['keyword'].upper()
            del kwargs['keyword']
        super(DerivedUnicode, self).__init__(*args, **kwargs)
class DerivedList(CompoundElement, EvenMixin):
    pass
class DerivedDict(MapElement, EvenMixin):
    pass
class DerivedInt(IntElement, EvenMixin):
    pass
class DerivedDate(DateElement, EvenMixin):
    pass
class RDFString(PredicateType, UnicodeElement):
    # N.B: if we inherit from (UnicodeElement,PredicateType)
    # instead, PredicateType.__init__ never gets called! But this way,
    # AbstractElement.__init__ never gets called. I think i must
    # read descrintro again...
    pass
class UnicodeSubject(PredicateType, UnicodeElement): pass
class Body(CompoundElement):
    def as_xhtml(self, uri):
        # RDFa: the body carries an 'about' attribute naming the document URI
        element = super(Body, self).as_xhtml(uri)
        element.set('about', uri)
        return element
class Page(CompoundElement, OrdinalElement): pass
class Nav(CompoundElement): pass
class SectionalElement(CompoundElement):
    # serialized as a <div>, with @class carrying the element kind
    tagname = "div"
    def _get_classname(self):
        return self.__class__.__name__.lower()
    classname = property(_get_classname)
    def as_xhtml(self, baseuri):
        # assumes self.ordinal (and self.title, below) are set by the
        # caller when no explicit uri/meta is attached -- TODO confirm
        if hasattr(self, 'uri'):
            newuri = self.uri
        else:
            newuri = baseuri + "#S%s" % self.ordinal
        element = super(SectionalElement, self).as_xhtml(baseuri)
        # only synthesize RDFa metadata when it wasn't already emitted
        # from an attached uri+meta graph by CompoundElement.as_xhtml
        if not hasattr(self, 'uri') or not hasattr(self, 'meta'):
            element.set('property', 'dct:title')
            element.set('content', self.title)
            element.set('typeof', 'bibo:DocumentPart')
            element.set('about', newuri)
            # NOTE: we don't set xml:lang for either the main @content
            # or the @content in the below <span> -- the data does not
            # originate from RDF and so isn't typed like that.
            attrs = {'about': newuri,
                     'property': 'bibo:chapter',
                     'content': self.ordinal}
            element.insert(0,E('span',attrs))
        return element
# Concrete document-structure elements, serialized via
# CompoundElement.as_xhtml using the tagname given (or derived) below.
class Section(SectionalElement): pass
class Subsection(SectionalElement): pass
class Subsubsection(SectionalElement): pass
class Paragraph(CompoundElement):
    tagname = 'p'
class Preformatted(Paragraph): pass
class Heading(CompoundElement, OrdinalElement):
    tagname = 'h1' # fixme: take level into account
class Footnote(CompoundElement): pass
class OrderedList(CompoundElement):
    tagname = 'ol'
class UnorderedList(CompoundElement):
    tagname = 'ul'
class DefinitionList(CompoundElement):
    tagname = 'dl'
class Term(CompoundElement): pass
class Definition(CompoundElement): pass
class ListItem(CompoundElement, OrdinalElement):
    tagname = 'li'
if __name__ == '__main__':
    # Smoke-test demo: build one instance of each Derived* class,
    # serialize the lot to XML, then deserialize it back and re-serialize.
    # print "Testing DerivedUnicode"
    u = DerivedUnicode('blahonga', keyword='myunicode')
    # print "\trepr(u): %s" % repr(u)
    # print "\tu[1:4]: %r" % u[1:4]
    # print "\tu.keyword: %r" % u.keyword
    # print "\tu.iseven: %r" % u.iseven()
    # print "Testing DerivedList"
    l = DerivedList(['x', 'y', 'z'], keyword='mylist')
    # print "\tl[1]: %r" % l[1]
    # print "\tl.keyword: %r" % l.keyword
    # print "\tl.iseven: %r" % l.iseven()
    # print "Testing DerivedDict"
    d = DerivedDict({'a': 'foo', 'b': 'bar'}, keyword='mydict')
    # print "\td['a']: %r" % d['a']
    # print "\td.keyword: %r" % d.keyword
    # print "\td.iseven: %r" % d.iseven()
    # print "Testing DerivedInt"
    i = DerivedInt(42, keyword='myint')
    # print "\ti: %r" % i
    # print "\ti+5: %r" % (i+5)
    # print "\ti.keyword: %r" % d.keyword
    # print "\ti.iseven: %r" % d.iseven()
    # print "Testing DerivedDate"
    nativedate = datetime.date(2008, 3, 15)
    dt = DerivedDate(nativedate, keyword='mydate')
    # print "\tdt: %r" % dt
    # print "\tdt.keyword: %r" % dt.keyword
    # print "\tdt.iseven: %r" % dt.iseven()
    # print "Testing RDFString"
    r = RDFString('Typisk dokumentrubrik', keyword='mysubject')
    # print "\trepr(r): %s" % repr(r)
    # print "\tr[1:4]: %r" % r[1:4]
    # print "\tr.keyword: %r" % r.keyword
    # print "\tr.predicate: %r" % r.predicate
    from rdflib import URIRef
    r.predicate = URIRef('http://purl.org/dc/terms/title')
    # print "\tr.predicate: %r" % r.predicate
    c = DerivedList([u, l, d, i, dt, r])
    x = serialize(c)
    print(x)
    print()
    y = deserialize(x, globals())
    print((serialize(y)))
| Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import xml.etree.cElementTree as ET
import os
from io import BytesIO
from rdflib import Literal, BNode, Namespace, URIRef
from rdflib import Graph
from rdflib.plugins.parsers.ntriples import NTriplesParser
from rdflib import Namespace, URIRef, Literal, RDFS, RDF, ConjunctiveGraph, plugin, store
import requests
import requests.exceptions
import six
from six import text_type as str
if six.PY3:
from urllib.parse import quote
else:
from urllib import quote
class SparqlError(Exception):
    """Raised when a SPARQL query or update fails."""
    pass
class TriplestoreError(Exception):
    """Raised for general triple-store communication failures."""
    pass
from ferenda.thirdparty import SQLite
class TripleStore(object):
    """Wrapper around the HTTP interface of some triple stores (Fuseki
    and Sesame, right now), particularly focused on bulk upload and
    download. Supports querying and update using SPARQL, but does not
    implement the RDFlib Store interface."""
    # triplestore flavors
    FUSEKI = 1
    SESAME = 2
    SLEEPYCAT = 3 # by way of rdflib
    SQLITE = 4    # - "" -
    # communication flavors -- not applicable when using SLEEPYCAT/SQLITE
    REQUESTS = 1 # pure-python, no external deps
    CURL = 2     # requires command line curl binary, faster
    # Inspired by http://www.openvest.com/trac/browser/rdfalchemy/trunk/rdfalchemy/sparql/sesame2.py
    # see Sesame REST API doc at http://www.openrdf.org/doc/sesame2/system/ch08.html
    # MIME types for the serialization formats the stores speak,
    # keyed by the short format names used throughout this class
    contenttype = {"xml": "application/rdf+xml",
                   "sparql": "application/sparql-results+xml",
                   "nt": "text/plain",
                   "ttl": "application/x-turtle",
                   "turtle": "application/x-turtle",
                   "n3": "text/rdf+n3",
                   "trix": "application/trix",
                   "trig": "application/x-trig",
                   "json": "application/sparql-results+json",
                   "binary": "application/x-binary-rdf-results-table"}
def __init__(self, location, repository, context=None, storetype=SESAME, communication=REQUESTS, ping=False):
self._closed = False
self.location = location
if self.location.endswith("/"):
self.location = self.location[:-1]
self.repository = repository
self.pending_graph = Graph()
self.namespaces = {}
if isinstance(storetype,str):
if storetype == "SESAME":
self.storetype = self.SESAME
elif storetype == "FUSEKI":
self.storetype = self.FUSEKI
elif storetype == "SLEEPYCAT":
self.storetype = self.SLEEPYCAT
elif storetype == "SQLITE":
self.storetype = self.SQLITE
else:
raise ValueError("Unknown storetype %s" % storetype)
else:
self.storetype = storetype
self.communication = communication
self.context = context
if self.storetype in (self.SLEEPYCAT,self.SQLITE):
if self.storetype == self.SQLITE:
storeid = "SQLite"
else:
storeid = "Sleepycat"
self.graph = ConjunctiveGraph(store=storeid, identifier=URIRef(self.repository))
if os.path.exists(self.location):
ret = self.graph.open(self.location, create=False)
else:
ret = self.graph.open(self.location, create=True)
# Ping the server and see what we have
if ping and storetype == self.SESAME:
requests.get(self.location + '/protocol')
return r.text
def close(self):
if self.storetype in (self.SQLITE,self.SLEEPYCAT) and (not self._closed):
import sqlite3
try:
self.graph.close(True)
except sqlite3.ProgrammingError: # 'Cannot operate on a closed database' -- can't figure out why this happens on win32
pass
self._closed = True
def __del__(self):
self.close()
def _statements_url(self):
assert self.storetype not in (self.SQLITE,self.SLEEPYCAT)
if self.storetype == self.SESAME:
if self.context:
return "%s/repositories/%s/statements?context=<%s>" % (self.location, self.repository, self.context)
else:
return "%s/repositories/%s/statements" % (self.location, self.repository)
else:
if self.context:
return "%s/%s?graph=%s" % (self.location, self.repository, self.context)
else:
return "%s/%s?default" % (self.location, self.repository)
def _endpoint_url(self):
assert self.storetype not in (self.SQLITE,self.SLEEPYCAT)
if self.storetype == self.SESAME:
return "%s/repositories/%s" % (self.location,self.repository)
else:
return "%s/%s/query" % (self.location, self.repository)
def _getcontextgraph(self):
assert self.storetype in (self.SQLITE,self.SLEEPYCAT)
if self.context:
return self.graph.get_context(URIRef(self.context))
else:
return self.graph
def _filter_null_datatype(self, graph):
result = Graph()
# the result graph may contains invalid
# datatype attributes -- filter these
for (s,p,o) in graph:
if isinstance(o,Literal) and o.datatype == URIRef('NULL'):
result.add((s,p,Literal(str(o))))
else:
result.add((s,p,o))
return result
def bind(self, prefix, namespace):
self.namespaces[prefix] = namespace
# print "binding %s as %s" % (namespace,prefix)
self.pending_graph.bind(prefix, namespace)
def initialize_repository(self):
"""Creates the repository, if it's not available already."""
# For Sesame:
# curl -H "Content-type: application/x-turtle" -d @createrepo.ttl http://localhost:8080/openrdf-sesame/repositories/SYSTEM/statements
# where createrepo.ttl is something like:
#
# @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>.
# @prefix rep: <http://www.openrdf.org/config/repository#>.
# @prefix sr: <http://www.openrdf.org/config/repository/sail#>.
# @prefix sail: <http://www.openrdf.org/config/sail#>.
# @prefix ns: <http://www.openrdf.org/config/sail/native#>.
#
# [] a rep:Repository ;
# rep:repositoryID "netstandards" ;
# rdfs:label "Ferenda repository for netstandards" ;
# rep:repositoryImpl [
# rep:repositoryType "openrdf:SailRepository" ;
# sr:sailImpl [
# sail:sailType "openrdf:NativeStore" ;
# ns:tripleIndexes "spoc,posc,cspo,opsc,psoc"
# ]
# ].
#
# See http://answers.semanticweb.com/questions/16108/createdelete-a-sesame-repository-via-http
# Note in particular
#
# > Just to add one thing I noticed in actually implementing
# > this, the graph created from a template must be POSTed to a
# > named context in the SYSTEM repository otherwise Sesame
# > doesn't like it i.e. if you just post a graph to the SYSTEM
# > repo without a named context Sesame will recognize that it
# > exists but won't be able to load it properly
#
# Good point Rob. In addition, the named context identifier is
# expected to be typed as an instance of type
# sys:RepositoryContext. We are considering to change this
# though, to make doing these kinds of things easier.
if self.storetype in (self.SQLITE,self.SLEEPYCAT):
self.graph.open(self.location, create=True)
else:
pass
def remove_repository(self):
if self.storetype in (self.SQLITE,self.SLEEPYCAT):
self.graph.destroy()
else:
pass
def get_serialized(self, format="nt"):
"""Returns a string containing all statements in the store,
serialized in the selected format. Returns byte string, not unicode array!"""
if self.storetype in (self.SQLITE,self.SLEEPYCAT):
# g = self._filter_null_datatype(self._getcontextgraph())
g = self._getcontextgraph()
return g.serialize(format=format).decode('utf-8').strip()
# FIXME: is utf-8 always the correct encoding?
# return self._getcontextgraph().serialize(format=format) # .decode('utf-8')
else:
r = requests.get(self._statements_url(
), headers={'Accept': self.contenttype[format]})
r.raise_for_status()
return r.text.strip()
def get_serialized_file(self, path, format="nt"):
# FIXME: 1. stream data instead of storing it in a in-memory string
# 2. implement CURL support
data = self.get_serialized(format)
with open(path,"w") as fp:
fp.write(data)
def select(self, query, format="sparql"):
"""query: A SPARQL query with all neccessary prefixes defined.
format: Either one of the standard formats for queries
("sparql", "json" or "binary") -- returns whatever
urlopen.read() returns -- or the special value "python" which
returns a python list of dicts representing rows and columns.
"""
if self.storetype in (self.SQLITE,self.SLEEPYCAT):
g = self._getcontextgraph()
res = g.query(query)
if format == "sparql":
return res.serialize(format="xml")
elif format == "json":
return res.serialize(format="json")
else:
# or just
# return self.sparql_results_to_list(res.serialize(format="xml"))
l = []
for r in res.bindings:
d = {}
for (key,val) in r.items():
d[str(key)]=str(val)
l.append(d)
return l
else:
url = self._endpoint_url()
if "?" in url:
url += "&"
else:
url += "?"
url += "query=" + quote(query.replace("\n", " ")).replace("/","%2F")
headers = {}
if format == "python":
headers['Accept'] = self.contenttype["sparql"]
else:
headers['Accept'] = self.contenttype[format]
try:
results = requests.get(url, headers=headers, data=query)
results.raise_for_status()
if format == "python":
return self.sparql_results_to_list(results.text)
elif format == "json":
return results.json
else:
return results.text
except requests.exceptions.HTTPError as e:
raise SparqlError(e)
def sparql_results_to_list(self, results):
res = []
tree = ET.fromstring(results)
for row in tree.findall(".//{http://www.w3.org/2005/sparql-results#}result"):
d = {}
for element in row:
#print element.tag # should be "binding"
key = element.attrib['name']
value = element[0].text
d[key] = value
res.append(d)
return res
def construct(self, query):
if self.storetype in (self.SQLITE,self.SLEEPYCAT):
#print(query)
g = self._getcontextgraph()
res = g.query(query)
result = self._filter_null_datatype(res.graph)
del g
del res
#print("-" * 70)
#print(result.serialize(format="turtle").decode('utf-8'))
#print("-" * 70)
return result
else:
# query = " ".join(query.split()) # normalize space
url = self._endpoint_url()
if not self.context:
url += "?"
url += "query=" + quote(query)
try:
r = requests.get(
url)
format = "xml"
headers = {'Accept': self.contenttype[format]}
resp = requests.get(url, headers=headers, data=query)
resp.raise_for_status()
result = Graph()
result.parse(data=resp.text,format=format)
return result
except requests.exceptions.HTTPError as e:
raise SparqlError(e.response.text)
def clear(self):
if self.storetype in (self.SQLITE,self.SLEEPYCAT):
for (s,p,o) in self._getcontextgraph():
# print("Removing %s %s %s" % (s,p,o))
self.graph.remove((s,p,o))
self.graph.commit()
else:
try:
url = self._statements_url()
resp = requests.delete(url)
resp.raise_for_status()
if self.storetype == self.FUSEKI and self.context is None:
self.context = "urn:x-arq:UnionGraph"
url = self._statements_url()
resp = requests.delete(url)
resp.raise_for_status()
self.context = None
except requests.exceptions.ConnectionError as e:
raise TriplestoreError(
"Triplestore %s not responding: %s" % (url, e))
except requests.exceptions.HTTPError as e:
if (self.storetype == self.FUSEKI) and ("No such graph" in str(e)):
pass
else:
raise TriplestoreError(
"Triplestore %s returns error: %s" % (url, e))
def triple_count(self):
if self.storetype in (self.SQLITE,self.SLEEPYCAT):
g = self._getcontextgraph()
if self.storetype == self.SQLITE:
return len(list(g)) # bug? otherwise returns # of unique subjects
else:
return len(g)
elif self.storetype == self.SESAME:
if self.context:
url = "%s/repositories/%s/size?context=<%s>" % (
self.location, self.repository, self.context)
else:
url = "%s/repositories/%s/size" % (self.location, self.repository)
ret = requests.get(url)
return int(ret.text)
else:
# For stores that doesn't provide a HTTP API for
# retrieving the size of a repository, we must get the
# entire repo and count the number of triples (by counting
# newlines). This is obviously slow. Maybe a faster way is
# a SPARQL COUNT() query?
if self.context:
try:
tmp = self.get_serialized(format="nt")
if tmp:
return tmp.count("\n") + 1
else:
return 0
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
return 0
else:
raise e
else:
orig_ctx = self.context
tmp = self.get_serialized(format="nt")
if tmp:
default_triples = tmp.count("\n") + 1
else:
default_triples = 0
# union of all named graphs, does not (in default config)
# include the default graph
self.context = "urn:x-arq:UnionGraph"
tmp = self.get_serialized(format="nt")
if tmp:
named_graph_triples = tmp.count("\n") + 1
else:
named_graph_triples = 0
default_triples += named_graph_triples
self.context = orig_ctx
return default_triples
# def clear_subject(self, subject):
# #print "Deleting all triples where subject is %s from %s" % (subject, self.statements_url)
# req = Request(self.statements_url + "?subj=%s" % subject)
# req.get_method = lambda : "DELETE"
# return self.__urlopen(req)
def add_graph(self, graph):
"""Prepares adding a rdflib.Graph to the store (use commit to actually store it)"""
self.pending_graph += graph
def add_triple(self, xxx_todo_changeme):
"""Prepares adding a single rdflib triple to the store (use
commit to actually store it)"""
(s, p, o) = xxx_todo_changeme
self.pending_graph.add((s, p, o))
def commit(self):
if len(self.pending_graph) == 0:
return
# print "Committing %s triples to %s" % (len(self.pending_graph), self.statements_url)
data = self.pending_graph.serialize(format="nt")
# RDFlibs nt serializer mistakenly serializes to UTF-8, not
# the unicode escape sequence format mandated by the ntriples
# spec -- fix this:
# let's hope it's already fixed
# data = ''.join([ord(c) > 127 and '\u%04X' % ord(c) or c for c in data.decode('utf-8')])
# reinitialize pending_graph
self.pending_graph = Graph()
for prefix, namespace in list(self.namespaces.items()):
self.pending_graph.bind(prefix, namespace)
return self.add_serialized(data, "nt")
def add_serialized_file(self, filename, format=None):
if self.storetype in (self.SQLITE,self.SLEEPYCAT):
g = self._getcontextgraph()
g.parse(source=filename,format=format)
g.commit()
else:
if self.communication == self.CURL:
# initialize opt
self.__curl(opt)
else:
# initialize req
with open(filename, "rb") as fp:
resp = requests.post(self._statements_url(),
headers={'Content-Type':
self.contenttype[format] + ";charset=UTF-8"},
data=fp)
resp.raise_for_status()
def add_serialized(self, data, format):
if self.storetype in (self.SQLITE,self.SLEEPYCAT):
g = self._getcontextgraph()
g.parse(data=data,format=format)
g.commit()
else:
if format == "turtle" and self.storetype == self.SESAME:
# Sesame has a problem with turtle like the following:
#
# """@prefix : <http://example.org/doc/>
#
# :1 a :Document"""
#
# which gets interpreted like the subject is a predicate like
# "1"^^xsd:integer
#
# Therefore, we convert it to nt prior to posting
g = Graph()
g.parse(data=data,format="turtle")
data = g.serialize(format="nt")
if isinstance(data,str):
# data = data.encode('ascii',errors="ignore")
data = data.encode('utf-8')
# pass
if self.communication == self.CURL:
tmp = mktemp()
with open(tmp, "wb") as fp:
fp.write(data)
self.add_serialized_file(tmp, format)
os.unlink(tmp)
else:
# Requests 1.2 has a bug that leads to duplicated
# Content-type headers under at least python 3, and
# under py 3.3 this causes problem with both fuseki
# and sesame. see end of prepare_body in models.py
# (line 381). one way of working around this bug is
# to use a streaming request, so we wrap our data in a
# file-like object. All ways are good except the bad.
datastream = BytesIO(data)
datastream.len = len(data)
headers={'Content-Type':
self.contenttype[format] + "; charset=UTF-8"}
resp = requests.post(self._statements_url(),
headers=headers,
data=datastream)
if resp.status_code >= 400:
print("Something went wrong posting to %s" % self._statements_url())
print(resp.text.encode('latin-1',errors='xmlcharrefreplace'))
resp.raise_for_status()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import hashlib
import json
from datetime import datetime
from ferenda import util
class DocumentEntry(object):
    """This class has two primary uses -- it is used to represent and store
    aspects of the downloading of each document (when it was initially
    downloaded, optionally updated, and last checked, as well as the URL
    from which it was downloaded). It's also used by the news_* methods
    to encapsulate various aspects of a document entry in an atom
    feed. Some properties and methods are used by both of these use
    cases, but not all."""
    orig_created = None
    """The first time we fetched the resource from it's original location."""
    orig_updated = None
    """The last time the content at the original location of the
    resource was changed."""
    orig_checked = None
    """The last time we accessed the original location of this
    resource, regardless of wheter this led to an update."""
    orig_url = None
    """The main url from where we fetched this resource."""
    published = None
    """The date our parsed/processed version of the document was published."""
    updated = None
    """The last time our parsed/processed version changed in any way
    (due to the original content being updated, or due to changes
    in our parsing functionality."""
    url = None
    """The URL to the browser-ready version of the page, equivalent to
    DocumentEntry.generated_url(basefile)."""
    title = None
    """TBW"""
    summary = None
    """TBW"""
    content = None
    link = None

    def __init__(self, path=None):
        """Create an entry, loading state from the JSON file at *path*
        when it exists, otherwise starting empty.

        :param path: optional path to a JSON-serialized entry; also
                     remembered (as ``self._path``) for later save()
        """
        def myhook(d):
            # json object_hook: revive ISO-8601 timestamp strings into
            # datetime objects for the known date fields
            for key in ('orig_created', 'orig_updated', 'orig_checked', 'published', 'updated'):
                if key in d and d[key]:
                    try:
                        dt = datetime.strptime(d[key], '%Y-%m-%dT%H:%M:%S.%f')
                    except ValueError:
                        # no fractional part
                        dt = datetime.strptime(d[key], '%Y-%m-%dT%H:%M:%S')
                    d[key] = dt
            return d
        if path and os.path.exists(path):
            with open(path) as fp:
                d = json.load(fp, object_hook=myhook)
            self.__dict__.update(d)
            self._path = path
        else:
            self.id = None
            self.orig_updated = None
            self.orig_checked = None
            self.orig_url = None
            self.published = None
            self.updated = None
            self.title = None
            self.summary = None
            self.url = None
            self.content = None
            if path:
                self._path = path
            # Content src="...": A link to the actual document, or the
            # content inline (Source or refined version?)
            self.content = {'src': None, 'type': None, 'markup': None,
                            'hash': None}
            # Link rel="alternate": The metadata for this document (and
            # included resources)
            self.link = {'href': None, 'type': None, 'length': None, 'hash': None}

    def __repr__(self):
        return '<%s id=%s>' % (self.__class__.__name__, self.id)

    def save(self, path=None):
        """Serialize the entry as JSON to *path* (or to the path it was
        loaded from when *path* is omitted). datetime values are written
        in ISO-8601 form, matching what __init__'s hook reads back."""
        def mydefault(obj):
            if isinstance(obj, datetime):
                return obj.isoformat()
            raise TypeError("%r is not JSON serializable" % obj)
        if not path:
            path = self._path  # raises AttributeError if never set
        # private attributes (leading underscore) are not persisted
        d = dict((k, v) for (k, v) in self.__dict__.items() if k[0] != "_")
        util.ensure_dir(path)
        with open(path, "w") as fp:
            json.dump(d, fp, default=mydefault, indent=2, sort_keys=True)

    # If inline=True, the contents of filename is included in the Atom
    # entry. Otherwise, it just references it.
    #
    # Note that you can only have one content element.
    def set_content(self, filename, url, mimetype=None, inline=False):
        """Record *filename* as this entry's content element, either
        inlined (xhtml only) or by reference to *url*."""
        if not mimetype:
            mimetype = self.guess_type(filename)
        self.content['type'] = mimetype
        if inline:
            # NOTE(review): compares against the bare token 'xhtml', not
            # a full MIME type -- callers inlining content apparently
            # pass mimetype='xhtml' explicitly; confirm against callers.
            assert mimetype == 'xhtml', "Can't inline non-xhtml content"
            with open(filename) as fp:
                self.content['markup'] = fp.read()
        else:
            self.content['src'] = url
        self.content['hash'] = "md5:%s" % self.calculate_md5(filename)

    def set_link(self, filename, url, mimetype=None):
        """Record *filename* as this entry's rel="alternate" link
        (href, type, length and md5 hash)."""
        if not mimetype:
            mimetype = self.guess_type(filename)
        self.link['href'] = url
        self.link['type'] = mimetype
        self.link['length'] = os.path.getsize(filename)
        self.link['hash'] = "md5:%s" % self.calculate_md5(filename)

    def calculate_md5(self, filename):
        """Return the hex md5 digest of the contents of *filename*."""
        c = hashlib.md5()
        with open(filename, 'rb') as fp:
            c.update(fp.read())
        return c.hexdigest()

    def guess_type(self, filename):
        """Guess a MIME type from the extension of *filename*, falling
        back to application/octet-stream."""
        exts = {'.pdf': 'application/pdf',
                '.rdf': 'application/rdf+xml',
                '.html': 'text/html',
                # FIX: was 'application/html+xml', which is not a
                # registered media type
                '.xhtml': 'application/xhtml+xml'}
        for ext, mimetype in list(exts.items()):
            if filename.endswith(ext):
                return mimetype
        return "application/octet-stream"
| Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
"""This module finds references to legal sources (including individual
sections, eg 'Upphovsrättslag (1960:729) 49 a §') in plaintext"""
import sys
import os
import re
import codecs
import traceback
from io import StringIO
from pprint import pprint
import hashlib
import locale
import logging
import shutil
import tempfile
# 3rdparty libs
import pyparsing
# needed early
from . import util
# Directory holding the state files (pickled taggers, temp scripts) for
# the external-python2 fallback below; None when simpleparse imports OK.
external_simpleparse_state = None
try:
    from simpleparse.parser import Parser
    from simpleparse.stt.TextTools.TextTools import tag
except ImportError:
    # Mimic the simpleparse interface (the very few parts we're using)
    # but call external python 2.7 processes behind the scene.
    external_simpleparse_state = tempfile.mkdtemp()
    # external_simpleparse_state = "simpleparse.tmp"
    python_exe = os.environ.get("FERENDA_PYTHON2_FALLBACK",
                                "python2.7")
    # Helper script 1: builds a tagger (tag table) from an EBNF
    # declaration under python 2 and pickles it to disk.
    buildtagger_script = external_simpleparse_state+os.sep+"buildtagger.py"
    util.writefile(buildtagger_script, """import sys,os
if sys.version_info >= (3,0,0):
    raise OSError("This is python %s, not python 2.6 or 2.7!" % sys.version_info)
declaration = sys.argv[1] # md5 sum of the entire content of declaration
production = sys.argv[2]  # short production name
picklefile = "%s-%s.pickle" % (declaration, production)
from simpleparse.parser import Parser
from simpleparse.stt.TextTools.TextTools import tag
import cPickle as pickle
with open(declaration,"rb") as fp:
    p = Parser(fp.read(), production)
t = p.buildTagger(production)
with open(picklefile,"wb") as fp:
    pickle.dump(t,fp)""")
    # Helper script 2: loads a pickled tagger, tags a text file with it
    # under python 2, and pickles the tag result to disk.
    tagstring_script = external_simpleparse_state+os.sep+"tagstring.py"
    util.writefile(tagstring_script,"""import sys, os
if sys.version_info >= (3,0,0):
    raise OSError("This is python %s, not python 2.6 or 2.7!" % sys.version_info)
pickled_tagger = sys.argv[1] # what buildtagger.py returned -- full path
full_text_path = sys.argv[2]
text_checksum = sys.argv[3] # md5 sum of text, just the filename
picklefile = "%s-%s.pickle" % (pickled_tagger, text_checksum)
from simpleparse.stt.TextTools.TextTools import tag
import cPickle as pickle
with open(pickled_tagger) as fp:
    t = pickle.load(fp)
with open(full_text_path, "rb") as fp:
    text = fp.read()
tagged = tag(text, t, 0, len(text))
with open(picklefile,"wb") as fp:
    pickle.dump(tagged,fp)
""")
# NOTE(review): this fallback Parser appears at module level rather than
# inside the except-branch above, so it shadows the Parser imported from
# simpleparse even when that import succeeded -- presumably these
# definitions were meant to be indented into the except block; confirm
# intended indentation against upstream before changing anything.
class Parser(object):
    # Drop-in stand-in for simpleparse.parser.Parser that shells out to
    # an external python 2.7 interpreter (see scripts written above).
    def __init__(self, declaration, root='root', prebuilts=(), definitionSources=[]):
        # NOTE(review): mutable default for definitionSources -- harmless
        # here since it is never used, but an anti-pattern.
        # 2. dump declaration to a tmpfile read by the script
        c = hashlib.md5()
        c.update(declaration)
        self.declaration_md5 = c.hexdigest()
        declaration_filename = "%s/%s" % (external_simpleparse_state,
                                          self.declaration_md5)
        with open(declaration_filename,"wb") as fp:
            fp.write(declaration)
    def buildTagger(self, production=None, processor=None):
        # Returns the *filename* of a pickled tag table (cached on disk,
        # keyed on declaration md5 + production) instead of the struct.
        pickled_tagger = "%s/%s-%s.pickle" % (external_simpleparse_state,
                                              self.declaration_md5,
                                              production)
        if not os.path.exists(pickled_tagger):
            # 3. call the script with python 27 and production
            cmdline = "%s %s %s/%s %s" % (python_exe,
                                          buildtagger_script,
                                          external_simpleparse_state,
                                          self.declaration_md5,
                                          production)
            util.runcmd(cmdline, require_success=True)
            # 4. the script builds tagtable and dumps it to a pickle file
        assert os.path.exists(pickled_tagger)
        return pickled_tagger # filename instead of tagtable struct
    # NOTE(review): defined without a self parameter although indented as
    # a method -- in the fallback scenario this is presumably meant to be
    # the module-level tag() replacement; confirm against upstream.
    def tag(text, tagtable, sliceleft, sliceright):
        # Cache the tag result on disk, keyed on tagger file + text md5.
        c = hashlib.md5()
        c.update(text)
        text_checksum = c.hexdigest()
        pickled_tagger = tagtable # remember, not a real tagtable struct
        pickled_tagged = "%s-%s.pickle" % (pickled_tagger, text_checksum)
        if not os.path.exists(pickled_tagged):
            # 2. Dump text as string
            full_text_path = "%s/%s.txt" % (os.path.dirname(pickled_tagger),
                                            text_checksum)
            with open(full_text_path, "wb") as fp:
                fp.write(text)
            # 3. call script (that loads the pickled tagtable + string
            # file, saves tagged text as pickle)
            util.runcmd("%s %s %s %s %s" %
                        (python_exe,
                         tagstring_script,
                         pickled_tagger,
                         full_text_path,
                         text_checksum),
                        require_success = True)
        # 4. load tagged text pickle
        with open(pickled_tagged,"rb") as fp:
            res = pickle.load(fp)
        return res
import six
from six.moves import cPickle as pickle
from rdflib import Graph, BNode, Literal, Namespace, URIRef, RDF, RDFS
# my own libraries
from .elements import UnicodeElement, PredicateType, Link, LinkSubject, serialize
# The charset used for the bytestrings that is sent to/from
# simpleparse (which does not handle unicode)
# Choosing utf-8 makes § a two-byte character, which does not work well
SP_CHARSET = 'iso-8859-1'
log = logging.getLogger('lr')
class NodeTree:
    """Read-only, tree-oriented wrapper around the nested tuple
    structure that mx.TextTools' tag() produces. Exposes three computed
    attributes: ``tag`` (production name), ``text`` (decoded slice of
    the input) and ``nodes`` (child NodeTree objects)."""

    def __init__(self, root, data, offset=0, isRoot=True):
        self.data = data
        self.root = root
        self.isRoot = isRoot
        self.offset = offset

    def __getattr__(self, name):
        # tag/text/nodes are derived lazily from the underlying tuple
        if name == "text":
            return self.data.decode(SP_CHARSET)
        if name == "tag":
            return (self.isRoot and 'root' or self.root[0])
        if name == "nodes":
            # root tuples keep children at index 1, inner ones at index 3
            children = (self.isRoot and self.root[1] or self.root[3])
            wrapped = []
            if children:
                for child in children:
                    start, stop = child[1], child[2]
                    chunk = self.data[start - self.offset:stop - self.offset]
                    wrapped.append(NodeTree(child, chunk, start, False))
            return wrapped
        raise AttributeError
class ParseError(Exception):
    """Raised when the tagger fails to consume the entire input text
    (see LegalRef.parse)."""
    def __init__(self, value):
        # value: human-readable description of how far parsing got
        self.value = value
    def __str__(self):
        return repr(self.value)
# Lite om hur det hela funkar: Att hitta referenser i löptext är en
# tvåstegsprocess.
#
# I det första steget skapar simpleparse en nodstruktur från indata
# och en lämplig ebnf-grammatik. Väldigt lite kod i den här modulen
# hanterar första steget, simpleparse gör det tunga
# jobbet. Nodstrukturen kommer ha noder med samma namn som de
# produktioner som definerats i ebnf-grammatiken.
#
# I andra steget gås nodstrukturen igenom och omvandlas till en lista
# av omväxlande unicode- och Link-objekt. Att skapa Link-objekten är
# det svåra, och det mesta jobbet görs av formatter_dispatch. Den
# tittar på varje nod och försöker hitta ett lämpligt sätt att
# formattera den till ett Link-objekt med en uri-property. Eftersom
# vissa produktioner ska resultera i flera länkar och vissa bara i en
# kan detta inte göras av en enda formatteringsfunktion. För de enkla
# fallen räcker den generiska formatteraren format_tokentree till, men
# för svårare fall skrivs separata formatteringsfunktioner. Dessa har
# namn som matchar produktionerna (exv motsvaras produktionen
# ChapterSectionRefs av funktionen format_ChapterSectionRefs).
#
# Koden är tänkt att vara generell för all sorts referensigenkänning i
# juridisk text. Eftersom den växt från kod som bara hanterade rena
# lagrumsreferenser är det ganska mycket kod som bara är relevant för
# igenkänning av just svenska lagrumsänvisningar så som de förekommer
# i SFS. Sådana funktioner/avsnitt är markerat med "SFS-specifik
# [...]" eller "KOD FÖR LAGRUM"
class LegalRef:
# Kanske detta borde vara 1,2,4,8 osv, så att anroparen kan be om
# LAGRUM | FORESKRIFTER, och så vi kan definera samlingar av
# vanliga kombinationer (exv ALL_LAGSTIFTNING = LAGRUM |
# KORTLAGRUM | FORESKRIFTER | EGLAGSTIFTNING)
LAGRUM = 1 # hänvisningar till lagrum i SFS
KORTLAGRUM = 2 # SFS-hänvisningar på kortform
FORESKRIFTER = 3 # hänvisningar till myndigheters författningssamlingar
EGLAGSTIFTNING = 4 # EG-fördrag, förordningar och direktiv
INTLLAGSTIFTNING = 5 # Fördrag, traktat etc
FORARBETEN = 6 # proppar, betänkanden, etc
RATTSFALL = 7 # Rättsfall i svenska domstolar
MYNDIGHETSBESLUT = 8 # Myndighetsbeslut (JO, ARN, DI...)
EGRATTSFALL = 9 # Rättsfall i EG-domstolen/förstainstansrätten
INTLRATTSFALL = 10 # Europadomstolen
# re_urisegments = re.compile(r'([\w]+://[^/]+/[^\d]*)(\d+:(bih\. |N|)?\d+( s\.\d+|))#?(K(\d+)|)(P(\d+)|)(S(\d+)|)(N(\d+)|)')
re_urisegments = re.compile(r'([\w]+://[^/]+/[^\d]*)(\d+:(bih\.[_ ]|N|)?\d+([_ ]s\.\d+|))#?(K([a-z0-9]+)|)(P([a-z0-9]+)|)(S(\d+)|)(N(\d+)|)')
re_escape_compound = re.compile(
r'\b(\w+-) (och) (\w+-?)(lagen|förordningen)\b', re.UNICODE)
re_escape_named = re.compile(r'\B(lagens?|balkens?|förordningens?|formens?|ordningens?|kungörelsens?|stadgans?)\b', re.UNICODE)
re_descape_compound = re.compile(
r'\b(\w+-)_(och)_(\w+-?)(lagen|förordningen)\b', re.UNICODE)
re_descape_named = re.compile(r'\|(lagens?|balkens?|förordningens?|formens?|ordningens?|kungörelsens?|stadgans?)')
re_xmlcharref = re.compile("&#\d+;")
    def __init__(self, *args):
        """Set up the reference recognizer.

        *args is a set of the LAGRUM/KORTLAGRUM/... class constants
        selecting which kinds of references to recognize. For each kind,
        the corresponding EBNF grammar file is loaded into self.decl and
        a URI formatter is registered, after which a single simpleparse
        tagger is built from the combined declaration."""
        # locate the directory holding res/etc grammar and N3 files
        if not os.path.sep in __file__:
            scriptdir = os.getcwd()
        else:
            scriptdir = os.path.dirname(__file__)
        #n3file = os.path.sep.join([scriptdir,"etc","sfs-extra.n3"])
        #n3url = "file://" + n3file.replace("\\","/")
        #print "scriptdir: %s" % scriptdir
        #print "n3file: %s" % n3file
        #print "n3url: %s" % n3url
        self.graph = Graph()
        n3file = os.path.relpath(scriptdir + "/res/etc/sfs-extra.n3")
        # print "loading n3file %s" % n3file
        self.graph.load(n3file, format="n3")
        self.roots = []          # root production names, in priority order
        self.uriformatter = {}   # production name -> uri formatter method
        self.decl = ""  # try to make it unicode clean all the way
        self.namedlaws = {}      # law name/abbreviation -> URI
        self.load_ebnf(scriptdir + "/res/etc/base.ebnf")
        self.args = args
        if self.LAGRUM in args:
            productions = self.load_ebnf(scriptdir + "/res/etc/lagrum.ebnf")
            for p in productions:
                self.uriformatter[p] = self.sfs_format_uri
            self.namedlaws.update(self.get_relations(RDFS.label))
            self.roots.append("sfsrefs")
            self.roots.append("sfsref")
        if self.KORTLAGRUM in args:
            # if we haven't loaded lagrum.ebnf already we must do it
            # now, since kortlagrum.ebnf depends on productions
            # defined there
            if not self.LAGRUM in args:
                self.load_ebnf(scriptdir + "/res/etc/lagrum.ebnf")
            productions = self.load_ebnf(
                scriptdir + "/res/etc/kortlagrum.ebnf")
            for p in productions:
                self.uriformatter[p] = self.sfs_format_uri
            DCT = Namespace("http://purl.org/dc/terms/")
            d = self.get_relations(DCT['alternate'])
            self.namedlaws.update(d)
            # lawlist = [x.encode(SP_CHARSET) for x in list(d.keys())]
            lawlist = list(d.keys())
            # Make sure longer law abbreviations come before shorter
            # ones (so that we don't mistake "3 § MBL" for "3 § MB"+"L")
            # lawlist.sort(cmp=lambda x, y: len(y) - len(x))
            lawlist.sort(key=len,reverse=True)
            lawdecl = "LawAbbreviation ::= ('%s')\n" % "'/'".join(lawlist)
            self.decl += lawdecl
            self.roots.insert(0, "kortlagrumref")
        if self.EGLAGSTIFTNING in args:
            productions = self.load_ebnf(scriptdir + "/res/etc/eglag.ebnf")
            for p in productions:
                self.uriformatter[p] = self.eglag_format_uri
            self.roots.append("eglagref")
        if self.FORARBETEN in args:
            productions = self.load_ebnf(
                scriptdir + "/res/etc/forarbeten.ebnf")
            for p in productions:
                self.uriformatter[p] = self.forarbete_format_uri
            self.roots.append("forarbeteref")
        if self.RATTSFALL in args:
            productions = self.load_ebnf(scriptdir + "/res/etc/rattsfall.ebnf")
            for p in productions:
                self.uriformatter[p] = self.rattsfall_format_uri
            self.roots.append("rattsfallref")
        if self.EGRATTSFALL in args:
            productions = self.load_ebnf(scriptdir + "/res/etc/egratt.ebnf")
            for p in productions:
                self.uriformatter[p] = self.egrattsfall_format_uri
            self.roots.append("ecjcaseref")
        # the root production tries each enabled reference kind, falling
        # back to "plain" (unrecognized) text
        rootprod = "root ::= (%s/plain)+\n" % "/".join(self.roots)
        self.decl += rootprod
        # build the tagger once, here; rebuilding it per parse() call
        # would be very expensive (see comment in parse())
        self.parser = Parser(self.decl.encode(SP_CHARSET), "root")
        self.tagger = self.parser.buildTagger("root")
        # util.writefile("tagger.tmp", repr(self.tagger), SP_CHARSET)
        # print "tagger length: %d" % len(repr(self.tagger))
        self.verbose = False
        self.depth = 0
        # SFS-specific state (Swedish statute references): remembered
        # law/chapter/section/piece used to resolve partial references
        self.currentlaw = None
        self.currentchapter = None
        self.currentsection = None
        self.currentpiece = None
        self.lastlaw = None
        self.currentlynamedlaws = {}
def load_ebnf(self, file):
"""Laddar in produktionerna i den angivna filen i den
EBNF-deklaration som används, samt returnerar alla
*Ref och *RefId-produktioner"""
# base.ebnf contains 0x1A, ie the EOF character on windows,
# therefore we need to read it in binary mode
f = open(file, 'rb')
# assume our ebnf files use the same charset
content = f.read(os.stat(file).st_size).decode(SP_CHARSET)
self.decl += content
f.close()
return [x.group(1) for x in re.finditer(r'(\w+(Ref|RefID))\s*::=', content)]
def get_relations(self, predicate):
d = {}
for obj, subj in self.graph.subject_objects(predicate):
d[six.text_type(subj)] = six.text_type(obj)
return d
    def parse(self, indata, baseuri="http://rinfo.lagrummet.se/publ/sfs/9999:999#K9P9S9P9", predicate=None):
        """Find legal references in the unicode string *indata* and
        return a list of alternating plain strings and Link/LinkSubject
        objects (one per recognized reference, with a resolved uri).

        *baseuri* provides the context (law/chapter/section/...) used to
        resolve relative references; *predicate* is attached to the
        produced links. Raises ParseError when the tagger fails to
        consume the whole input."""
        assert isinstance(indata, six.text_type)
        if indata == "":
            return indata  # this actually triggered a bug...
        # h = hashlib.sha1()
        # h.update(indata)
        # print "Called with %r (%s) (%s)" % (indata, h.hexdigest(), self.verbose)
        self.predicate = predicate
        self.baseuri = baseuri
        # decompose the base URI into its law/chapter/section/... parts
        if baseuri:
            m = self.re_urisegments.match(baseuri)
            if m:
                self.baseuri_attributes = {'baseuri': m.group(1),
                                           'law': m.group(2),
                                           'chapter': m.group(6),
                                           'section': m.group(8),
                                           'piece': m.group(10),
                                           'item': m.group(12)}
            else:
                self.baseuri_attributes = {'baseuri': baseuri}
        else:
            self.baseuri_attributes = {}
        # It is hard to make the EBNF grammar recognize arbitrary words
        # ending in a given suffix (e.g. 'bokföringslagen' with the
        # suffix 'lagen'). Therefore we preprocess the input string and
        # insert a '|' character before certain suffixes. We also
        # transform 'Radio- och TV-lagen' into 'Radio-_och_TV-lagen'.
        #
        # FIXME: Obviously, this shouldn't be done in a general class,
        # but rather in a subclas or via proxy/adapter
        fixedindata = indata  # FIXME: Nonsensical
        if self.LAGRUM in self.args:
            fixedindata = self.re_escape_compound.sub(
                r'\1_\2_\3\4', fixedindata)
            fixedindata = self.re_escape_named.sub(r'|\1', fixedindata)
        # print "After: %r" % type(fixedindata)
        # SimpleParse has no support for unicode strings, so we convert
        # the input to a bytestring. Unfortunately I can't get the whole
        # thing to work with UTF8, so we use xml character references
        # instead
        fixedindata = fixedindata.encode(SP_CHARSET, 'xmlcharrefreplace')
        # Parse the text with TextTools.tag - not the simplest way to do
        # it, but if you follow the Simpleparse documentation the tagger
        # table is rebuilt for every call to parse()
        if self.verbose:
            print(("calling tag with '%s'" % (fixedindata.decode(SP_CHARSET))))
        # print "tagger length: %d" % len(repr(self.tagger))
        taglist = tag(fixedindata, self.tagger, 0, len(fixedindata))
        result = []
        root = NodeTree(taglist, fixedindata)
        for part in root.nodes:
            if part.tag != 'plain' and self.verbose:
                sys.stdout.write(self.prettyprint(part))
            if part.tag in self.roots:
                # each recognized reference starts from a clean state
                self.clear_state()
                # self.verbose = False
                result.extend(self.formatter_dispatch(part))
            else:
                assert part.tag == 'plain', "Tag is %s" % part.tag
                result.append(part.text)
            # clear state
            if self.currentlaw is not None:
                self.lastlaw = self.currentlaw
                self.currentlaw = None
        # taglist[-1] is the index where tagging stopped; anything short
        # of the full input length means the grammar failed midway
        if taglist[-1] != len(fixedindata):
            log.error('Problem (%d:%d) with %r / %r' % (
                taglist[-1] - 8, taglist[-1] + 8, fixedindata, indata))
            raise ParseError(
                "parsed %s chars of %s (...%s...)" % (taglist[-1], len(indata),
                                                      indata[(taglist[-1] - 2):taglist[-1] + 3]))
        # Normalize the result, i.e. concatenate adjacent text nodes and
        # remove any '|' characters that we inserted earlier.
        normres = []
        for i in range(len(result)):
            if not self.re_descape_named.search(result[i]):
                node = result[i]
            else:
                if self.LAGRUM in self.args:
                    text = self.re_descape_named.sub(r'\1', result[i])
                    text = self.re_descape_compound.sub(r'\1 \2 \3\4', text)
                if isinstance(result[i], Link):
                    # Since Link objects are immutable we have to create
                    # a new one and copy its attributes
                    if hasattr(result[i], 'predicate'):
                        node = LinkSubject(text, predicate=result[i].predicate,
                                           uri=result[i].uri)
                    else:
                        node = Link(text, uri=result[i].uri)
                else:
                    node = text
            if (len(normres) > 0
                    and not isinstance(normres[-1], Link)
                    and not isinstance(node, Link)):
                normres[-1] += node
            else:
                normres.append(node)
        # and finally... decode the xml character references we inserted
        # before tagging back into real characters (plain text only)
        for i in range(len(normres)):
            if isinstance(normres[i], Link):
                # deal with these later
                pass
            else:
                normres[i] = self.re_xmlcharref.sub(
                    self.unescape_xmlcharref, normres[i])
        return normres
def unescape_xmlcharref(self, m):
# print "Changing %r to a %r" % (m.group(0)[2:-1], unichr(int(m.group(0)[2:-1])))
return chr(int(m.group(0)[2:-1]))
def find_attributes(self, parts, extra={}):
"""recurses through a parse tree and creates a dictionary of
attributes"""
d = {}
self.depth += 1
if self.verbose:
print(
(". " * self.depth + "find_attributes: starting with %s" % d))
if extra:
d.update(extra)
for part in parts:
current_part_tag = part.tag.lower()
if current_part_tag.endswith('refid'):
if ((current_part_tag == 'singlesectionrefid') or
(current_part_tag == 'lastsectionrefid')):
current_part_tag = 'sectionrefid'
d[current_part_tag[:-5]] = part.text.strip()
if self.verbose:
print((". " * self.depth +
"find_attributes: d is now %s" % d))
if part.nodes:
d.update(self.find_attributes(part.nodes, d))
if self.verbose:
print((". " * self.depth + "find_attributes: returning %s" % d))
self.depth -= 1
if self.currentlaw and 'law' not in d:
d['law'] = self.currentlaw
if self.currentchapter and 'chapter' not in d:
d['chapter'] = self.currentchapter
if self.currentsection and 'section' not in d:
d['section'] = self.currentsection
if self.currentpiece and 'piece' not in d:
d['piece'] = self.currentpiece
return d
def find_node(self, root, nodetag):
"""Returns the first node in the tree that has a tag matching nodetag. The search is depth-first"""
if root.tag == nodetag: # base case
return root
else:
for node in root.nodes:
x = self.find_node(node, nodetag)
if x is not None:
return x
return None
def find_nodes(self, root, nodetag):
if root.tag == nodetag:
return [root]
else:
res = []
for node in root.nodes:
res.extend(self.find_nodes(node, nodetag))
return res
def flatten_tokentree(self, part, suffix):
"""returns a 'flattened' tokentree ie for the following tree and the suffix 'RefID'
foo->bar->BlahongaRefID
->baz->quux->Blahonga2RefID
->Blahonga3RefID
->Blahonga4RefID
this should return [BlahongaRefID, Blahonga2RefID, Blahonga3RefID, Blahonga4RefID]"""
l = []
if part.tag.endswith(suffix):
l.append(part)
if not part.nodes:
return l
for subpart in part.nodes:
l.extend(self.flatten_tokentree(subpart, suffix))
return l
def formatter_dispatch(self, part):
# print "Verbositiy: %r" % self.verbose
self.depth += 1
# Finns det en skräddarsydd formatterare?
if "format_" + part.tag in dir(self):
formatter = getattr(self, "format_" + part.tag)
if self.verbose:
print(((". " * self.depth) + "formatter_dispatch: format_%s defined, calling it" % part.tag))
res = formatter(part)
assert res is not None, "Custom formatter for %s didn't return anything" % part.tag
else:
if self.verbose:
print(((". " * self.depth) + "formatter_dispatch: no format_%s, using format_tokentree" % part.tag))
res = self.format_tokentree(part)
if res is None:
print(((". " * self.depth) +
"something wrong with this:\n" + self.prettyprint(part)))
self.depth -= 1
return res
def format_tokentree(self, part):
# This is the default formatter. It converts every token that
# ends with a RefID into a Link object. For grammar
# productions like SectionPieceRefs, which contain
# subproductions that also end in RefID, this is not a good
# function to use - use a custom formatter instead.
res = []
if self.verbose:
print(((". " * self.depth) +
"format_tokentree: called for %s" % part.tag))
# this is like the bottom case, or something
if (not part.nodes) and (not part.tag.endswith("RefID")):
res.append(part.text)
else:
if part.tag.endswith("RefID"):
res.append(self.format_generic_link(part))
elif part.tag.endswith("Ref"):
res.append(self.format_generic_link(part))
else:
for subpart in part.nodes:
if self.verbose and part.tag == 'LawRef':
print(((". " * self.depth) + "format_tokentree: part '%s' is a %s" % (subpart.text, subpart.tag)))
res.extend(self.formatter_dispatch(subpart))
if self.verbose:
print(((". " * self.depth) + "format_tokentree: returning '%s' for %s" % (res, part.tag)))
return res
def prettyprint(self, root, indent=0):
res = "%s'%s': '%s'\n" % (
" " * indent, root.tag, re.sub(r'\s+', ' ', root.text))
if root.nodes is not None:
for subpart in root.nodes:
res += self.prettyprint(subpart, indent + 1)
return res
else:
return ""
def format_generic_link(self, part, uriformatter=None):
try:
uri = self.uriformatter[part.tag](self.find_attributes([part]))
except KeyError:
if uriformatter:
uri = uriformatter(self.find_attributes([part]))
else:
uri = self.sfs_format_uri(self.find_attributes([part]))
except AttributeError:
# Normal error from eglag_format_uri
return part.text
except:
exc = sys.exc_info()
# If something else went wrong, just return the plaintext
log.warning("(unknown): Unable to format link for text %s (production %s)" % (part.text, part.tag))
return part.text
if self.verbose:
print((
(". " * self.depth) + "format_generic_link: uri is %s" % uri))
if not uri:
# the formatting function decided not to return a URI for
# some reason (maybe it was a partial/relative reference
# without a proper base uri context
return part.text
elif self.predicate:
return LinkSubject(part.text, uri=uri, predicate=self.predicate)
else:
return Link(part.text, uri=uri)
# FIXME: unify this with format_generic_link
def format_custom_link(self, attributes, text, production):
try:
uri = self.uriformatter[production](attributes)
except KeyError:
uri = self.sfs_format_uri(attributes)
if not uri:
# the formatting function decided not to return a URI for
# some reason (maybe it was a partial/relative reference
# without a proper base uri context
return part.text
elif self.predicate:
return LinkSubject(text, uri=uri, predicate=self.predicate)
else:
return Link(text, uri=uri)
################################################################
# KOD FÖR LAGRUM
def clear_state(self):
self.currentlaw = None
self.currentchapter = None
self.currentsection = None
self.currentpiece = None
def normalize_sfsid(self, sfsid):
# sometimes '1736:0123 2' is given as '1736:0123 s. 2' or
# '1736:0123.2'. This fixes that.
sfsid = re.sub(r'(\d+:\d+)\.(\d)', r'\1 \2', sfsid)
#return sfsid.replace('s. ','').replace('s.','') # more advanced normalizations to come...
return sfsid
def normalize_lawname(self, lawname):
lawname = lawname.replace('|', '').replace('_', ' ').lower()
if lawname.endswith('s'):
lawname = lawname[:-1]
return lawname
    def namedlaw_to_sfsid(self, text, normalize=True):
        """Look up the SFS id for a law referred to by name.

        Returns None for words that merely look like law names (the
        nolaw list below), and for names found neither among the laws
        named earlier in this document nor in the static list of known
        named laws.

        :param text: the law name (already normalized if normalize=False)
        :param normalize: run the name through normalize_lawname first
        :returns: the SFS id string, or None
        """
        if normalize:
            text = self.normalize_lawname(text)
        # Words that end like law names ('...lagen', '...ordningen',
        # '...formen' etc.) but are not laws and must never resolve.
        nolaw = [
            'aktieslagen',
            'anordningen',
            'anordningen',
            'anslagen',
            'arbetsordningen',
            'associationsformen',
            'avfallsslagen',
            'avslagen',
            'avvittringsutslagen',
            'bergslagen',
            'beskattningsunderlagen',
            'bolagen',
            'bolagsordningen',
            'bolagsordningen',
            'dagordningen',
            'djurslagen',
            'dotterbolagen',
            'emballagen',
            'energislagen',
            'ersättningsformen',
            'ersättningsslagen',
            'examensordningen',
            'finansbolagen',
            'finansieringsformen',
            'fissionsvederlagen',
            'flygbolagen',
            'fondbolagen',
            'förbundsordningen',
            'föreslagen',
            'företrädesordningen',
            'förhandlingsordningen',
            'förlagen',
            'förmånsrättsordningen',
            'förmögenhetsordningen',
            'förordningen',
            'förslagen',
            'försäkringsaktiebolagen',
            'försäkringsbolagen',
            'gravanordningen',
            'grundlagen',
            'handelsplattformen',
            'handläggningsordningen',
            'inkomstslagen',
            'inköpssamordningen',
            'kapitalunderlagen',
            'klockslagen',
            'kopplingsanordningen',
            'låneformen',
            'mervärdesskatteordningen',
            'nummerordningen',
            'omslagen',
            'ordalagen',
            'pensionsordningen',
            'renhållningsordningen',
            'representationsreformen',
            'rättegångordningen',
            'rättegångsordningen',
            'rättsordningen',
            'samordningen',
            'samordningen',
            'skatteordningen',
            'skatteslagen',
            'skatteunderlagen',
            'skolformen',
            'skyddsanordningen',
            'slagen',
            'solvärmeanordningen',
            'storslagen',
            'studieformen',
            'stödformen',
            'stödordningen',
            'stödordningen',
            'säkerhetsanordningen',
            'talarordningen',
            'tillslagen',
            'tivolianordningen',
            'trafikslagen',
            'transportanordningen',
            'transportslagen',
            'trädslagen',
            'turordningen',
            'underlagen',
            'uniformen',
            'uppställningsformen',
            'utvecklingsbolagen',
            'varuslagen',
            'verksamhetsformen',
            'vevanordningen',
            'vårdformen',
            'ägoanordningen',
            'ägoslagen',
            'ärendeslagen',
            'åtgärdsförslagen',
        ]
        if text in nolaw:
            return None
        # laws named (and resolved) earlier in this document take
        # precedence over the static list of known named laws
        if text in self.currentlynamedlaws:
            return self.currentlynamedlaws[text]
        elif text in self.namedlaws:
            return self.namedlaws[text]
        else:
            if self.verbose:
                # print "(unknown): I don't know the ID of named law [%s]" % text
                log.warning(
                    "(unknown): I don't know the ID of named law [%s]" % text)
            return None
    def sfs_format_uri(self, attributes):
        """Construct a rinfo URI for an SFS statute reference from an
        attribute dict (as produced by find_attributes).

        Components are emitted left-to-right following attributeorder,
        using one-letter prefixes (K=chapter, P=section, S=piece,
        N=item, O=element, M=sentence, L=lawref).  For relative
        references, missing leading components are filled in from
        self.baseuri_attributes -- but only up to the first attribute
        given explicitly in the reference itself."""
        # spelled-out piece ordinals -> digits
        piecemappings = {'första': '1',
                         'andra': '2',
                         'tredje': '3',
                         'fjärde': '4',
                         'femte': '5',
                         'sjätte': '6',
                         'sjunde': '7',
                         'åttonde': '8',
                         'nionde': '9'}
        # attribute name -> single-letter URI fragment prefix
        keymapping = {'lawref': 'L',
                      'chapter': 'K',
                      'section': 'P',
                      'piece': 'S',
                      'item': 'N',
                      'itemnumeric': 'N',
                      'element': 'O',
                      'sentence': 'M', # is this ever used?
                      }
        attributeorder = ['law', 'lawref', 'chapter', 'section',
                          'element', 'piece', 'item', 'itemnumeric', 'sentence']
        if 'law' in attributes:
            if attributes['law'].startswith('http://'):
                res = ''
            else:
                res = 'http://rinfo.lagrummet.se/publ/sfs/'
        else:
            if 'baseuri' in self.baseuri_attributes:
                res = self.baseuri_attributes['baseuri']
            else:
                res = ''
        # may we still fall back to baseuri_attributes components?
        resolvetobase = True
        # emit '#' before the next component (set right after the law id)
        addfragment = False
        # deferred 'S1' piece, emitted only if a later component follows
        justincase = None
        for key in attributeorder:
            if key in attributes:
                resolvetobase = False
                val = attributes[key]
            elif (resolvetobase and key in self.baseuri_attributes):
                val = self.baseuri_attributes[key]
            else:
                val = None
            if val:
                if not isinstance(val,six.text_type):
                    val = val.decode(SP_CHARSET)
                if addfragment:
                    res += '#'
                    addfragment = False
                # NOTE(review): membership is tested on val but the
                # lookup uses val.lower(); a capitalized ordinal falls
                # through to the else branch -- confirm intended
                if (key in ['piece', 'itemnumeric', 'sentence'] and val in piecemappings):
                    res += '%s%s' % (
                        keymapping[key], piecemappings[val.lower()])
                else:
                    if key == 'law':
                        val = self.normalize_sfsid(val)
                        val = val.replace(" ", "_")
                        res += val
                        addfragment = True
                    else:
                        if justincase:
                            res += justincase
                            justincase = None
                        val = val.replace(" ", "")
                        val = val.replace("\n", "")
                        val = val.replace("\r", "")
                        res += '%s%s' % (keymapping[key], val)
            else:
                if key == 'piece':
                    justincase = "S1"
        return res
def format_ChapterSectionRefs(self, root):
assert(root.tag == 'ChapterSectionRefs')
assert(len(root.nodes) == 3) # ChapterRef, wc, SectionRefs
part = root.nodes[0]
self.currentchapter = part.nodes[0].text.strip()
if self.currentlaw:
res = [self.format_custom_link({'law':self.currentlaw,
'chapter':self.currentchapter},
part.text,
part.tag)]
else:
res = [self.format_custom_link({'chapter':self.currentchapter},
part.text,
part.tag)]
res.extend(self.formatter_dispatch(root.nodes[1]))
res.extend(self.formatter_dispatch(root.nodes[2]))
self.currentchapter = None
return res
def format_ChapterSectionPieceRefs(self, root):
assert(root.nodes[0].nodes[0].tag == 'ChapterRefID')
self.currentchapter = root.nodes[0].nodes[0].text.strip()
res = []
for node in root.nodes:
res.extend(self.formatter_dispatch(node))
return res
def format_LastSectionRef(self, root):
# the last section ref is a bit different, since we want the
# ending double section mark to be part of the link text
assert(root.tag == 'LastSectionRef')
assert(len(root.nodes) == 3) # LastSectionRefID, wc, DoubleSectionMark
sectionrefid = root.nodes[0]
sectionid = sectionrefid.text
return [self.format_generic_link(root)]
def format_SectionPieceRefs(self, root):
assert(root.tag == 'SectionPieceRefs')
self.currentsection = root.nodes[0].nodes[0].text.strip()
res = [self.format_custom_link(self.find_attributes([root.nodes[2]]),
"%s %s" % (root.nodes[0]
.text, root.nodes[2].text),
root.tag)]
for node in root.nodes[3:]:
res.extend(self.formatter_dispatch(node))
self.currentsection = None
return res
def format_SectionPieceItemRefs(self, root):
assert(root.tag == 'SectionPieceItemRefs')
self.currentsection = root.nodes[0].nodes[0].text.strip()
self.currentpiece = root.nodes[2].nodes[0].text.strip()
res = [self.format_custom_link(self.find_attributes([root.nodes[2]]),
"%s %s" % (root.nodes[0]
.text, root.nodes[2].text),
root.tag)]
for node in root.nodes[3:]:
res.extend(self.formatter_dispatch(node))
self.currentsection = None
self.currentpiece = None
return res
    # This is a special case for things like '17-29 och 32 §§ i lagen
    # (2004:575)', which picks out the LawRefID first and stores it in
    # .currentlaw, so that find_attributes finds it
    # automagically. Although now it seems to be branching out and be
    # all things to all people.
    def format_ExternalRefs(self, root):
        """Format references into another law, resolving which law is
        meant (explicit SFS number, a named law, or a back-reference
        like 'samma lag') before formatting the section references.

        Returns the plain text unchanged when the law cannot be
        identified, since resolving chapter/section references against
        the wrong base would produce bogus links."""
        assert(root.tag == 'ExternalRefs')
        # print "DEBUG: start of format_ExternalRefs; self.currentlaw is %s" % self.currentlaw
        lawrefid_node = self.find_node(root, 'LawRefID')
        if lawrefid_node is None:
            # Ok, no explicit LawRefID found, lets see if this is a named law that we have the ID for
            # namedlaw_node = self.find_node(root, 'NamedLawExternalLawRef')
            namedlaw_node = self.find_node(root, 'NamedLaw')
            if namedlaw_node is None:
                # As a last chance, this might be a reference back to a previously mentioned law ("...enligt 4 § samma lag")
                samelaw_node = self.find_node(root, 'SameLaw')
                assert(samelaw_node is not None)
                if self.lastlaw is None:
                    log.warning("(unknown): found reference to \"{samma,nämnda} {lag,förordning}\", but self.lastlaw is not set")
                self.currentlaw = self.lastlaw
            else:
                # the NamedLaw case
                self.currentlaw = self.namedlaw_to_sfsid(namedlaw_node.text)
                if self.currentlaw is None:
                    # unknow law name - in this case it's better to
                    # bail out rather than resolving chapter/paragraph
                    # references relative to baseuri (which is almost
                    # certainly wrong)
                    return [root.text]
        else:
            self.currentlaw = lawrefid_node.text
            if self.find_node(root, 'NamedLaw'):
                # remember the mapping from this name to its SFS id for
                # later references in the same document
                namedlaw = self.normalize_lawname(
                    self.find_node(root, 'NamedLaw').text)
                # print "remember that %s is %s!" % (namedlaw, self.currentlaw)
                self.currentlynamedlaws[namedlaw] = self.currentlaw
        #print "DEBUG: middle of format_ExternalRefs; self.currentlaw is %s" % self.currentlaw
        if self.lastlaw is None:
            #print "DEBUG: format_ExternalRefs: setting self.lastlaw to %s" % self.currentlaw
            self.lastlaw = self.currentlaw
        # if the node tree only contains a single reference, it looks
        # better if the entire expression, not just the
        # chapter/section part, is linked. But not if it's a
        # "anonymous" law ('1 § i lagen (1234:234) om blahonga')
        if (len(self.find_nodes(root, 'GenericRefs')) == 1 and
            len(self.find_nodes(root, 'SectionRefID')) == 1 and
                len(self.find_nodes(root, 'AnonymousExternalLaw')) == 0):
            res = [self.format_generic_link(root)]
        else:
            res = self.format_tokentree(root)
        return res
def format_SectionItemRefs(self, root):
assert(root.nodes[0].nodes[0].tag == 'SectionRefID')
self.currentsection = root.nodes[0].nodes[0].text.strip()
#res = self.formatter_dispatch(root.nodes[0]) # was formatter_dispatch(self.root)
res = self.format_tokentree(root)
self.currentsection = None
return res
def format_PieceItemRefs(self, root):
self.currentpiece = root.nodes[0].nodes[0].text.strip()
res = [self.format_custom_link(
self.find_attributes([root.nodes[2].nodes[0]]),
"%s %s" % (root.nodes[0].text, root.nodes[2].nodes[0].text),
root.tag)]
for node in root.nodes[2].nodes[1:]:
res.extend(self.formatter_dispatch(node))
self.currentpiece = None
return res
def format_ChapterSectionRef(self, root):
assert(root.nodes[0].nodes[0].tag == 'ChapterRefID')
self.currentchapter = root.nodes[0].nodes[0].text.strip()
return [self.format_generic_link(root)]
def format_AlternateChapterSectionRefs(self, root):
assert(root.nodes[0].nodes[0].tag == 'ChapterRefID')
self.currentchapter = root.nodes[0].nodes[0].text.strip()
# print "Self.currentchapter is now %s" % self.currentchapter
res = self.format_tokentree(root)
self.currentchapter = None
return res
def format_ExternalLaw(self, root):
self.currentchapter = None
return self.formatter_dispatch(root.nodes[0])
def format_ChangeRef(self, root):
id = self.find_node(root, 'LawRefID').data
return [self.format_custom_link({'lawref':id},
root.text,
root.tag)]
    def format_SFSNr(self, root):
        """When no base URI has been established yet, use the SFS number
        in this reference to set baseuri_attributes, then format the
        tree with the default formatter."""
        if self.baseuri is None:
            # NOTE(review): .data appears to be bytes here (it is
            # decoded with SP_CHARSET) -- confirm against NodeTree
            sfsid = self.find_node(root, 'LawRefID').data
            baseuri = 'http://rinfo.lagrummet.se/publ/sfs/%s#' % sfsid.decode(SP_CHARSET)
            self.baseuri_attributes = {'baseuri': baseuri}
        return self.format_tokentree(root)
    def format_NamedExternalLawRef(self, root):
        """Format a reference to a law given by name (possibly with an
        explicit SFS number in parentheses).

        Resolves the law id (remembering the name->id mapping for later
        references), links the whole expression, and -- when no base
        URI is set -- uses the resolved law as the new base context for
        subsequent relative references."""
        resetcurrentlaw = False
        #print "format_NamedExternalLawRef: self.currentlaw is %r" % self.currentlaw
        if self.currentlaw is None:
            resetcurrentlaw = True
            lawrefid_node = self.find_node(root, 'LawRefID')
            if lawrefid_node is None:
                # no explicit SFS number: resolve by name
                self.currentlaw = self.namedlaw_to_sfsid(root.text)
            else:
                self.currentlaw = lawrefid_node.text
                # remember the name -> id mapping for later references
                namedlaw = self.normalize_lawname(
                    self.find_node(root, 'NamedLaw').text)
                # print "remember that %s is %s!" % (namedlaw, self.currentlaw)
                self.currentlynamedlaws[namedlaw] = self.currentlaw
        #print "format_NamedExternalLawRef: self.currentlaw is now %r" % self.currentlaw
        #print "format_NamedExternalLawRef: self.baseuri is %r" % self.baseuri
        if self.currentlaw is None:  # if we can't find a ID for this law, better not <link> it
            res = [root.text]
        else:
            res = [self.format_generic_link(root)]
        #print "format_NamedExternalLawRef: self.baseuri is %r" % self.baseuri
        if self.baseuri is None and self.currentlaw is not None:
            #print "format_NamedExternalLawRef: setting baseuri_attributes"
            # use this as the new baseuri_attributes
            m = self.re_urisegments.match(self.currentlaw)
            if m:
                # currentlaw is already a full URI: split it into parts
                self.baseuri_attributes = {'baseuri': m.group(1),
                                           'law': m.group(2),
                                           'chapter': m.group(6),
                                           'section': m.group(8),
                                           'piece': m.group(10),
                                           'item': m.group(12)}
            else:
                self.baseuri_attributes = {'baseuri': 'http://rinfo.lagrummet.se/publ/sfs/' + self.currentlaw + '#'}
        if resetcurrentlaw:
            if self.currentlaw is not None:
                self.lastlaw = self.currentlaw
            self.currentlaw = None
        return res
################################################################
# KOD FÖR KORTLAGRUM
def format_AbbrevLawNormalRef(self, root):
lawabbr_node = self.find_node(root, 'LawAbbreviation')
self.currentlaw = self.namedlaw_to_sfsid(
lawabbr_node.text, normalize=False)
res = [self.format_generic_link(root)]
if self.currentlaw is not None:
self.lastlaw = self.currentlaw
self.currentlaw = None
return res
def format_AbbrevLawShortRef(self, root):
assert(root.nodes[0].tag == 'LawAbbreviation')
assert(root.nodes[2].tag == 'ShortChapterSectionRef')
self.currentlaw = self.namedlaw_to_sfsid(
root.nodes[0].text, normalize=False)
shortsection_node = root.nodes[2]
assert(shortsection_node.nodes[0].tag == 'ShortChapterRefID')
assert(shortsection_node.nodes[2].tag == 'ShortSectionRefID')
self.currentchapter = shortsection_node.nodes[0].text
self.currentsection = shortsection_node.nodes[2].text
res = [self.format_generic_link(root)]
self.currentchapter = None
self.currentsection = None
self.currentlaw = None
return res
################################################################
# KOD FÖR FORARBETEN
def forarbete_format_uri(self, attributes):
# res = self.baseuri_attributes['baseuri']
res = 'http://rinfo.lagrummet.se/'
resolvetobase = True
addfragment = False
for key, val in list(attributes.items()):
if key == 'prop':
res += "publ/prop/%s" % val
elif key == 'bet':
res += "publ/bet/%s" % val
elif key == 'skrivelse':
res += "publ/rskr/%s" % val
elif key == 'celex':
if len(val) == 8: # incorrectly formatted, uses YY instead of YYYY
val = val[0] + '19' + val[1:]
res += "ext/eur-lex/%s" % val
if 'sidnr' in attributes:
res += "#s%s" % attributes['sidnr']
return res
def format_ChapterSectionRef(self, root):
assert(root.nodes[0].nodes[0].tag == 'ChapterRefID')
self.currentchapter = root.nodes[0].nodes[0].text.strip()
return [self.format_generic_link(root)]
################################################################
# KOD FÖR EGLAGSTIFTNING
def eglag_format_uri(self, attributes):
res = 'http://rinfo.lagrummet.se/ext/celex/'
if not 'akttyp' in attributes:
if 'forordning' in attributes:
attributes['akttyp'] = 'förordning'
elif 'direktiv' in attributes:
attributes['akttyp'] = 'direktiv'
if 'akttyp' not in attributes:
raise AttributeError("Akttyp saknas")
# Om hur CELEX-nummer konstrueras
# https://www.infotorg.sema.se/infotorg/itweb/handbook/rb/hlp_celn.htm
# https://www.infotorg.sema.se/infotorg/itweb/handbook/rb/hlp_celf.htm
# Om hur länkning till EURLEX ska se ut:
# http://eur-lex.europa.eu/sv/tools/help_syntax.htm
# Absolut URI?
if 'ar' in attributes and 'lopnummer' in attributes:
sektor = '3'
rattslig_form = {'direktiv': 'L',
'förordning': 'R'}
if len(attributes['ar']) == 2:
attributes['ar'] = '19' + attributes['ar']
res += "%s%s%s%04d" % (sektor, attributes['ar'],
rattslig_form[attributes['akttyp']],
int(attributes['lopnummer']))
else:
if not self.baseuri_attributes['baseuri'].startswith(res):
# FIXME: should we warn about this?
# print "Relative reference, but base context %s is not a celex context" % self.baseuri_attributes['baseuri']
return None
if 'artikel' in attributes:
res += "#%s" % attributes['artikel']
if 'underartikel' in attributes:
res += ".%s" % attributes['underartikel']
return res
################################################################
# KOD FÖR RATTSFALL
def rattsfall_format_uri(self, attributes):
# Listan härledd från containers.n3/rattsfallsforteckningar.n3 i
# rinfoprojektets källkod - en ambitiösare lösning vore att läsa
# in de faktiska N3-filerna i en rdflib-graf.
containerid = {'NJA': '/publ/rattsfall/nja/',
'RH': '/publ/rattsfall/rh/',
'MÖD': '/publ/rattsfall/mod/',
'RÅ': '/publ/rattsfall/ra/',
'RK': '/publ/rattsfall/rk/',
'MIG': '/publ/rattsfall/mig/',
'AD': '/publ/rattsfall/ad/',
'MD': '/publ/rattsfall/md/',
'FÖD': '/publ/rattsfall/fod/'}
# res = self.baseuri_attributes['baseuri']
if 'nja' in attributes:
attributes['domstol'] = attributes['nja']
assert 'domstol' in attributes, "No court provided"
assert attributes['domstol'] in containerid, "%s is an unknown court" % attributes['domstol']
res = "http://rinfo.lagrummet.se" + containerid[attributes['domstol']]
if 'lopnr' in attributes and ":" in attributes['lopnr']:
(attributes['ar'], attributes['lopnr']) = lopnr.split(":", 1)
if attributes['domstol'] == 'NJA':
# FIXME: URIs should be based on publikationsordinal, not
# pagenumber (which this in effect is) - but this requires
# a big lookup table/database/graph with
# pagenumber-to-ordinal-mappings
res += '%ss%s' % (attributes['ar'], attributes['sidnr'])
else:
res += '%s:%s' % (attributes['ar'], attributes['lopnr'])
return res
################################################################
# KOD FÖR EGRÄTTSFALL
def egrattsfall_format_uri(self, attributes):
descriptormap = {'C': 'J', # Judgment of the Court
'T': 'A', # Judgment of the Court of First Instance
'F': 'W', # Judgement of the Civil Service Tribunal
}
# FIXME: Change this before the year 2054 (as ECJ will
# hopefully have fixed their case numbering by then)
if len(attributes['year']) == 2:
if int(attributes['year']) < 54:
year = "20" + attributes['year']
else:
year = "19" + attributes['year']
else:
year = attributes['year']
serial = '%04d' % int(attributes['serial'])
descriptor = descriptormap[attributes['decision']]
uri = "http://lagen.nu/ext/celex/6%s%s%s" % (year, descriptor, serial)
return uri
| Python |
#!/usr/bin/env python
# Command-line entry point: put the parent directory on sys.path so
# that the in-tree 'ferenda' package is importable, then hand the
# command line over to ferenda.manager.
import sys, os
sys.path.append(os.path.normpath(os.getcwd()+os.sep+os.pardir))
from ferenda import manager
manager.run(sys.argv[1:])
| Python |
#!/usr/bin/env python
# WSGI entry point: expose the ferenda application under the standard
# name 'application'.  If ferenda cannot be imported (e.g. a
# misconfigured sys.path in the deployment), fall back to a minimal
# WSGI app that reports the import failure as a 500 response.
from __future__ import unicode_literals
import os
import sys
sys.path.append("..")
try:
    from ferenda.manager import make_wsgi_app
    # FIXME: should we chdir to os.path.dirname(__file__) instead?
    inifile = os.path.join(os.path.dirname(__file__), "ferenda.ini")
    application = make_wsgi_app(inifile=inifile)
except ImportError as e:
    exception_data = str(e)
    def application(environ, start_response):
        # Diagnostic fallback: explain why the real app failed to load
        msg = """500 Internal Server Error: %s
sys.path: %r
os.getcwd(): %s""" % (exception_data, sys.path, os.getcwd())
        msg = msg.encode('ascii')
        start_response("500 Internal Server Error", [
            ("Content-Type", "text/plain"),
            ("Content-Length", str(len(msg)))
        ])
        return iter([msg])
| Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""This module constructs URIs for a document based on the properties
of that document. Alternatively, given a URI for a document, parse the
different properties for the document"""
# system libs
import sys
import re
from pprint import pformat
# 3rdparty libs
from rdflib import Literal, Namespace, URIRef, RDF, RDFS
from rdflib import Graph
from rdflib import BNode
# my own libraries
from .legalref import LegalRef
from . import util
# RDF namespaces used when constructing/parsing document URIs
RPUBL = Namespace('http://rinfo.lagrummet.se/ns/2008/11/rinfo/publ#')
RINFOEX = Namespace("http://lagen.nu/terms#")
DCT = Namespace(util.ns['dct'])
# Maps keys used by the internal dictionaries that LegalRef
# constructs, which in turn are modelled after production rule names
# in the EBNF grammar.
predicate = {"type": RDF.type,
             "publikation": RPUBL["rattsfallspublikation"],
             "artal": RPUBL["artal"],
             "lopnummer": RPUBL["publikationsordinal"],
             "sidnummer": RPUBL["sidnummer"],
             "law": RPUBL["fsNummer"],
             "chapter": RINFOEX["kapitelnummer"],
             "section": RINFOEX["paragrafnummer"],
             "piece": RINFOEX["styckenummer"],
             "item": RINFOEX["punktnummer"],
             "myndighet": DCT["creator"],
             "dnr": RPUBL["diarienummer"]}
# inverse mapping: RDF predicate -> internal dictionary key
dictkey = dict([[v, k] for k, v in list(predicate.items())])
# LegalRef document classes -> RDF types, and the inverse
types = {LegalRef.RATTSFALL: RPUBL["Rattsfallsreferat"],
         LegalRef.LAGRUM: RPUBL["KonsolideradGrundforfattning"],
         LegalRef.MYNDIGHETSBESLUT: RPUBL["Myndighetsavgorande"]}
dicttypes = dict([[v, k] for k, v in list(types.items())])
# per-type URI match functions with named groups corresponding to the
# predicate keys above (used by parse_to_graph)
patterns = {LegalRef.RATTSFALL:
            re.compile("http://rinfo.lagrummet.se/publ/rattsfall/(?P<publikation>\w+)/(?P<artal>\d+)(s(?P<sidnummer>\d+)|((:| nr | ref )(?P<lopnummer>\d+)))").match,
            LegalRef.MYNDIGHETSBESLUT:
            re.compile("http://rinfo.lagrummet.se/publ/beslut/(?P<myndighet>\w+)/(?P<dnr>.*)").match,
            LegalRef.LAGRUM:
            re.compile("http://rinfo.lagrummet.se/publ/sfs/(?P<law>\d{4}:\w+)#?(K(?P<chapter>[0-9a-z]+))?(P(?P<section>[0-9a-z]+))?(S(?P<piece>[0-9a-z]+))?(N(?P<item>[0-9a-z]+))?").match
            }
# The dictionary should be a number of properties of the document we
# wish to construct the URI for, e.g:
# {"type": LegalRef.RATTSFALL,
# "publikation": "nja",
# "artal": "2004"
# "sidnr": "43"}
#
# The output is a URI string like 'http://rinfo.lagrummet.se/publ/rattsfall/nja/2004s43'
def construct(dictionary):
    """Construct a URI from a dict of document properties by first
    massaging the data into an rdflib graph and delegating to
    construct_from_graph."""
    graph = Graph()
    bnode = BNode()
    for key, value in dictionary.items():
        if key == "type":
            graph.add((bnode, RDF.type, URIRef(types[value])))
        else:
            graph.add((bnode, predicate[key], Literal(value)))
    # print graph.serialize(format="nt")
    return construct_from_graph(graph)
def _first_obj(graph, subject, predicate):
    """Return the first object for (subject, predicate) in *graph*, or
    None when no such triple exists."""
    for obj in graph.objects(subject, predicate):
        return obj
    return None
def construct_from_graph(graph):
    """Construct a document URI from an rdflib graph of properties.

    The graph is expected to contain triples about exactly one BNode
    (as built by construct); the URI layout is chosen by the node's
    RDF type.  Raises ValueError for unknown types."""
    # assume every triple in the graph has the same bnode as subject
    bnode = list(graph)[0][0]
    assert(isinstance(bnode, BNode))
    # maybe we should just move the triples into a dict keyed on predicate?
    rdftype = _first_obj(graph, bnode, RDF.type)
    if rdftype == RPUBL["Rattsfallsreferat"]:
        publ = _first_obj(graph, bnode, RPUBL["rattsfallspublikation"])
        # NJA cases are identified by page number, everything else by
        # year:ordinal
        if str(publ) == "nja":
            uripart = "%s/%ss%s" % (publ,
                                    _first_obj(graph, bnode, RPUBL["artal"]),
                                    _first_obj(graph, bnode, RPUBL["sidnummer"]))
        else:
            uripart = "%s/%s:%s" % (publ,
                                    _first_obj(graph, bnode, RPUBL["artal"]),
                                    _first_obj(graph, bnode, RPUBL["publikationsordinal"]))
        return "http://rinfo.lagrummet.se/publ/rattsfall/%s" % uripart
    elif rdftype == RPUBL["KonsolideradGrundforfattning"]:
        # print graph.serialize(format="n3")
        # statute references: fsNummer plus one-letter-prefixed
        # fragment components in a fixed order
        attributeorder = [RINFOEX["kapitelnummer"],
                          RINFOEX["paragrafnummer"],
                          RINFOEX["styckenummer"],
                          RINFOEX["punktnummer"]]
        signs = {RINFOEX["kapitelnummer"]: 'K',
                 RINFOEX["paragrafnummer"]: 'P',
                 RINFOEX["styckenummer"]: 'S',
                 RINFOEX["punktnummer"]: 'N'}
        urifragment = _first_obj(graph, bnode, RPUBL["fsNummer"])
        for key in attributeorder:
            if _first_obj(graph, bnode, key):
                # the '#' separator goes before the first fragment component
                if "#" not in urifragment:
                    urifragment += "#"
                urifragment += signs[key] + _first_obj(graph, bnode, key)
        return "http://rinfo.lagrummet.se/publ/sfs/%s" % urifragment
    elif rdftype == RPUBL["Myndighetsavgorande"]:
        return "http://rinfo.lagrummet.se/publ/beslut/%s/%s" % \
               (_first_obj(graph, bnode, DCT["creator"]),
                _first_obj(graph, bnode, RPUBL["diarienummer"]))
    else:
        raise ValueError("Don't know how to construct a uri for %s" % rdftype)
def parse(uri):
    """Parse *uri* back into a dict of document properties (the inverse
    of construct)."""
    result = {}
    for (subj, pred, obj) in parse_to_graph(uri):
        if pred == RDF.type:
            result["type"] = dicttypes[obj]
        else:
            result[dictkey[pred]] = str(obj)
    return result
def parse_to_graph(uri):
    """Parse *uri* into an rdflib graph of document properties.

    Raises ValueError when the URI matches none of the known patterns."""
    dictionary = None
    for pid, pattern in list(patterns.items()):
        m = pattern(uri)
        if m:
            dictionary = m.groupdict()
            dictionary["type"] = pid
            break
    if not dictionary:
        raise ValueError("Can't parse URI %s" % uri)
    graph = Graph()
    for prefix, nsuri in list(util.ns.items()):
        graph.bind(prefix, Namespace(nsuri))
    bnode = BNode()
    for key, value in dictionary.items():
        # skip unmatched groups and internal helper groups
        if value is None or key.startswith("_"):
            continue
        if key == "type":
            graph.add((bnode, RDF.type, URIRef(types[value])))
        else:
            graph.add((bnode, predicate[key], Literal(value)))
    return graph
| Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""Fancy file-like-class for reading (not writing) text files by line,
paragraph, page or any other user-defined unit of text, with support
for peeking ahead and looking backwards. It can read files in
different encodings, but converts/handles everything using unicode.
Alternatively, it can be initialized from an existing (unicode) buffer"""
import os
import sys
import codecs
import copy
import unittest
import six
class TextReader(object):
    """Read-only file-like wrapper around a fully decoded text buffer,
    with support for reading by line/paragraph/page and seeking."""
    # Supported line-separator conventions (the linesep constructor arg)
    UNIX = '\n'
    DOS = '\r\n'
    MAC = '\r'
#----------------------------------------------------------------
# Internal helper methods etc
    def __init__(self, filename=None, ustring=None, encoding=None, linesep=None):
        """Initialize from either a file on disk or an existing unicode
        buffer.

        :param filename: file to read and decode into the buffer
        :param ustring: alternatively, a ready-made unicode string
        :param encoding: codec used to decode the file (default ascii)
        :param linesep: line separator (UNIX/DOS/MAC); default os.linesep
        """
        # NOTE(review): an empty ustring is treated as "not given" here
        # and raises TypeError -- confirm that is intended
        if not filename and not ustring:
            raise TypeError("Must specify either filename or ustring")
        # implementation of file attributes
        self.closed = False
        self.mode = "r+"
        self.name = filename
        self.newlines = None
        self.softspace = 0
        if encoding:
            self.encoding = encoding
        else:
            self.encoding = 'ascii'
        # Other initialization
        if linesep:
            self.linesep = linesep
        else:
            self.linesep = os.linesep
        # can be changed through getiterator, if we want to iterate over anything else but lines
        self.iterfunc = self.readline
        self.iterargs = []
        self.iterkwargs = {}
        # optional per-chunk transformations (see __process)
        self.autostrip = False
        self.autodewrap = False
        self.autodehyphenate = False
        self.expandtabs = True
        if filename:
            # read and decode the whole file up front
            self.f = codecs.open(self.name, "r", self.encoding)
            self.data = self.f.read()
            self.f.close()
        else:
            assert(isinstance(ustring, six.text_type))
            self.data = ustring
        self.currpos = 0
        self.maxpos = len(self.data)
        self.lastread = ''
    def __iter__(self):
        """Iterate over the buffer using the configured unit (iterfunc,
        lines by default)."""
        # self.iterfunc = self.readline
        return self
def __find(self, delimiter, startpos):
idx = self.data.find(delimiter, startpos)
if idx == -1: # not found, read until eof
res = self.data[startpos:]
newpos = startpos + len(res)
else:
res = self.data[startpos:idx]
newpos = idx + len(delimiter)
return (res, newpos)
def __rfind(self, delimiter, startpos):
idx = self.data.rfind(delimiter, 0, startpos)
if idx == -1: # not found, read until bof
res = self.data[:startpos]
newpos = 0
else:
res = self.data[idx + len(delimiter):startpos]
newpos = idx
return (res, newpos)
def __process(self, s):
if self.autostrip:
s = self.__strip(s)
if self.autodewrap:
s = self.__dewrap(s)
if self.autodehyphenate:
s = self.__dehyphenate(s)
if self.expandtabs:
s = self.__expandtabs(s)
return s
    def __strip(self, s):
        # remove leading/trailing whitespace from the chunk
        return s.strip()
    def __dewrap(self, s):
        # join wrapped lines into one by replacing line separators
        return s.replace(self.linesep, " ")
    def __dehyphenate(self, s):
        # re-join words hyphenated across line breaks
        return s  # FIXME: implement
    def __expandtabs(self, s):
        # expand tabs using the standard 8-column tab stops
        return s.expandtabs(8)
#----------------------------------------------------------------
# Implementation of a file-like interface
    def fflush(self):
        """No-op: all data is held in memory and never written.
        NOTE(review): file objects call this method flush() -- confirm
        whether callers need a flush alias."""
        pass
    def __next__(self):
        """Return the next unit of text (as configured via iterfunc);
        raise StopIteration when no further progress can be made."""
        oldpos = self.currpos
        # res = self.__process(self.readline())
        # print "self.iterfunc is %r" % self.iterfunc
        res = self.__process(self.iterfunc(*self.iterargs, **self.iterkwargs))
        if self.currpos == oldpos:
            # the iteration function consumed nothing: end of data
            raise StopIteration
        else:
            return res
    # alias for py2 compat
    next = __next__
def read(self, size=0):
    """Read and consume up to *size* characters from the current
    position; the raw (unprocessed) text is also remembered in
    self.lastread."""
    chunk = self.data[self.currpos:self.currpos + size]
    self.lastread = chunk
    self.currpos += len(chunk)
    return self.__process(chunk)
def readline(self, size=None):
    """Read and consume one line (up to the next self.linesep); the
    *size* argument is accepted for file-like compatibility but
    ignored."""
    # FIXME: the size arg is required for file-like interfaces,
    # but we don't support it
    return self.readchunk(self.linesep)
def readlines(self, sizehint=None):
    # Stub for the file-like interface; currently returns None.
    # FIXME: Implement
    pass
def xreadlines(self):
    # Stub for the (py2) file-like interface; currently returns None.
    # FIXME: Implement
    pass
def seek(self, offset, whence=0):
    """Move the current read position, honoring the standard *whence*
    values: 0 = absolute (default), 1 = relative to the current
    position, 2 = relative to the end of the data.

    FIX: *whence* was accepted but silently ignored, so seek() always
    behaved as an absolute seek; the standard semantics are now
    implemented (the default behavior is unchanged).

    :raises ValueError: if *whence* is not 0, 1 or 2
    """
    if whence == 0:
        self.currpos = offset
    elif whence == 1:
        self.currpos += offset
    elif whence == 2:
        self.currpos = self.maxpos + offset
    else:
        raise ValueError("invalid whence value %r" % whence)
def tell(self):
    """Return the current read position (file-like interface)."""
    return self.currpos
def truncate(self):
    """Unsupported: TextReaders are read-only.

    FIX: the IOError was *returned* instead of raised, so calling
    truncate() silently succeeded.

    :raises IOError: always
    """
    raise IOError("TextReaders are read-only")
def write(self, s):
    """Unsupported: TextReaders are read-only.

    FIX: the def was missing *self* (so any instance call raised
    TypeError), the parameter shadowed the built-in ``str``, and the
    IOError was returned instead of raised.

    :raises IOError: always
    """
    raise IOError("TextReaders are read-only")
def writelines(self, sequence):
    """Unsupported: TextReaders are read-only.

    FIX: the def was missing *self* (so any instance call raised
    TypeError) and the IOError was returned instead of raised.

    :raises IOError: always
    """
    raise IOError("TextReaders are read-only")
#----------------------------------------------------------------
# Added convenience methods
def eof(self):
    """True if the read position is at the end of the data."""
    return (self.currpos == self.maxpos)
def bof(self):
    """True if the read position is at the beginning of the data."""
    return (self.currpos == 0)
def cue(self, string):
    """Move the read position to the start of the next occurrence of
    *string* (without consuming it); raises IOError if *string* does
    not occur after the current position."""
    found_at = self.data.find(string, self.currpos)
    if found_at < 0:
        raise IOError("Could not find %r in file" % string)
    self.currpos = found_at
def cuepast(self, string):
    """Move the read position to just after the next occurrence of
    *string* (raises IOError, via cue(), if it is not found)."""
    self.cue(string)
    self.currpos += len(string)
def readto(self, string):
    """Read and consume everything up to (but not including) the next
    occurrence of *string*; raises IOError if it is not found."""
    hit = self.data.find(string, self.currpos)
    if hit == -1:
        raise IOError("Could not find %r in file" % string)
    consumed = self.data[self.currpos:hit]
    self.currpos = hit
    return self.__process(consumed)
def readparagraph(self):
    """Read and consume the next paragraph (text up to a double line
    separator), skipping blank lines both before and after it."""
    # consume any leading newlines
    while self.peek(len(self.linesep)) == self.linesep:
        self.currpos += len(self.linesep)
    # read actual paragraph
    res = self.readchunk(self.linesep * 2)
    # consume any trailing lines
    while self.peek(len(self.linesep)) == self.linesep:
        self.currpos += len(self.linesep)
    # print("readparagraph: %r" % res[:40])
    return res
def readpage(self):
    """Read and consume one page, delimited by a form feed."""
    return self.readchunk('\f')  # form feed - pdftotext generates
                                 # these to indicate page breaks
                                 # (other ascii oriented formats,
                                 # like the GPL, RFCs and even some
                                 # python source code, uses it as
                                 # well)
def readchunk(self, delimiter):
    """Read and consume everything up to the next *delimiter* (or to
    end-of-data), recording the raw text in self.lastread."""
    (self.lastread, self.currpos) = self.__find(delimiter, self.currpos)
    return self.__process(self.lastread)
def lastread(self):
    """returns the last chunk of data that was actually read
    (i.e. the peek* and prev* methods do not affect this"""
    # NOTE(review): __init__, read() and readchunk() assign the
    # *attribute* self.lastread, which shadows this method on every
    # instance -- after any read, inst.lastread is the string itself,
    # not this callable. One of the two should probably be renamed.
    return self.__process(self.lastread)
def peek(self, size=0):
    """Return the next *size* characters (post-processed) without
    moving the read position."""
    ahead = self.data[self.currpos:self.currpos + size]
    return self.__process(ahead)
def peekline(self, times=1):
    """Return the *times*'th next line without consuming anything."""
    return self.peekchunk(self.linesep, times)
def peekparagraph(self, times=1):
    """Return the *times*'th next paragraph without consuming
    anything; the read position is restored before returning."""
    startpos = self.currpos
    # consume any leading newlines
    while self.peek(len(self.linesep)) == self.linesep:
        self.currpos += len(self.linesep)
    # read actual paragraph
    res = self.peekchunk(self.linesep * 2, times)
    # print("peekparagraph: %r" % res[:40])
    self.currpos = startpos
    return res
def peekchunk(self, delimiter, times=1):
    """Return the *times*'th next chunk (as delimited by *delimiter*)
    without moving the read position; raises IOError when fewer than
    *times* chunks remain."""
    oldpos = self.currpos
    for i in range(times):
        (res, newpos) = self.__find(delimiter, oldpos)
        # print "peekchunk: newpos: %s, oldpos: %s" % (newpos,oldpos)
        if newpos == oldpos:
            # __find made no progress -> we ran out of data
            raise IOError("Peek past end of file")
        else:
            oldpos = newpos
    return self.__process(res)
def prev(self, size=0):
    """Return the (up to) *size* characters immediately before the
    current position, without moving it.

    FIX: when *size* exceeded the current position, the slice start
    (self.currpos - size) went negative and wrapped around to the end
    of the data, returning the wrong text; the start is now clamped
    to 0 so at most the available preceding text is returned.
    """
    start = max(0, self.currpos - size)
    res = self.data[start:self.currpos]
    return self.__process(res)
def prevline(self, times=1):
    """Return the *times*'th preceding line without moving the read
    position."""
    return self.prevchunk(self.linesep, times)
def prevparagraph(self, times=1):
    """Return the *times*'th preceding paragraph without moving the
    read position."""
    return self.prevchunk(self.linesep * 2, times)
def prevchunk(self, delimiter, times=1):
    """Return the *times*'th preceding chunk (as delimited by
    *delimiter*) without moving the read position; raises IOError when
    fewer than *times* chunks precede it."""
    oldpos = self.currpos
    for i in range(times):
        (res, newpos) = self.__rfind(delimiter, oldpos)
        if newpos == oldpos:
            # __rfind made no progress -> we hit beginning-of-data
            # (the message says "end", but this is the start of the
            # data seen backwards)
            raise IOError("Prev (backwards peek) past end of file")
        else:
            oldpos = newpos
    return self.__process(res)
def getreader(self, callableObj, *args, **kwargs):
    """Enables you to treat the result of any single read*, peek*
    or prev* methods as a new TextReader. Particularly useful to
    process individual pages in page-oriented documents"""
    res = callableObj(*args, **kwargs)
    # shallow copy: the clone shares configuration (linesep, autostrip
    # etc.) but gets its own data and position
    # NOTE(review): relies on a module-level 'copy' import that is not
    # visible in this excerpt -- confirm it is imported.
    clone = copy.copy(self)
    clone.data = res
    clone.currpos = 0
    clone.maxpos = len(clone.data)
    return clone
def getiterator(self, callableObj, *args, **kwargs):
    """Make subsequent iteration over this reader produce items by
    calling *callableObj* (with *args/**kwargs) instead of readline;
    returns self so the result can be fed directly to a for loop."""
    self.iterfunc = callableObj
    self.iterargs = args
    self.iterkwargs = kwargs
    return self
| Python |
from operator import attrgetter
class NewsCriteria(object):
    """Represents a particular subset of the documents in a
    repository, for the purpose of generating a news feed for that
    subset.

    :param basefile: A slug-like basic text label for this subset.
    :param feedtitle: ...
    :param selector: callable
    :param key: callable
    """

    def __init__(self, basefile, feedtitle, selector=None, key=None):
        self.basefile = basefile
        self.feedtitle = feedtitle
        if selector:
            assert callable(selector)
            self.selector = selector
        else:
            # by default, every entry belongs to the subset
            self.selector = lambda x: True
        if key:
            assert callable(key)
            self.key = key
        else:
            # by default, sort on the entry's 'updated' attribute
            self.key = attrgetter('updated')  # or lambda x: x.updated
        self.entries = []
| Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# ferenda.FSMParser -
#
# You don't have to. You may create ferenda.elements objects manually, or even create
# your own lists-of-lists structure that render_xhtml can handle (or that you
# transform into xhtml/rdfa yourself).
import collections
import re
import inspect
import six
from ferenda.errors import FSMStateError
class Peekable(six.Iterator):
    """An iterator wrapper that additionally supports looking at the
    next item without consuming it (inspired by recipe 19.18 in the
    python cookbook)."""

    def __init__(self, iterable):
        self._iterable = iter(iterable)
        self._cache = collections.deque()

    def __iter__(self):
        return self

    def _fillcache(self):
        # ensure at least one item is buffered in the cache
        while len(self._cache) < 1:
            try:
                self._cache.append(six.advance_iterator(self._iterable))
            except IOError:  # more?
                # NOTE(review): catching IOError here looks odd; an
                # exhausted iterator raises StopIteration, which simply
                # propagates past this handler (same net effect).
                raise StopIteration

    def __next__(self):
        self._fillcache()
        return self._cache.popleft()

    # useful alias
    next = __next__

    def peek(self):
        """Return the next item without consuming it."""
        self._fillcache()
        return self._cache[0]
class FSMParser(object):
    """A configurable finite state machine for parsing documents with
    nested structure. You provide a set of recognizers, a set of
    constructors, a transition table and a stream of document text
    chunks, and it returns a hierarchical document object structure."""

    def __init__(self):
        self.debug = False
        self.transitions = None  # set by set_transitions
        self.recognizers = None  # set by set_recognizers() or set_transitions()
        self.reader = None  # set by parse()
        # somewhat magic: these must be assigned by the caller before
        # parse() is invoked
        self.initial_state = None
        self.initial_constructor = None
        # pseudo-internal: the stack of nested parse states
        self._state_stack = []

    def _debug(self, msg):
        """Prints a debug message, indented to show how far down in the nested structure we are"""
        if self.debug:
            stack = inspect.stack()
            calling_frame = [x[3] for x in stack][1]
            relative_depth = len(self._state_stack)
            print("%s[%s(%r)] %s" % (". " * relative_depth, calling_frame, self._state_stack, msg))

    def set_recognizers(self, *args):
        """Set the list of functions (or other callables) used in
        order to recognize symbols from the stream of text
        chunks. Recognizers are tried in the order specified here."""
        self.recognizers = args

    def set_transitions(self, transitions):
        """Set the transition table for the state machine.

        :param transitions: The transition table, in the form of a
            mapping between two tuples. The first tuple should be the
            current state (or a list of possible current states) and a
            callable function that determines if a particular symbol is
            recognized ``(currentstate, recognizer)``. The second tuple
            should be a constructor function (or ``False``) and the new
            state to transition into.
        """
        self.transitions = {}
        for (before, after) in transitions.items():
            (before_states, recognizer) = before
            # 'after' may be a plain callable (a dynamic transition
            # function) or a (constructor, state) tuple
            if not callable(after):
                (constructor, after_state) = after
                assert (constructor == False) or callable(constructor), "Specified constructor %r not callable" % constructor
            assert callable(recognizer), "Specified recognizer %r not callable" % recognizer
            # normalize a single before-state to a one-element list
            if (not isinstance(before_states, (list, tuple))):
                before_states = [before_states]
            for before_state in before_states:
                if callable(after):
                    self._debug("%r,%s() -> %s()" % (before_state, recognizer.__name__, after.__name__))
                elif callable(after[0]):
                    self._debug("%r,%s() -> %s(), %r" % (before_state, recognizer.__name__, after[0].__name__, after[1]))
                else:
                    self._debug("%r,%s() -> %r, %r" % (before_state, recognizer.__name__, after[0], after[1]))
                self.transitions[(before_state, recognizer)] = after

    def parse(self, chunks):
        """Parse a document in the form of an iterable of suitable
        chunks -- often lines or elements. Each chunk should be a
        string or a string-like object. Some examples::

            p = FSMParser()
            reader = TextReader("foo.txt")
            body = p.parse(reader.getiterator(reader.readparagraph),"body", make_body)
            body = p.parse(BeautifulSoup("foo.html").find_all("#main p"), "body", make_body)
            body = p.parse(ElementTree.parse("foo.xml").find(".//paragraph"), "body", make_body)

        Before calling, the attributes ``initial_state`` (any object,
        but strings make error messages easier to understand; must be
        present in the transition table) and ``initial_constructor``
        (a callable that creates a document root object and fills it
        with child objects using make_children()) must be set.

        :param chunks: The document to be parsed.
        :type chunks: A list (or any other iterable) of text-like objects.
        :returns: A document object tree.
        """
        if self.debug:
            self._debug("Starting parse")
        self.reader = Peekable(chunks)
        self._state_stack = [self.initial_state]
        return self.initial_constructor(self)

    def analyze_symbol(self):
        """Internal function used by make_children(): peek at the next
        chunk and return the recognizer that matches it, or None at end
        of the stream.

        :raises FSMStateError: if no applicable recognizer matches the
            chunk
        """
        try:
            rawchunk = self.reader.peek()
            # abbreviate long chunks for readable debug output
            if len(rawchunk) > 40:
                chunk = repr(rawchunk[:25])[1:-1] + "[...]" + repr(rawchunk[-10:])[1:-1]
            else:
                chunk = repr(rawchunk)
        except StopIteration:
            self._debug("We're done!")
            return None
        ret = None
        applicable_tmp = [x[1] for x in self.transitions.keys() if x[0] == self._state_stack[-1]]
        # Create correct sorting of applicable_recognizers
        applicable_recognizers = []
        for recognizer in self.recognizers:
            if recognizer in applicable_tmp:
                applicable_recognizers.append(recognizer)
        self._debug("Testing %r against %s (state %r) " %
                    (chunk, [x.__name__ for x in applicable_recognizers],
                     self._state_stack[-1]))
        for recognizer in self.recognizers:
            if recognizer in applicable_recognizers and recognizer(self):
                ret = recognizer
        if ret:
            self._debug("%r -> %s" % (chunk, ret.__name__))
            return ret
        # FIX: this error path was previously unreachable (it was
        # placed after an unconditional "return ret"), so an
        # unrecognized symbol returned None -- which make_children()
        # interprets as end-of-stream, silently truncating the parse.
        self._debug("No recognizer for %r" % (chunk))
        raise FSMStateError("No recognizer match for %r" % chunk)

    def transition(self, currentstate, symbol):
        """Internal function used by make_children(): look up (or, for
        callable transitions, compute) the (constructor, newstate)
        result for recognizing *symbol* in *currentstate*.

        :raises FSMStateError: if the transition table has no entry
        """
        if (currentstate, symbol) in self.transitions:
            t = self.transitions[(currentstate, symbol)]
            if callable(t):
                # dynamic transition: delegate to the callable
                return t(symbol, self._state_stack)
            else:
                return t
        else:
            raise FSMStateError("Can't transition from %s with %s" % (currentstate, symbol))

    def make_child(self, constructor, childstate):
        """Internal function used by make_children(), which calls one
        of the constructors defined in the transition table."""
        if not childstate:
            # keep the current state for the child
            childstate = self._state_stack[-1]
            self._debug("calling child constructor %s" % constructor.__name__)
        else:
            self._debug("calling child constructor %s in state %r" % (constructor.__name__, childstate))
        self._state_stack.append(childstate)
        ret = constructor(self)
        self._state_stack.pop()  # do something with this?
        return ret

    def make_children(self, parent):
        """Creates child nodes for the current (parent) document node.

        :param parent: The parent document node.
        :type parent: Any list-like object (preferrably a subclass of
                      ferenda.element.CompoundElement)
        :returns: The same ``parent`` object.
        """
        self._debug("Making children for %s" % parent.__class__.__name__)
        while True:  # we'll break out of this when transition()
                     # returns a constructor that is False
            symbol = self.analyze_symbol()
            if symbol is None:  # no more symbols
                self._debug("We're done!")
                return parent
            (constructor, newstate) = self.transition(self._state_stack[-1],
                                                      symbol)
            if constructor is False:
                self._debug("transition(%r,%s()) -> (False,%r)" %
                            (self._state_stack[-1], symbol.__name__, newstate))
            else:
                self._debug("transition(%r,%s()) -> (%s(),%r)" %
                            (self._state_stack[-1], symbol.__name__,
                             constructor.__name__, newstate))
            # if transition() indicated that we should change state,
            # first find out whether the constructor will call
            # make_child, creating a new stack frame. This is
            # indicated by the callable having the 'newstate'
            # attribute (now set manually, should be through a
            # decorator)
            if newstate and not hasattr(constructor, 'newstate'):
                self._debug("Changing top of state stack (%r->%r)" % (self._state_stack[-1], newstate))
                self._state_stack[-1] = newstate
            if constructor:
                element = self.make_child(constructor, newstate)
                if element is not None:
                    parent.append(element)
            else:
                # special weird hack - set the state we'll be
                # returning to by manipulating self._state_stack
                if newstate:
                    self._debug("Changing the state we'll return to (self._state_stack[-2])")
                    self._debug("  (from %r to %r)" % (self._state_stack[-2], newstate))
                    self._state_stack[-2] = newstate
                return parent
| Python |
import six
from six.moves import builtins
_property = builtins.property
_tuple = builtins.tuple
from operator import itemgetter as _itemgetter
class TocPage(tuple):
    'TocPage(linktext, title, basefile)'

    __slots__ = ()

    _fields = ('linktext', 'title', 'basefile')

    def __new__(_cls, linktext, title, basefile):
        'Create new instance of TocPage(linktext, title, basefile)'
        return _tuple.__new__(_cls, (linktext, title, basefile))

    @classmethod
    def _make(cls, iterable, new=tuple.__new__, len=len):
        'Make a new TocPage object from a sequence or iterable'
        result = new(cls, iterable)
        if len(result) != 3:
            raise TypeError('Expected 3 arguments, got %d' % len(result))
        return result

    def __repr__(self):
        'Return a nicely formatted representation string'
        return self.__class__.__name__ + '(linktext=%r, title=%r, basefile=%r)' % self

    def _asdict(self):
        'Return a new OrderedDict which maps field names to their values'
        # FIX: OrderedDict was referenced without being imported
        # anywhere in this module, so _asdict() raised NameError;
        # import it locally.
        from collections import OrderedDict
        return OrderedDict(zip(self._fields, self))

    __dict__ = property(_asdict)

    def _replace(_self, **kwds):
        'Return a new TocPage object replacing specified fields with new values'
        result = _self._make(map(kwds.pop, ('linktext', 'title', 'basefile'), _self))
        if kwds:
            raise ValueError('Got unexpected field names: %r' % list(kwds))
        return result

    def __getnewargs__(self):
        'Return self as a plain tuple.  Used by copy and pickle.'
        return tuple(self)

    linktext = _property(_itemgetter(0), doc='Alias for field number 0')
    title = _property(_itemgetter(1), doc='Alias for field number 1')
    basefile = _property(_itemgetter(2), doc='Alias for field number 2')
| Python |
from . import DocumentRepository
class PDFDocumentRepository(DocumentRepository):
    """Base class for handling repositories of PDF documents. Parsing
    of these documents are a bit more complicated than HTML or text
    documents, particularly with the handling of external resources
    such as CSS and image files."""
    storage_policy = "dir"
    downloaded_suffix = ".pdf"
    # This implementation is specific for swedish legal material. Move
    # to swedishlegalsource and make storage_policy aware.
    #
    # @classmethod
    # def basefile_from_path(cls,path):
    #     # data/dirsou/downloaded/2006/84/index.pdf -> 2006:84
    #     seg = path.split(os.sep)
    #     seg = seg[seg.index(cls.alias)+2:-1]
    #     seg = [x.replace("-","/") for x in seg]
    #     assert 2 <= len(seg) <= 3, "list of segments is too long or too short"
    #     # print "path: %s, seg: %r, basefile: %s" % (path,seg,":".join(seg))
    #     return ":".join(seg)

    def parse_basefile(self, basefile):
        """Parse the downloaded PDF for *basefile* into a document object."""
        reader = self.pdfreader_from_basefile(basefile)
        doc = self.parse_from_pdfreader(reader, basefile)
        return doc

    def pdfreader_from_basefile(self, basefile):
        """Create a PDFReader for the downloaded PDF of *basefile*,
        placing intermediate files next to the other intermediate data."""
        pdffile = self.downloaded_path(basefile)
        # Convoluted way of getting the directory of the intermediate
        # xml + png files that PDFReader will create
        intermediate_dir = os.path.dirname(
            self.generic_path(basefile, 'intermediate', '.foo'))
        self.setup_logger('pdfreader', self.config.loglevel)
        pdf = PDFReader()
        pdf.read(pdffile, intermediate_dir)
        return pdf

    def parse_from_pdfreader(self, pdfreader, basefile):
        """Wrap *pdfreader* in a document object with basic metadata
        (rdf type and provenance)."""
        doc = self.make_document()
        doc.uri = self.canonical_uri(basefile)
        doc.body = [pdfreader]
        d = Describer(doc.meta, doc.uri)
        d.rdftype(self.rdf_type)
        d.value(self.ns['prov']['wasGeneratedBy'], self.qualified_class_name())
        return doc

    def create_external_resources(self, doc):
        """Create the CSS file (fontspecs + page backgrounds) for *doc*
        and copy the page-background PNGs into place."""
        # FIX: 'basefile' was referenced without ever being defined,
        # so this method always raised NameError; take it from the
        # document, as list_external_resources() does.
        basefile = doc.basefile
        cssfile = self.generic_path(basefile, 'parsed', '.css')
        with open(cssfile, "w") as fp:
            # Create CSS header with fontspecs
            for pdf in doc.body:
                assert isinstance(pdf, PDFReader)
                for spec in list(pdf.fontspec.values()):
                    fp.write(".fontspec%s {font: %spx %s; color: %s;}\n" %
                             (spec['id'], spec['size'], spec['family'], spec['color']))
            # 2 Copy all created png files to their correct locations
            # (note: 'pdf' here is the last PDFReader from the loop above)
            totcnt = 0
            src_base = os.path.splitext(
                pdf.filename)[0].replace("/downloaded/", "/intermediate/")
            dest_base = self.generic_path(
                basefile + "#background", "parsed", "")
            for pdf in doc.body:
                cnt = 0
                for page in pdf:
                    totcnt += 1
                    cnt += 1
                    src = "%s%03d.png" % (src_base, page.number)
                    dest = "%s%04d.png" % (dest_base, totcnt)  # 4 digits, compound docs can be over 1K pages
                    if util.copy_if_different(src, dest):
                        self.log.debug("Copied %s to %s" % (src, dest))
                    fp.write("#page%03d { background: url('%s');}\n" %
                             (cnt, os.path.basename(dest)))

    def list_external_resources(self, doc):
        """Yield the paths of all files in *doc*'s parsed directory
        other than the parsed document itself."""
        parsed = self.parsed_path(doc.basefile)
        resource_dir = os.path.dirname(parsed)
        for f in [os.path.join(resource_dir, x) for x in os.listdir(resource_dir)
                  if os.path.join(resource_dir, x) != parsed]:
            yield f
| Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
class WordReader(object):
    """Reads .docx and .doc-files (the latter with support from antiword) and presents a slightly easier API for dealing with them."""

    def read(self, wordfile, workdir=None):
        """Read the file"""
        # Not implemented yet -- always returns None.
        pass
def word_to_html(indoc, outhtml):
    """Converts a word document to a HTML document by remote
    controlling Microsoft Word to open and save the doc as HTML (so
    this function only works on Win32 with Office 2003 installed)"""
    indoc = os.path.join(os.getcwd(), indoc.replace("/", os.path.sep))
    outhtml = os.path.join(os.getcwd(), outhtml.replace("/", os.path.sep))
    # display_* variants are only referenced by the commented-out
    # diagnostics below; kept for when those are re-enabled
    display_indoc = indoc[len(os.getcwd()):].replace(os.path.sep, "/")
    display_outhtml = outhtml[len(os.getcwd()):].replace(os.path.sep, "/")
    # print "Ensuring dir for %r" % outhtml
    ensure_dir(outhtml)
    if not os.path.exists(indoc):
        print(("indoc %s does not exists (seriously)" % indoc))
    if os.path.exists(outhtml):
        # if self.verbose: print "local file %s exists, not doing Word->HTML conversion" % display_outhtml
        return
    #if os.path.exists(outhtml + ".err.log"):
    #    print "Word->HTML conversion for local file %s has failed, not re-trying"% display_outhtml
    #    return
    from win32com.client import Dispatch
    import pywintypes
    wordapp = Dispatch("Word.Application")
    if wordapp is None:
        print("Couldn't start word")
        return
    # print "word_to_html: %s to %s" % (indoc,outhtml)
    try:
        wordapp.Documents.Open(indoc)
        wordapp.Visible = False
        doc = wordapp.ActiveDocument
        doc.SaveAs(outhtml, 10)  # 10 = filtered HTML output
        doc.Close()
        doc = None
        # FIX: was 'wordapp.Quit' -- a bare attribute access that never
        # invoked the method, leaving the Word process running
        wordapp.Quit()
    except pywintypes.com_error as e:
        print(("Warning: could not convert %s" % indoc))
        # NOTE(review): tuple-indexing a com_error is a py2 idiom and
        # may itself raise on py3 -- confirm against the supported
        # pywin32 version
        print((e[2][2]))
        # FIX: the error log file handle was never closed; use a
        # context manager
        with open(outhtml + ".err.log", "w") as errlog:
            errlog.write("%s:\n%s" % (indoc, e))
| Python |
from . import DocumentRepository
class CompositeRepository(DocumentRepository):
    """A document repository that delegates its work to a number of
    subordinate repositories (the ``subrepos`` class attribute, set by
    subclasses), using the first subrepo that produces a result."""

    # class-level cache of instantiated subrepos, shared by all
    # CompositeRepository (sub)classes
    instances = {}

    @classmethod
    def get_instance(cls, instanceclass, options=None):
        """Return a cached instance of *instanceclass*, creating (and
        caching) it with *options* on first use.

        FIX: the default for *options* was a shared mutable dict
        (``options={}``); use the None-sentinel idiom instead (the
        effective default is unchanged).
        """
        if options is None:
            options = {}
        if instanceclass not in cls.instances:
            # print "Creating a %s class with options %r" % (instanceclass.__name__,options)
            cls.instances[instanceclass] = instanceclass(options)
        return cls.instances[instanceclass]

    @classmethod
    def list_basefiles_for(cls, action, basedir):
        """Yield the basefiles that *action* applies to; for
        "parse_all" this is the deduplicated union over all subrepos."""
        if action == "parse_all":
            documents = set()
            for c in cls.subrepos:
                # instance = cls.get_instance(c)
                for basefile in c.list_basefiles_for("parse_all", basedir):
                    if basefile not in documents:
                        documents.add(basefile)
                        yield basefile
        elif action in ("generate_all", "relate_all"):
            #super(CompositeRepository,cls).list_basefiles_for(action,basedir)
            # copied code from DocumentRepository.list_basefiles_for --
            # couldn't figure out how to call super on a generator
            # function.
            directory = os.path.sep.join((basedir, cls.alias, "parsed"))
            suffix = ".xhtml"
            for x in util.list_dirs(directory, suffix, reverse=True):
                yield cls.basefile_from_path(x)

    def __init__(self, options):
        self.myoptions = options
        super(CompositeRepository, self).__init__(options)
        if 'log' in self.myoptions:
            # print "Log set: %s" % self.myoptions['log']
            pass
        else:
            # inherit our own logger's effective level as the default
            # print "Setting log to %s" % self.log.getEffectiveLevel()
            self.myoptions['log'] = logging.getLevelName(
                self.log.getEffectiveLevel())
        # eagerly create (and cache) all subrepo instances, each with a
        # copy of our options
        for c in self.subrepos:
            inst = self.get_instance(c, dict(self.myoptions))
            # print "DEBUG: Inst %s: log level %s" % (inst, logging.getLevelName(inst.log.getEffectiveLevel()))

    def download(self):
        """Run download() on every subrepo."""
        for c in self.subrepos:
            inst = c(options=self.myoptions)
            inst.download()

    def parse(self, basefile):
        """Try each subrepo in order until one can (or already has)
        parsed *basefile*; copy its result to our own store.

        :returns: True if some subrepo produced a result, else False
        """
        start = time()
        self.log.debug("%s: Starting", basefile)
        for c in self.subrepos:
            inst = self.get_instance(c, self.myoptions)
            inst.log.setLevel(logging.INFO)
            if os.path.exists(inst.downloaded_path(basefile)):
                if os.path.exists(inst.parsed_path(basefile)):
                    self.log.debug("%s: Using previously-created result (by %s)" %
                                   (basefile, inst.__class__.__name__))
                    self.copy_parsed(basefile, inst)
                    return True
                elif inst.parse(basefile):
                    self.log.info("%s: Created %s (using %s)" %
                                  (basefile, self.parsed_path(basefile), inst.__class__.__name__))
                    self.copy_parsed(basefile, inst)
                    self.log.info(
                        '%s: OK (%.3f sec)', basefile, time() - start)
                    return True
        return False
        # instancelist = ", ".join([x.__class__.__name__ for x in instances])
        # self.log.debug("%s in %d repos (%s)" %
        #                (basefile, len(instances),instancelist))
        # self.join_parsed(basefile,instances)

    def copy_parsed(self, basefile, instance):
        """Link/copy *instance*'s parsed and distilled results (plus
        its external resources) for *basefile* into our own store."""
        # If the distilled and parsed links are recent, assume that
        # all external resources are OK as well
        if (util.outfile_is_newer([instance.distilled_path(basefile)],
                                  self.distilled_path(basefile)) and
                util.outfile_is_newer([instance.parsed_path(basefile)],
                                      self.parsed_path(basefile))):
            self.log.debug(
                "%s: External resources are (probably) up-to-date" % basefile)
            return
        cnt = 0
        for src in instance.list_external_resources(basefile):
            cnt += 1
            target = (os.path.dirname(self.parsed_path(basefile)) +
                      os.path.sep +
                      os.path.basename(src))
            util.link_or_copy(src, target)
        util.link_or_copy(instance.distilled_path(basefile),
                          self.distilled_path(basefile))
        util.link_or_copy(instance.parsed_path(basefile),
                          self.parsed_path(basefile))
        if cnt:
            self.log.debug("%s: Linked %s external resources from %s to %s" %
                           (basefile,
                            cnt,
                            os.path.dirname(instance.parsed_path(basefile)),
                            os.path.dirname(self.parsed_path(basefile))))

    # Not used -- see copy_parsed instead
    def join_parsed(self, basefile, instances):
        # The default algorithm for creating a joined/composite result:
        # 1. Load all distilled files and add any unique triple to a
        # composite graph
        composite = Graph()
        # FIXME: Construct this list of bound namespaces dynamically somehow)
        composite.bind('dct', self.ns['dct'])
        composite.bind('rpubl', self.ns['rpubl'])
        composite.bind('xsd', self.ns['xsd'])
        composite.bind('foaf', self.ns['foaf'])
        composite.bind('xhv', self.ns['xhv'])
        for inst in instances:
            if os.path.exists(inst.distilled_path(basefile)):
                g = Graph()
                g.parse(inst.distilled_path(basefile))
                composite += g
        distilled_file = self.distilled_path(basefile)
        util.ensure_dir(distilled_file)
        composite.serialize(
            distilled_file, format="pretty-xml", encoding="utf-8")
        # 2. Use the first produced xhtml file (by the order specified
        # in self.supbrepos)
        #
        # FIXME: The trouble with this is that our distilled RDF/XML
        # file will most often contain a superset of all RDF triples
        # found in one particular XHTML+RDFa file.
        for inst in instances:
            if os.path.exists(inst.parsed_path(basefile)):
                self.copy_external_resources(basefile, inst)

    # Not sure this belongs in CompositeRepository -- maybe should be
    # part of the base implementation, or maybe we shouldn't copy
    # resources like this at all (instead make sure the server serves
    # resources up from the parsed directory)?
    def generate(self, basefile):
        """Generate output for *basefile*, then link every non-parsed
        file in the parsed directory into the generated directory."""
        # create self.generated_path(basefile)
        super(CompositeRepository, self).generate(basefile)
        # then link all other files from parsed that are not self.parse_path(basefile)
        # FIXME: dup code of copy_parsed and Regeringen.list_resources()
        parsed = self.parsed_path(basefile)
        resource_dir = os.path.dirname(parsed)
        for src in [os.path.join(resource_dir, x) for x in os.listdir(resource_dir)
                    if os.path.join(resource_dir, x) != parsed]:
            target = (os.path.dirname(self.generated_path(basefile)) +
                      os.path.sep +
                      os.path.basename(src))
            self.log.debug("Linking %s to %s" % (target, src))
            util.link_or_copy(src, target)
| Python |
from pprint import pprint
import whoosh.index
import whoosh.fields
import whoosh.analysis
import whoosh.query
import whoosh.qparser
import whoosh.writing
from ferenda import util
class FulltextIndex(object):
    """Open a fulltext index (creating it if it doesn't already exists).

    :param location: The file path (or URI) of the fulltext index.
    :type location: str
    :param docrepos: DocumentRepo instances, used to create extra fields in the full text index
    :type docrepos: list
    """

    def __init__(self, location, docrepos=None):
        # standard fields that every indexed resource gets
        default_schema = {'uri': Identifier(),
                          'repo': Label(),
                          'basefile': Label(),
                          'title': Text(boost=4),
                          'identifier': Label(boost=16),
                          'text': Text()}
        # reuse an existing index directory if present, else create one
        if whoosh.index.exists_in(location):
            self._index = whoosh.index.open_dir(location)
        else:
            self._index = self._create_whoosh_index(location, default_schema)
        self._schema = default_schema
        self._writer = None        # lazily created by update()
        self._batchwriter = False  # if True, update() uses a BufferedWriter

    def _create_whoosh_index(self, location, fields):
        # maps our field classes to concrete whoosh field instances
        # (lookup by instance works because IndexedType defines
        # value-based __eq__/__hash__)
        mapped_field = {Identifier(): whoosh.fields.ID(unique=True),
                        Label(): whoosh.fields.ID(stored=True),
                        Label(boost=16): whoosh.fields.ID(field_boost=16, stored=True),
                        Text(boost=4): whoosh.fields.TEXT(field_boost=4, stored=True,
                                                          analyzer=whoosh.analysis.StemmingAnalyzer()),
                        Text(): whoosh.fields.TEXT(stored=True,
                                                   analyzer=whoosh.analysis.StemmingAnalyzer())}
        whoosh_fields = {}
        for key, fieldtype in fields.items():
            whoosh_fields[key] = mapped_field[fieldtype]
        schema = whoosh.fields.Schema(**whoosh_fields)
        util.mkdir(location)
        return whoosh.index.create_in(location, schema)

    def schema(self):
        """Returns the schema in use."""
        return self._schema

    def update(self, uri, repo, basefile, title, identifier, text, **kwargs):
        # Other standard attrs: typeof?
        """Insert (or update) a resource in the fulltext index. A
        resource may be an entire document, but it can also be any
        part of a document that is referenceable (i.e. a document node
        that has ``@typeof`` and ``@about`` attributes), ie. a
        document with 100 sections can be stored as 100 independent
        resources.

        :param uri: unique key for the resource in the form of a URI
        :type uri: str
        :param repo: alias for docrepo
        :type repo: str
        :param basefile: basefile containing the resource
        :type basefile: str
        :param title: User-displayable title of resource (if applicable). Should not contain the same information as ``identifier``.
        :type title: str
        :param identifier: User-displayable short identifier for resource (if applicable)
        :type identifier: str
        :param text: The full textual content of the resource, as a plain string.
        :type text: str
        """
        # create the writer lazily, on first update
        if not self._writer:
            if self._batchwriter:
                # self._writer = self._index.writer(procs=4, limitmb=256, multisegment=True)
                self._writer = whoosh.writing.BufferedWriter(self._index, limit=1000)
                #indexwriter = self._index.writer()
                #stemfilter = indexwriter.schema["text"].analyzer[-1]
                #stemfilter.cachesize = -1
                #stemfilter.clear()
                #indexwriter.close()
            else:
                self._writer = self._index.writer()
        # A whoosh document is not the same as a ferenda document. A
        # ferenda document may be indexed as several (tens, hundreds
        # or more) whoosh documents
        self._writer.update_document(uri=uri,
                                     repo=repo,
                                     basefile=basefile,
                                     title=title,
                                     identifier=identifier,
                                     text=text,
                                     **kwargs)

    def commit(self):
        """Commit all pending updates to the fulltext index."""
        if self._writer:
            self._writer.commit()
            if not isinstance(self._writer, whoosh.writing.BufferedWriter):
                # A bufferedWriter can be used again after commit(), a regular writer cannot
                self._writer = None

    def close(self):
        """Commit pending updates and release the writer."""
        self.commit()
        if self._writer:
            self._writer.close()
            self._writer = None

    def __del__(self):
        # best-effort cleanup when the index object is garbage-collected
        self.close()

    def doccount(self):
        """Returns the number of currently indexed (non-deleted) documents."""
        return self._index.doc_count()

    def query(self, q, **kwargs):
        """Perform a free text query against the full text index, optionally restricted with parameters for individual fields.

        :param q: Free text query, using the selected full text index's preferred query syntax
        :type q: str
        :param **kwargs: any parameter will be used to match a similarly-named field
        :type **kwargs: dict
        :returns: matching documents, each document as a dict of fields
        :rtype: list
        """
        searchfields = ['identifier', 'title', 'text']
        mparser = whoosh.qparser.MultifieldParser(searchfields,
                                                  self._index.schema)
        query = mparser.parse(q)
        # query = whoosh.query.Term("text",q)
        with self._index.searcher() as searcher:
            res = self._convert_result(searcher.search(query))
        return res

    def _convert_result(self, res):
        # converts a whoosh.searching.Results object to a plain list of dicts
        l = []
        for hit in res:
            l.append(hit.fields())
        return l
class IndexedType(object):
    """Base class for the field types that make up a fulltext index
    schema. Instances behave as value objects: equality, hashing and
    repr are all derived from the keyword arguments given at
    construction."""

    def __init__(self, **kwargs):
        self.__dict__ = dict(kwargs)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # hash the attribute values in key order, matching __eq__
        values = tuple(v for k, v in sorted(self.__dict__.items()))
        return hash(values)

    def __repr__(self):
        # eg '<Label boost=16>' or '<Identifier>'
        attrs = "".join(" %s=%s" % kv for kv in sorted(self.__dict__.items()))
        return ("<%s%s>" % (self.__class__.__name__, attrs))
# Concrete schema field types. Identifier/Label/Text are mapped to
# whoosh fields in FulltextIndex._create_whoosh_index (Identifier ->
# unique ID, Label -> stored ID, Text -> stemmed TEXT); the others are
# not mapped in this module's default schema.
class Identifier(IndexedType): pass
class Datetime(IndexedType): pass
class Text(IndexedType): pass
class Label(IndexedType): pass
class Keywords(IndexedType): pass
class Boolean(IndexedType): pass
class URI(IndexedType): pass


# Marker classes for search modifiers -- presumably range-style query
# operators (less-than, greater-than, between); not used elsewhere in
# this module, confirm semantics against callers.
class SearchModifier(object): pass
class Less(SearchModifier): pass
class More(SearchModifier): pass
class Between(SearchModifier): pass
| Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import os
import re
import xml.etree.cElementTree as ET
from tempfile import mktemp
import logging
import six
from ferenda import util
from .elements import UnicodeElement, CompoundElement, OrdinalElement, serialize
class Textbox(CompoundElement):
    """A positioned box of text on a PDF page: carries bounding-box
    geometry (top/left/width/height, with derived right/bottom) and a
    font id resolvable through getfont()."""

    def __init__(self, *args, **kwargs):
        assert 'top' in kwargs, "top attribute missing"
        assert 'left' in kwargs, "left attribute missing"
        assert 'width' in kwargs, "width attribute missing"
        assert 'height' in kwargs, "height attribute missing"
        assert 'font' in kwargs, "font attribute missing"
        # consume the geometry/font kwargs so they are not passed on
        # to CompoundElement
        self.top = int(kwargs.pop('top'))
        self.left = int(kwargs.pop('left'))
        self.width = int(kwargs.pop('width'))
        self.height = int(kwargs.pop('height'))
        self.right = self.left + self.width
        self.bottom = self.top + self.height
        self.__fontspecid = kwargs.pop('font')
        self.__fontspec = kwargs.pop('fontspec')
        super(Textbox, self).__init__(*args, **kwargs)

    def __unicode__(self):
        # the textual content is the concatenation of our children
        return "".join(self)

    def getfont(self):
        """Return the fontspec entry for this box's font id."""
        return self.__fontspec[self.__fontspecid]
class Textelement(UnicodeElement):
    # A plain run of text inside a Textbox; no behavior beyond
    # UnicodeElement.
    pass
class PDFReader(CompoundElement):
def __init__(self):
    # maps fontspec id -> font properties, filled in while parsing the
    # intermediate XML (see the 'fontspec' handling in parse_xml)
    self.fontspec = {}
    self.log = logging.getLogger('pdfreader')
def read(self, pdffile, workdir=None):
self.filename = pdffile
assert os.path.exists(pdffile), "PDF %s not found" % pdffile
if not workdir:
workdir = os.path.dirname(pdffile)
basename = os.path.basename(pdffile)
xmlfile = os.sep.join(
(workdir, os.path.splitext(basename)[0] + ".xml"))
if not util.outfile_is_newer([pdffile], xmlfile):
tmppdffile = os.sep.join([workdir, basename])
util.copy_if_different(pdffile, tmppdffile)
# two pass coding: First use -c (complex) to extract
# background pictures, then use -xml to get easy-to-parse
# text with bounding boxes.
cmd = "pdftohtml -nodrm -c %s" % tmppdffile
self.log.debug("Converting: %s" % cmd)
(returncode, stdout, stderr) = util.runcmd(cmd,
require_success=True)
# print "RET: %s, STDOUT: %s, STDERR: %s" % (returncode,stdout,stderr)
# we won't need the html files
for f in os.listdir(workdir):
if f.endswith(".html"):
os.unlink(workdir + os.sep + f)
cmd = "pdftohtml -nodrm -xml %s" % tmppdffile
self.log.debug("Converting: %s" % cmd)
(returncode, stdout, stderr) = util.runcmd(cmd,
require_success=True)
return self.parse_xml(xmlfile)
def set_background_path():
pass
def parse_xml(self, xmlfile):
self.log.debug("Loading %s" % xmlfile)
assert os.path.exists(xmlfile), "XML %s not found" % xmlfile
try:
tree = ET.parse(xmlfile)
except ET.ParseError as e:
self.log.warning("'%s', working around" % e)
#fix = PDFXMLFix()
#fix.fix(xmlfile)
tree = ET.parse(xmlfile)
# for each page element
for pageelement in tree.getroot():
page = Page(number=int(pageelement.attrib['number']), # always int?
width=int(pageelement.attrib['width']),
height=int(pageelement.attrib['height']),
background=None)
background = "%s%03d.png" % (
os.path.splitext(xmlfile)[0], page.number)
if os.path.exists(background):
page.background = background
else:
print(("Did not set %s as background: Doesn't exist" %
background))
# print("Creating page %s" % page.number)
assert pageelement.tag == "page", "Got <%s>, expected <page>" % page.tag
for element in pageelement:
if element.tag == 'fontspec':
self.fontspec[element.attrib['id']] = element.attrib
elif element.tag == 'text':
# print ET.tostring(element)
attribs = element.attrib
attribs['fontspec'] = self.fontspec
b = Textbox(**attribs)
# eliminate "empty" textboxes
if element.text and element.text.strip() == "" and not element.getchildren():
# print "Skipping empty box"
continue
if element.text and element.text.strip():
b.append(Textelement(str(element.text)))
# The below loop could be done recursively to
# support arbitrarily deep nesting (if we change
# Textelement to be a non-unicode derived type),
# but pdftohtml should not create such XML (there
# is no such data in the PDF file)
for child in element:
grandchildren = child.getchildren()
# special handling of the <i><b> construct
if grandchildren != []:
#print "Grandchildren handling: %s '%s' '%s'" % (len(grandchildren),
# child.text,
# child.tail)
assert (len(grandchildren) == 1), "General grandchildren not supported"
if child.text:
Textelement(str(child.text), tag=child.tag)
b.append(Textelement(
str(grandchildren[0].text), tag="ib"))
if child.tail:
Textelement(str(child.tail), tag=child.tag)
else:
b.append(
Textelement(str(child.text), tag=child.tag))
if child.tail:
b.append(Textelement(str(child.tail)))
if element.tail and element.tail.strip(): # can this happen?
b.append(Textelement(str(element.tail)))
page.append(b)
# done reading the page
self.append(page)
self.log.debug("PDFReader initialized: %d pages, %d fontspecs" %
(len(self), len(self.fontspec)))
def avg_font_size(self):
pass
def median_font_size(self):
pass
def median_box_width(self, threshold=0):
boxwidths = []
for page in self:
for box in page:
if box.right - box.left < threshold:
continue
# print "Box width: %d" % (box.right-box.left)
boxwidths.append(box.right - box.left)
boxwidths.sort()
return boxwidths[int(len(boxwidths) / 2)]
def common_horizontal_gutters():
pass
def common_vertical_gutters():
pass
class Page(CompoundElement, OrdinalElement):

    """A page of a PDF document: a list of Textbox objects, plus the
    attributes number, width, height and background (set by
    PDFReader.parse_xml; background may be None)."""

    def vertical_gutters(self):
        # FIXME: hardcoded placeholder values
        return ((0, 42), (463, 482))

    def horizontal_gutters(self):
        # FIXME: hardcoded placeholder values
        return ((0, 42), (463, 482))

    def boundingbox(self, top=0, left=0, bottom=None, right=None):
        """Generate all boxes that lie entirely within the given
        bounding box (which defaults to the whole page)."""
        if not bottom:
            bottom = self.height
        if not right:
            right = self.width
        for box in self:
            if (box.top >= top and
                box.left >= left and
                box.bottom <= bottom and
                    box.right <= right):
                yield box

    def crop(self, top=0, left=0, bottom=None, right=None):
        """Remove boxes outside the given bounding box, shift the
        remaining boxes' coordinates, and crop the background image
        (if any) with ImageMagick's ``convert``."""
        # Crop any text box that sticks out
        # Actually if top and left != 0, we need to adjust them
        newboxes = []
        for box in self.boundingbox(top, left, bottom, right):
            box.top = box.top - top
            box.left = box.left - left
            newboxes.append(box)
        self[:] = []
        self.extend(newboxes)
        self.width = right - left
        self.height = bottom - top
        # Then crop the background images... somehow
        # FIX: guard against self.background being None (parse_xml sets
        # it to None when no image exists), which would make
        # os.path.exists raise TypeError
        if self.background and os.path.exists(self.background):
            cmdline = "convert %s -crop %dx%d+%d+%d +repage %s" % (
                self.background, self.width, self.height, left, top,
                self.background + ".new")
            (returncode, stdout, stderr) = util.runcmd(cmdline,
                                                       require_success=True)
            util.replace_if_different(
                "%s.new" % self.background, self.background)

    def __unicode__(self):
        textexcerpt = " ".join([str(x) for x in self])
        return "Page %d (%d x %d): '%s...'" % (
            self.number, self.width, self.height, str(textexcerpt[:40]))

    def __str__(self):
        # FIX: was 'return str(self).encode("ascii")', which recursed
        # infinitely since str(self) calls __str__ again
        return self.__unicode__()
if __name__ == '__main__':
    # Smoke test: parse the PDF given on the command line and print an
    # overview of (part of) each page's text boxes.
    if len(sys.argv) != 2:
        print("Usage: %s pdffile" % sys.argv[0])
    else:
        reader = PDFReader()
        reader.read(sys.argv[1])
        for page in reader:
            print(str(page))
            for box in page.boundingbox(0, 500, 1029, 700):
                print(" [%dx%d][%dx%d] %s" % (box.top, box.left,
                                              box.bottom, box.right,
                                              str(box)))
        print(reader.median_box_width(threshold=0))
# Fixes a error with incorrectly nested tags often found in pdftohtml
# generated xml
if not six.PY3:
    # sgmllib only exists on python 2, so this whole workaround is
    # py2-only
    import sgmllib
    from xml.sax.saxutils import escape as xml_escape
    import unicodedata

    class PDFXMLFix(sgmllib.SGMLParser):

        """SGML parser that re-serializes its input, working around
        mis-nested end tags and stripping control characters."""

        # tags that are written in self-closed form ("<fontspec .../>")
        selfclosing = ["fontspec"]

        # preparations to remove invalid chars in handle_data:
        # build a regex matching every BMP control character...
        all_chars = (unichr(i) for i in range(0x10000))
        control_chars = ''.join(
            c for c in all_chars if unicodedata.category(c) == 'Cc')
        # tab and newline are technically Control characters in
        # unicode, but we want to keep them.
        control_chars = control_chars.replace("\t", "").replace("\n", "")
        control_char_re = re.compile('[%s]' % re.escape(control_chars))

        def __init__(self):
            sgmllib.SGMLParser.__init__(self)
            self.tags = []   # stack of currently-open tag names
            self.fp = None   # output file; opened in fix() unless preset

        def fix(self, filename):
            """Parse filename and rewrite it in place (via a tempfile),
            unless self.fp has been set beforehand, in which case output
            goes there instead."""
            usetempfile = not self.fp
            if usetempfile:
                tmpfile = mktemp()
                self.fp = open(tmpfile, "w")
                self.fp.write('<?xml version="1.0" encoding="UTF-8"?>')
            f = open(filename)
            # feed the parser in 8K chunks
            while True:
                s = f.read(8192)
                if not s:
                    break
                self.feed(s)
            self.close()
            if usetempfile:
                self.fp.close()
                # only touch filename if the fixed output differs
                if util.replace_if_different(tmpfile, filename):
                    print(("replaced %s with %s" % (filename, tmpfile)))
                else:
                    print(("%s was identical to %s" % (filename, tmpfile)))

        def close(self):
            sgmllib.SGMLParser.close(self)
            # close any tags still left open at end of input
            if self.tags:
                sys.stderr.write(
                    "WARNING: opened tag(s) %s not closed" % self.tags)
                self.fp.write(
                    "".join(["</%s>" % x for x in reversed(self.tags)]))

        def handle_decl(self, decl):
            self.fp.write("<!%s>" % decl)

        def handle_data(self, data):
            len_before = len(data)
            # strip control chars, then XML-escape the rest
            data = xml_escape(self.control_char_re.sub('', data))
            len_after = len(data)
            #if len_before != len_after:
            #    sys.stderr.write("WARNING: data changed from %s to %s chars: %r\n" % (len_before,len_after,data))
            self.fp.write(data)

        def unknown_starttag(self, start, attrs):
            if start in self.selfclosing:
                close = "/"
            else:
                close = ""
            # NOTE: the tag is pushed even when written self-closed --
            # presumably matched by a corresponding end tag in the
            # input; confirm against pdftohtml output
            self.tags.append(start)
            if attrs:
                fmt = ['%s="%s"' % (x[0], x[1]) for x in attrs]
                self.fp.write("<%s %s%s>" % (start, " ".join(fmt), close))
            else:
                self.fp.write("<%s>" % start)

        def unknown_endtag(self, end):
            start = self.tags.pop()
            if end != start and end in self.tags:
                # mis-nested: close the inner tag, close/reopen around
                # the outer one to produce well-formed output
                self.fp.write("</%s>" % start)
                self.fp.write("</%s>" % end)
                self.fp.write("<%s>" % start)
            else:
                self.fp.write("</%s>" % end)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import os
import tempfile
import shutil
import time
import json
import codecs
import collections
import filecmp
from io import BytesIO, StringIO
from difflib import unified_diff
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
if sys.version_info < (2, 7, 0):
import unittest2 as unittest
else:
import unittest
import six
from six import text_type as str
from six import binary_type as bytes
import rdflib
from rdflib.compare import graph_diff
from rdflib.util import guess_format
from lxml.doctestcompare import LXMLOutputChecker
from lxml import etree
from ferenda import DocumentRepository
from ferenda import TextReader
from ferenda import elements
from ferenda import util
class FerendaTestCase(object):

    """Convenience class with extra AssertEqual methods, meant to be
    mixed in alongside unittest.TestCase."""

    # FIXME: Some of these should (at least optionally) be registered
    # with TestCase.assertEqual through .addTypeEqualityFunc, but some
    # (eg. assertEqualDirs) have non-unique types

    def assertEqualGraphs(self, want, got, exact=True):
        """Assert that the two RDF graphs (or files parseable into
        graphs) contain the same triples; with exact=False, got may
        contain extra triples."""
        def _loadgraph(filename):
            g = rdflib.Graph()
            g.parse(filename, format=guess_format(filename))
            return g
        if not isinstance(want, rdflib.Graph):
            want = _loadgraph(want)
        if not isinstance(got, rdflib.Graph):
            got = _loadgraph(got)
        (in_both, in_first, in_second) = graph_diff(want, got)
        # build a diff-style message: '-' for missing, '+' for extra
        msg = ""
        if in_first:
            for (s, p, o) in sorted(in_first, key=lambda t: (t[0], t[1], t[2])):
                msg += "- %s %s %s\n" % (s.n3(), p.n3(), o.n3())
        if (exact and in_second) or in_first:
            for (s, p, o) in sorted(in_second, key=lambda t: (t[0], t[1], t[2])):
                msg += "+ %s %s %s\n" % (s.n3(), p.n3(), o.n3())
        if ((len(in_first) > 0) or (len(in_second) > 0 and exact)):
            if len(in_first) > 0:
                msg = "%s expected triples were not found\n" % len(in_first) + msg
            if len(in_second) > 0:
                msg = "%s unexpected triples were found\n" % len(in_second) + msg
            msg = "%r != %r\n" % (want, got) + msg
            self.fail(msg)

    def assertAlmostEqualDatetime(self, datetime1, datetime2, delta=1):
        """Assert that two datetimes differ by at most delta seconds."""
        # if the datetimes differ with max 1 second, they're almost equal)
        time1 = time.mktime(datetime1.timetuple())
        time2 = time.mktime(datetime2.timetuple())
        absdiff = abs(time1 - time2)
        self.assertLessEqual(absdiff, delta, "Difference between %s and %s "
                             "is %s seconds which is NOT almost equal" %
                             (datetime1.isoformat(), datetime2.isoformat(),
                              absdiff))

    def assertEqualXML(self, want, got):
        """Assert that two XML documents (str, bytes or Element) are
        semantically equal, producing a unified diff on failure."""
        # Adapted from formencode, https://bitbucket.org/ianb/formencode/
        def xml_compare(want, got, reporter):
            if want.tag != got.tag:
                reporter("Tags do not match: 'want': %s, 'got': %s" % (want.tag, got.tag))
                return False
            for name, value in want.attrib.items():
                if got.attrib.get(name) != value:
                    reporter("Attributes do not match: 'want': %s=%r, 'got': %s=%r"
                             % (name, value, name, got.attrib.get(name)))
                    return False
            for name in got.attrib.keys():
                if name not in want.attrib:
                    reporter("'got' has an attribute 'want' is missing: %s"
                             % name)
                    return False
            if not text_compare(want.text, got.text):
                reporter("text: 'want': %r, 'got': %r" % (want.text, got.text))
                return False
            if not text_compare(want.tail, got.tail):
                reporter("tail: 'want': %r != 'got': %r" % (want.tail, got.tail))
                return False
            cl1 = want.getchildren()
            cl2 = got.getchildren()
            if len(cl1) != len(cl2):
                reporter("children length differs, 'want': %i, 'got': %i"
                         % (len(cl1), len(cl2)))
                return False
            i = 0
            for c1, c2 in zip(cl1, cl2):
                i += 1
                if not xml_compare(c1, c2, reporter=reporter):
                    reporter('children %i do not match: %s'
                             % (i, c1.tag))
                    return False
            return True

        def text_compare(want, got):
            # whitespace-insensitive; None equals empty string
            if not want and not got:
                return True
            return (want or '').strip() == (got or '').strip()

        def treeify(something):
            if isinstance(something, str):
                fp = BytesIO(something.encode('utf-8'))
                return etree.parse(fp)
            elif isinstance(something, bytes):
                fp = BytesIO(something)
                return etree.parse(fp)
            # FIX: used to test isinstance(want, ...), ie the outer
            # argument instead of the value being converted, so passing
            # an Element as 'got' raised ValueError
            elif isinstance(something, etree._Element):
                # FIXME: wrap in ElementTree
                return something
            else:
                raise ValueError("Can't convert a %s into an ElementTree" % type(something))

        def c14nize(tree):
            # canonical (C14N) serialization, for stable diffs
            tmp = BytesIO()
            tree.write_c14n(tmp)
            return tmp.getvalue().decode('utf-8')

        errors = []
        want_tree = treeify(want)
        got_tree = treeify(got)
        xml_compare(want_tree.getroot(),
                    got_tree.getroot(),
                    errors.append)
        if errors:
            want_lines = [x + "\n" for x in c14nize(want_tree).split("\n")]
            got_lines = [x + "\n" for x in c14nize(got_tree).split("\n")]
            diff = unified_diff(want_lines, got_lines, "want.xml", "got.xml")
            msg = "".join(diff) + "\n\nERRORS:" + "\n".join(errors)
            raise AssertionError(msg)

    def assertEqualDirs(self, want, got, suffix=None, filterdir="entries"):
        """Assert that two directory trees contain the same files with
        the same contents (ignoring the filterdir subdirectory)."""
        wantfiles = [x[len(want) + 1:] for x in util.list_dirs(want, suffix) if not x.startswith(want+os.sep+filterdir)]
        gotfiles = [x[len(got) + 1:] for x in util.list_dirs(got, suffix) if not x.startswith(got+os.sep+filterdir)]
        self.maxDiff = None
        self.assertEqual(wantfiles, gotfiles)  # or assertIn?
        for f in gotfiles:
            self.assertTrue(filecmp.cmp(os.path.join(want, f),
                                        os.path.join(got, f),
                                        shallow=False))
class RepoTester(unittest.TestCase, FerendaTestCase):

    """Base class for testing a DocumentRepository class: provides
    download/distill/parse test templates driven by files on disk."""

    # A subclass must override these two
    repoclass = DocumentRepository
    docroot = '/tmp'

    def setUp(self):
        # each test gets a fresh datadir and repo instance
        self.datadir = tempfile.mkdtemp()
        self.repo = self.repoclass(datadir=self.datadir)

    def tearDown(self):
        shutil.rmtree(self.datadir)

    def filename_to_basefile(self, filename):
        """Converts a test filename to a basefile. Default implementation
        simply returns a hard-coded basefile.

        :param filename: The test file
        :type filename: str
        :returns: Corresponding basefile
        :rtype: str
        """
        return "1"

    def download_test(self, specfile):
        """Run self.repo.download() with requests.get patched to serve
        canned responses from specfile, then compare the downloaded
        files against the expected ones."""
        def my_get(url, **kwargs):
            urlspec = spec[url]
            if isinstance(urlspec, str):
                urlspec = {'file': urlspec}
            if 'charset' not in urlspec:
                urlspec['charset'] = 'utf-8'
            url_location = os.path.join(os.path.dirname(specfile),
                                        urlspec['file'])
            res = Mock()
            # load up both .text and .content properties
            with codecs.open(url_location, "r", encoding=urlspec['charset']) as fp:
                res.text = fp.read()
            with open(url_location, "rb") as fp:
                res.content = fp.read()
            res.headers = collections.defaultdict(lambda: None)
            res.headers['X-These-Headers-Are'] = 'Faked'
            res.status_code = 200
            return res

        with open(specfile) as fp:
            spec = json.load(fp)
        with patch('requests.get', side_effect=my_get):
            self.repo.download()
        # organize a temporary copy of files that we can compare our results to
        wantdir = "%s/%s-want" % (self.datadir, self.repoclass.alias)
        for url in spec:
            if "expect" in spec[url]:
                sourcefile = os.path.join(os.path.dirname(specfile),
                                          spec[url]['file'])
                wantfile = "%s/%s" % (wantdir, spec[url]['expect'])
                util.copy_if_different(sourcefile, wantfile)
        self.assertEqualDirs(wantdir,
                             "%s/%s" % (self.datadir,
                                        self.repoclass.alias))

    def distill_test(self, downloaded_file, rdf_file, docroot):
        """Parse downloaded_file and compare the distilled RDF against
        rdf_file (non-exact comparison)."""
        try:
            prefixlen = len(docroot + "/downloaded/")
            suffixlen = len(self.repo.store.downloaded_suffix)
            pathfrag = downloaded_file[prefixlen:-suffixlen]
            basefile = self.repo.store.pathfrag_to_basefile(pathfrag)
        except Exception:
            # FIX: was a bare 'except:', which would also swallow
            # KeyboardInterrupt/SystemExit
            basefile = self.filename_to_basefile(downloaded_file)
        with patch('ferenda.DocumentStore.downloaded_path',
                   return_value=downloaded_file):
            self.repo.parse(basefile)
        self.assertEqualGraphs(rdf_file,
                               self.repo.store.distilled_path(basefile),
                               exact=False)

    def parse_test(self, downloaded_file, xhtml_file, docroot):
        """Parse downloaded_file and compare the parsed XHTML against
        xhtml_file (or overwrite it if FERENDA_SET_TESTFILES is set)."""
        # patch method so we control where the downloaded doc is
        # loaded from.
        basefile = self.filename_to_basefile(downloaded_file)
        with patch('ferenda.DocumentStore.downloaded_path',
                   return_value=downloaded_file):
            self.repo.parse(basefile)
        if 'FERENDA_SET_TESTFILES' in os.environ:
            print("Overwriting %s with result of parse(%s)" % (xhtml_file, basefile))
            util.robust_rename(xhtml_file, xhtml_file + "~")
            shutil.copy2(self.repo.store.parsed_path(basefile), xhtml_file)
            return
        self.assertEqualXML(util.readfile(xhtml_file),
                            util.readfile(self.repo.store.parsed_path(basefile)))

    # for win32 compatibility and simple test case code
    def p(self, path, prepend_datadir=True):
        """Return path (optionally under self.datadir) with the
        platform's native directory separator."""
        if prepend_datadir:
            path = self.datadir + "/" + path
        return path.replace('/', '\\') if os.sep == '\\' else path
def parametrize(cls, template_method, name, params, wrapper=None):
    """Attach a new test method called name to cls, which invokes
    template_method with the given params (ie a parametrized test)."""
    def test_method(self):
        template_method(self, *params)
    # py2 compat: name is a unicode object, func.__name__ must be a str(?)
    if sys.version_info[0] >= 3:
        test_method.__name__ = name
    else:
        # note that we have redefined bytes to six.binary_type
        test_method.__name__ = bytes(name)
    # wrapper is a unittest decorator like skip or expectedFailure
    target = wrapper(test_method) if wrapper else test_method
    setattr(cls, name, target)
def file_parametrize(cls, directory, suffix, filter=None, wrapper=None):
    """Create one test per matching file in directory.

    Call with any class that subclasses unittest.TestCase and which has
    a parametric_test method, like so::

        class MyTest(unittest.TestCase):
            def parametric_test(self, filename):
                self.assertTrue(os.path.exists(filename))

        from ferenda.testutil import file_parametrize
        file_parametrize(Parse, "test/files/legaluri", ".txt")
    """
    for filename in os.listdir(directory):
        if not filename.endswith(suffix):
            continue
        if filter and filter(filename):
            continue
        testname = "test_" + filename[:-len(suffix)].replace("-", "_")
        fullpath = directory + os.path.sep + filename
        parametrize(cls, cls.parametric_test, testname, (fullpath,), wrapper)
def parametrize_repotester(cls):
    """Given a RepoTester subclass, create download/distill/parse tests
    for every spec/downloaded file found under cls.docroot."""
    docroot = cls.docroot
    # 1. download tests: one per JSON spec file in <docroot>/source
    for filename in os.listdir(docroot + "/source"):
        if filename.endswith(".json"):
            testname = "test_download_" + filename[:-5].replace("-", "_")
            fullname = docroot + "/source/" + filename
            parametrize(cls, cls.download_test, testname, (fullname,))
    # 2. parse tests: one distill + one parse test per downloaded file
    suf = cls.repoclass.downloaded_suffix
    basedir = docroot + "/downloaded"
    for filename in util.list_dirs(basedir, suffix=suf):
        # make filename relative to basedir
        filename = filename[len(basedir)+1:]
        if filename.endswith(suf):
            downloaded_file = "%s/downloaded/%s" % (docroot, filename)
            basefile = os.path.splitext(filename)[0]  # shld we use store.pathfrag_to_basefile?
            basefile = basefile.replace("\\", "/")
            # sanitize basefile into a legal test method name suffix
            basetest = basefile.replace("-", "_").replace("/", "_")
            # Test 1: is rdf distilled correctly?
            # (missing expectation file => test is expected to fail)
            rdf_file = "%s/distilled/%s.ttl" % (docroot, basefile)
            testname = ("test_distill_" + basetest)
            wrapper = unittest.expectedFailure if not os.path.exists(rdf_file) else None
            parametrize(cls, cls.distill_test, testname, (downloaded_file, rdf_file, docroot), wrapper)
            # Test 2: is xhtml parsed correctly?
            xhtml_file = "%s/parsed/%s.xhtml" % (docroot, basefile)
            testname = ("test_parse_" + basetest)
            wrapper = unittest.expectedFailure if not os.path.exists(xhtml_file) else None
            parametrize(cls, cls.parse_test, testname, (downloaded_file, xhtml_file, docroot), wrapper)
def testparser(testcase, parser, filename):
    """Run parser over the paragraphs of filename and compare the
    serialized result against the corresponding .xml file; if no such
    file exists, fail with the parse result (and parser debugging on)."""
    wantfilename = filename.replace(".txt", ".xml")
    if not os.path.exists(wantfilename):
        parser.debug = True
    reader = TextReader(filename, encoding="utf-8", linesep=TextReader.UNIX)
    body = parser.parse(reader.getiterator(reader.readparagraph))
    testcase.maxDiff = 4096
    if not os.path.exists(wantfilename):
        raise AssertionError("Want file not found. Result of parse:\n" +
                             elements.serialize(body))
    with codecs.open(wantfilename, encoding="utf-8") as fp:
        want = fp.read().strip()
    got = elements.serialize(body).strip()
    testcase.assertEqualXML(want, got)
| Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import six
if six.PY3:
from urllib.parse import quote
else:
from urllib import quote
def generic(d):
    """Mint a generic example.org URI from all key/value pairs in d,
    percent-encoding both keys and values."""
    encoded = ["%s=%s" % (quote(k), quote(v)) for (k, v) in d.items()]
    return "http://example.org/?%s" % "&".join(encoded)
def url(d):
    """Assemble an URI from the dict d, which must contain 'scheme',
    'netloc' and 'path' keys and may contain 'query' and 'fragment'."""
    has_query = 'query' in d
    has_fragment = 'fragment' in d
    if has_query and has_fragment:
        return "%(scheme)s://%(netloc)s%(path)s?%(query)s#%(fragment)s" % d
    if has_query:
        return "%(scheme)s://%(netloc)s%(path)s?%(query)s" % d
    if has_fragment:
        return "%(scheme)s://%(netloc)s%(path)s#%(fragment)s" % d
    return "%(scheme)s://%(netloc)s%(path)s" % d
def eulaw(d):
    # URI minting for EU law documents is not implemented yet.
    raise NotImplementedError()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import functools
import codecs
import time
import six
from six import text_type as str
from rdflib import Graph
from ferenda import util
from ferenda.errors import DocumentRemovedError, ParseError
def timed(f):
    """Automatically log a statement of how long the function call takes"""
    @functools.wraps(f)
    def wrapper(self, doc):
        before = time.time()
        result = f(self, doc)
        elapsed = time.time() - before
        # FIXME: We shouldn't log this if we don't actually do any
        # work. The easiest way is to make sure parseifneeded wraps
        # timed, not the other way round.
        self.log.info('%s: OK (%.3f sec)', doc.basefile, elapsed)
        return result
    return wrapper
def recordlastdownload(f):
    """FIXME: placeholder decorator -- currently just delegates to f."""
    @functools.wraps(f)
    def inner(self):
        result = f(self)
        return result
    return inner
def parseifneeded(f):
    """Makes sure the parse function is only called if needed, i.e. if
    the outfile is nonexistent or older than the infile(s), or if the
    user has specified in the config file or on the command line that
    it should be re-generated."""
    @functools.wraps(f)
    def wrapper(self, doc):
        # note: We hardcode the use of .downloaded_path, .parsed_path
        # and the 'parseforce' config option, which means that this
        # decorator can only be used sensibly with the .parse()
        # function.
        infile = self.store.downloaded_path(doc.basefile)
        outfile = self.store.parsed_path(doc.basefile)
        forced = (self.config.force is True or
                  self.config.parseforce is True)
        if forced or not util.outfile_is_newer([infile], outfile):
            self.log.debug("%s: Starting", doc.basefile)
            return f(self, doc)
        self.log.debug("%s: Skipped", doc.basefile)
        return True  # Signals that everything is OK
    return wrapper
def render(f):
    """Decorator for parse(): after f runs, serialize doc to XHTML and
    verify that every triple in doc.meta (and descendant .meta graphs)
    survived the round-trip through the serialized file."""
    def iterate_graphs(node):
        # collect node.meta plus the .meta graphs of all descendants
        res = []
        if hasattr(node, 'meta') and node.meta is not None:
            res.append(node.meta)
        for subnode in node:
            if not isinstance(subnode, six.string_types):
                res.extend(iterate_graphs(subnode))
        return res

    @functools.wraps(f)
    def wrapper(self, doc):
        ret = f(self, doc)
        updated = self.render_xhtml(doc, self.store.parsed_path(doc.basefile))
        if updated:
            self.log.debug("%s: Created %s" % (doc.basefile, self.store.parsed_path(doc.basefile)))
        # css file + background images + png renderings of text
        self.create_external_resources(doc)
        # Check to see that all metadata contained in doc.meta is
        # present in the serialized file.
        distilled_graph = Graph()
        # re-extract the metadata by parsing the RDFa in the XHTML file
        with codecs.open(self.store.parsed_path(doc.basefile), encoding="utf-8") as fp:  # unicode
            distilled_graph.parse(data=fp.read(), format="rdfa")
        util.ensure_dir(self.store.distilled_path(doc.basefile))
        with open(self.store.distilled_path(doc.basefile), "wb") as distilled_file:
            distilled_graph.serialize(distilled_file, format="pretty-xml")
        self.log.debug(
            '%s: %s triples extracted to %s', doc.basefile, len(distilled_graph), self.store.distilled_path(doc.basefile))
        # NOTE: doc.meta is mutated below -- every triple found in the
        # serialized file is removed; whatever remains was lost
        for g in iterate_graphs(doc.body):
            doc.meta += g
        for triple in distilled_graph:
            doc.meta.remove(triple)
        if doc.meta:
            self.log.warning("%s: %d triple(s) from the original metadata was "
                             "not found in the serialized XHTML file:\n%s",
                             doc.basefile, len(doc.meta),
                             doc.meta.serialize(format="nt").decode('utf-8').strip())
        return ret
    return wrapper
def handleerror(f):
    """Make sure any errors in parse are handled appropriately and do
    not stop the execution of parse_all"""
    @functools.wraps(f)
    def wrapper(self, doc):
        try:
            return f(self, doc)
        except DocumentRemovedError as e:
            self.log.info(
                "%s: Document has been removed (%s)", doc.basefile, e)
            # NOTE(review): other decorators in this module use
            # self.store.parsed_path -- confirm whether self.parsed_path
            # is really intended here
            util.robust_remove(self.parsed_path(doc.basefile))
            return False
        except KeyboardInterrupt:
            # never swallow a user interrupt
            raise
        except ParseError as e:
            self.log.error("%s: ParseError %s", doc.basefile, e)
            # re-raise only if configured as fatal
            if (hasattr(self.config, 'fatalexceptions') and
                    self.config.fatalexceptions):
                raise
            else:
                return False
        except:
            # catch-all: log the full traceback, optionally re-raise
            self.log.exception("parse of %s failed", doc.basefile)
            if (hasattr(self.config, 'fatalexceptions') and
                    self.config.fatalexceptions):
                raise
            else:
                return False
    return wrapper
def makedocument(f):
    """Changes the signature of the parse method to expect a Document
    object instead of a basefile string, and creates the object."""
    @functools.wraps(f)
    def wrapper(self, basefile):
        return f(self, self.make_document(basefile))
    return wrapper
def managedparsing(f):
    """Use all standard decorators for parse() in the correct order
    (makedocument, parseifneeded, timed, render)"""
    # applied innermost-first: render wraps f, then timed, etc.
    return makedocument(
        parseifneeded(
            #handleerror( # is this really a good idea?
            timed(
                render(f))))
def action(f):
    """Decorator that marks a class or instance method as runnable by
    ``manager.run`` and ``manager._run_class.``"""
    # tag the function object itself; the manager checks this attribute
    setattr(f, "runnable", True)
    return f
| Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""General library of small utility functions"""
import os
import sys
import subprocess
import codecs
import shutil
import locale
import re
from tempfile import mktemp
from collections import namedtuple
import filecmp
import hashlib
import datetime
import posixpath
import six
import time
from contextlib import contextmanager
if six.PY3:
from urllib.parse import urlsplit, urlunsplit
else:
from urlparse import urlsplit, urlunsplit
from email.utils import parsedate_tz
try:
    from functools import cmp_to_key  # new in 2.7
except ImportError:
    # backport of functools.cmp_to_key for python < 2.7
    def cmp_to_key(mycmp):
        'Convert a cmp= function into a key= function'
        # wrap each value in a K object whose rich comparisons delegate
        # to mycmp
        class K(object):
            def __init__(self, obj, *args):
                self.obj = obj
            def __lt__(self, other):
                return mycmp(self.obj, other.obj) < 0
            def __gt__(self, other):
                return mycmp(self.obj, other.obj) > 0
            def __eq__(self, other):
                return mycmp(self.obj, other.obj) == 0
            def __le__(self, other):
                return mycmp(self.obj, other.obj) <= 0
            def __ge__(self, other):
                return mycmp(self.obj, other.obj) >= 0
            def __ne__(self, other):
                return mycmp(self.obj, other.obj) != 0
        return K
import bs4
import pkg_resources
from . import errors
# We should reorganize this, maybe in util.File, util.String, and so on...
# util.Namespaces
# Set up common namespaces and suitable prefixes for them
# prefix (str) -> namespace URI (str)
ns = {'dc': 'http://purl.org/dc/elements/1.1/',
      'dct': 'http://purl.org/dc/terms/',
      'rdfs': 'http://www.w3.org/2000/01/rdf-schema#',
      'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
      'skos': 'http://www.w3.org/2004/02/skos/core#',
      'xsd': 'http://www.w3.org/2001/XMLSchema#',
      'foaf': 'http://xmlns.com/foaf/0.1/',
      'owl': 'http://www.w3.org/2002/07/owl#',
      'xhv': 'http://www.w3.org/1999/xhtml/vocab#',
      'prov': 'http://www.w3.org/ns/prov-o/',
      'bibo': 'http://purl.org/ontology/bibo/',
      # FIXME: These non-general
      # vocabularies should not be
      # used in a general utility
      # module like this
      # 'rinfo': 'http://rinfo.lagrummet.se/taxo/2007/09/rinfo/pub#',
      # 'rpubl': 'http://rinfo.lagrummet.se/ns/2008/11/rinfo/publ#',
      # 'rinfoex': 'http://lagen.nu/terms#',
      # 'eurlex': 'http://lagen.nu/eurlex#',
      # 'ir': 'http://lagen.nu/informationretrieval#',
      }
"""A mapping of well-known prefixes and their corresponding namespaces."""
# util.File
def mkdir(newdir):
    """Create the directory newdir (including any missing parents);
    do nothing if it already exists."""
    if os.path.exists(newdir):
        return
    os.makedirs(newdir)
# util.File
def ensure_dir(filename):
    """Make sure the directory part of filename exists, so that the
    file can be created."""
    dirname = os.path.dirname(filename)
    if not dirname or os.path.exists(dirname):
        return
    try:
        mkdir(dirname)
    except OSError:
        # A separate process (when running multiprocessing) might
        # have created the directory
        pass
# util.File
def robust_rename(old, new):
    """Rename old to new no matter what (if the file exists, it's
    removed, if the target dir doesn't exist, it's created)"""
    ensure_dir(new)
    if os.path.exists(new):
        os.unlink(new)
    # os.rename may fail across file systems; shutil.move handles that
    try:
        shutil.move(old, new)
    except IOError:
        # eh, what are you gonna do?
        pass
# util.File
def robust_remove(file):
    """Delete file if it exists; a missing file is silently ignored."""
    if not os.path.exists(file):
        return
    os.unlink(file)
# relpath was introduced in py26, but that's the lowest ver we support
# -- no need for backport
# def relpath(path, start=os.curdir):
# like os.relpath, but for urls
def relurl(url, starturl):
    """Works like os.path.relpath, but for urls: express url relative
    to starturl when both share scheme and netloc; otherwise return
    url unchanged."""
    parts = urlsplit(url)
    startparts = urlsplit(starturl)
    # different domain => no relative url possible
    if urlunsplit(parts[:2] + ('', '', '')) != urlunsplit(startparts[:2] + ('', '', '')):
        return url
    relpath = posixpath.relpath(parts.path, posixpath.dirname(startparts.path))
    return urlunsplit(('', '', relpath, parts.query, parts.fragment))
# util.Sort
# FIXME: Is this even used (since the cmp param has been removed in
# py3)? Change to a key function (or just use split_numalpha)?
def numcmp(x, y):
    """Sorts ['1','10','1a', '2'] => ['1', '1a', '2', '10']"""
    keyx = split_numalpha(x)
    keyy = split_numalpha(y)
    # cmp-style result: -1, 0 or 1 (cmp itself is gone in py3)
    if keyx < keyy:
        return -1
    if keyx > keyy:
        return 1
    return 0
# util.Sort
def split_numalpha(s):
    """Split a string into alternating string and int segments, usable
    as a sort key for strings containing numbers.

    '10 a §' => ['', 10, ' a §']

    The result always starts with a (possibly empty) string, so that
    keys for different strings compare without type errors.
    """
    # FIX: guard against empty input, which made s[0] raise IndexError
    if not s:
        return ['']
    res = []
    seg = ''
    digit = s[0].isdigit()
    for c in s:
        if c.isdigit() == digit:
            seg += c
        else:
            res.append(int(seg) if seg.isdigit() else seg)
            seg = c
            digit = not digit
    res.append(int(seg) if seg.isdigit() else seg)
    if isinstance(res[0], int):
        res.insert(0, '')  # to make sure every list has type str,int,str,int....
    return res
def indent_xml_file(infile):
    """Neatifies an existing XML file in-place.

    Requires the ``tidy`` command line tool."""
    # NOTE(review): mktemp is race-prone; consider NamedTemporaryFile
    tmpfile = mktemp()
    cmd = "tidy -q -xml -asxml -utf8 -w 95 -i %s > %s" % (infile, tmpfile)
    (ret, stdout, stderr) = runcmd(cmd)
    if (ret != 0):
        raise errors.TransformError(stderr)
    # only touch infile if the output actually differs
    replace_if_different(tmpfile, infile)
def indent_html_file(infile):
    """Neatifies an existing XHTML file in-place.

    Requires the ``tidy`` command line tool."""
    # NOTE(review): mktemp is race-prone; consider NamedTemporaryFile
    tmpfile = mktemp()
    cmd = "tidy -q -asxhtml -utf8 -w 95 -i %s > %s" % (infile, tmpfile)
    print(("indent_html_file: Running %s " % cmd))
    (ret, stdout, stderr) = runcmd(cmd)
    if (ret != 0):
        raise errors.TransformError(stderr)
    # only touch infile if the output actually differs
    replace_if_different(tmpfile, infile)
# util.XML
def tidy(tagsoup):
    """Run HTML Tidy over tagsoup (a str, or bytes assumed to be utf-8)
    and return the cleaned-up XHTML document as a str.

    Requires the ``tidy`` command line tool."""
    # NOTE(review): mktemp is race-prone; kept for consistency with the
    # rest of this module
    tmpin = mktemp()
    tmpout = mktemp()
    # FIX: write the input in binary mode -- the old text-mode open
    # broke on python 3, where tagsoup.encode('utf-8') yields bytes
    # that can't be written to a text-mode file
    with open(tmpin, "wb") as f:
        if isinstance(tagsoup, str):
            f.write(tagsoup.encode('utf-8'))
        else:
            f.write(tagsoup)
    cmd = "%s -q -asxhtml -utf8 %s > %s" % ("tidy", tmpin, tmpout)
    (ret, stdout, stderr) = runcmd(cmd)
    robust_remove(tmpin)
    with codecs.open(tmpout, encoding="utf-8") as f:
        result = f.read()
    robust_remove(tmpout)
    return result
# util.XML
def transform(stylesheet, infile, outfile, parameters={}, validate=True, xinclude=False, keep_unchanged=False):
    """Does a XSLT transform with the selected stylesheet. Afterwards, formats the resulting HTML tree and validates it

    Requires the ``xsltproc`` and ``xmllint`` command line tools."""
    param_str = ""
    for p in list(parameters.keys()):
        # this double style quoting is needed for lawlist.xsl when
        # using the tagname parameter on macos. Maybe for other
        # reasons as well, I dunno
        param_str += "--param %s \"'%s'\" " % (p, parameters[p])
    if xinclude:
        # resolve XIncludes into a temporary file first
        tmpfile = mktemp()
        cmdline = "xmllint --xinclude --encode utf-8 %s > %s" % (
            infile, tmpfile)
        (ret, stdout, stderr) = runcmd(cmdline)
        #if (ret != 0):
        #    raise errors.TransformError(stderr)
        infile = tmpfile
    if ' ' in infile:
        # shell-quote paths containing spaces
        infile = '"%s"' % infile
    tmpfile = mktemp()
    cmdline = "xsltproc --nonet %s %s %s > %s" % (
        param_str, stylesheet, infile, tmpfile)
    print(cmdline)
    (ret, stdout, stderr) = runcmd(cmdline)
    if (ret != 0):
        raise errors.TransformError(stderr)
    # If ret == 0, then whatever's printed on stderr are just warnings
    # (most likely 'failed to load external entity "http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd"')
    # Default behaviour is now to change the resulting file so that
    # timestamps reflect the fact that the transformed file is more
    # recent than the ingoing files.
    if keep_unchanged:
        replace_if_different(tmpfile, outfile)
    else:
        robust_rename(tmpfile, outfile)
    if os.path.exists(tmpfile):
        os.unlink(tmpfile)
    if xinclude:
        os.unlink(infile)
    if validate:
        # NOTE(review): 'basepath' is not defined anywhere in this
        # module -- validate=True will raise NameError; confirm where
        # basepath was meant to come from
        cmdline = "xmllint --noout --nonet --nowarning --dtdvalid %s/dtd/xhtml1-strict.dtd %s" % (basepath, outfile)
        (ret, stdout, stderr) = runcmd(cmdline)
        if (ret != 0):
            raise errors.ValidationError(stderr)
# util.Sort
def unique_list(*lists):
    """Return one list with the distinct items of all *lists*,
    preserving first-seen order."""
    seen = {}
    for sequence in lists:
        for item in sequence:
            seen[item] = 1
    return list(seen.keys())
# util.Process
def runcmd(cmdline, require_success=False, cwd=None):
    """Run *cmdline* through the shell and return a
    ``(returncode, stdout, stderr)`` tuple, with output decoded to text.

    :param cmdline: the command line to execute
    :param require_success: if True, raise on a non-zero exit code
    :param cwd: working directory for the command
    :raises errors.ExternalCommandError: if require_success is set and
                                         the command fails

    BUGFIX: subprocess.Popen without text mode returns *bytes* on
    Python 3, so the old ``isinstance(stdout, str)`` guard never fired
    and output was returned undecoded; we now decode bytes.
    """
    cmdline_needs_encoding = False  # not needed on mac, maybe on other platforms?
    if isinstance(cmdline, str) and cmdline_needs_encoding:
        # FIXME: How do we detect the proper encoding? Using
        # sys.stdout.encoding gives 'cp850' on windows, which is not
        # what xsltproc expects
        coding = 'utf-8' if sys.stdin.encoding == 'UTF-8' else 'iso-8859-1'
        cmdline = cmdline.encode(coding)
    p = subprocess.Popen(
        cmdline, cwd=cwd, shell=True,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    ret = p.returncode
    # pick the console encoding for decoding the command's output
    if sys.stdout.encoding:
        enc = sys.stdout.encoding
    else:
        enc = locale.getpreferredencoding()
    if isinstance(stdout, bytes):
        stdout = stdout.decode(enc)
    if isinstance(stderr, bytes):
        stderr = stderr.decode(enc)
    if (require_success and ret != 0):
        # FIXME: ExternalCommandError should have fields for cmd and
        # ret as well (and a sensible __str__ implementatiton)
        raise errors.ExternalCommandError(stderr)
    return (p.returncode, stdout, stderr)
# util.String
def normalize_space(string):
    """Collapse every run of whitespace in *string* into a single
    space, and strip leading/trailing whitespace."""
    pieces = string.split()
    return ' '.join(pieces)
# util.File
def list_dirs(d, suffix=None, reverse=False):
    """A generator that works much like os.listdir, only recursively
    (and only yields files, not directories).

    :param d: the directory to start in
    :param suffix: if given, only yield files ending in this suffix
    :param reverse: reverse the (natural) sort order

    BUGFIX: the original tested ``elif os.path.isfile:`` (the function
    object itself, which is always truthy) instead of calling it.
    """
    # inspired by http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/161542
    directories = [d]
    while directories:
        d = directories.pop()
        # sort with split_numalpha for natural ordering ("2" before "10")
        for f in sorted(os.listdir(d), key=split_numalpha, reverse=reverse):
            f = "%s%s%s" % (d, os.path.sep, f)
            if os.path.isdir(f):
                directories.insert(0, f)
            elif os.path.isfile(f):
                if suffix and not f.endswith(suffix):
                    continue
                else:
                    yield f
## util.String (or XML?)
def element_text(element):
    # Deliberately disabled: this helper is retired. The commented-out
    # implementation below extracted normalized plaintext from a
    # BeautifulSoup element; kept for reference only.
    raise NotImplementedError("Don't use this!")
    # """finds the plaintext contained in a BeautifulSoup element"""
    # return normalize_space(
    #     ''.join(
    #         [e for e in element.recursiveChildGenerator()
    #          if (isinstance(e, str) and
    #              not isinstance(e, bs4.Comment))]))
def indent_et(elem, level=0):
    """Indent an ElementTree element in-place (using CRLF newlines) so
    that serialized output is human-readable.

    NOTE(review): children are recursed through the sibling function
    indent_node (which has slightly different tail handling), not
    through indent_et itself -- confirm that is intentional.
    """
    i = "\r\n" + level * " "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + " "
        for e in elem:
            indent_node(e, level + 1)
            if not e.tail or not e.tail.strip():
                e.tail = i + " "
        # dedent the tail of the *last* child back to this level
        if not e.tail or not e.tail.strip():
            e.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
# util.String (or XML?)
def indent_node(elem, level=0):
    """Recursively indent an ElementTree node in-place, inserting CRLF
    newlines so that serialized output is human-readable."""
    indent = "\r\n" + level * " "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = indent + " "
        for child in elem:
            indent_node(child, level + 1)
            if not child.tail or not child.tail.strip():
                child.tail = indent
    elif level and (not elem.tail or not elem.tail.strip()):
        elem.tail = indent
# util.File
def replace_if_different(newfile, oldfile, archivefile=None):
    """Move *newfile* into place over *oldfile*, but only when their
    contents differ (or oldfile does not exist yet).

    :param archivefile: if given, a replaced oldfile is preserved here
    :returns: True if oldfile was created or replaced, False if the two
              files were identical (newfile is then deleted)
    """
    assert os.path.exists(newfile)
    if not os.path.exists(oldfile):
        robust_rename(newfile, oldfile)
        return True
    if filecmp.cmp(newfile, oldfile):
        # identical contents -- discard the new copy, keep timestamps
        os.unlink(newfile)
        return False
    if archivefile:
        robust_rename(oldfile, archivefile)
    robust_rename(newfile, oldfile)
    return True
# util.File
def copy_if_different(src, dest):
    """Copy *src* to *dest*, but only when dest is missing or has
    different contents (creating parent directories as needed)."""
    if not os.path.exists(dest):
        ensure_dir(dest)
        shutil.copy2(src, dest)
    elif filecmp.cmp(src, dest):
        # identical -- nothing to do
        pass
    else:
        os.unlink(dest)
        shutil.copy2(src, dest)
# util.File
def outfile_is_newer(infiles, outfile):
    """check to see if the outfile is newer than all ingoing files
    (which means there's no need to regenerate outfile)"""
    if not os.path.exists(outfile):
        return False
    threshold = os.stat(outfile).st_mtime
    return not any(os.path.exists(f) and os.stat(f).st_mtime > threshold
                   for f in infiles)
# util.file
def link_or_copy(src, dest):
    """Make *dest* a relative symlink to *src*, falling back to a
    content-aware copy on platforms without symlink support."""
    ensure_dir(dest)
    if os.path.lexists(dest):
        os.unlink(dest)
    if os.symlink:
        # The semantics of symlink are not identical to copy: the
        # target must be expressed relative to the link's directory,
        # not relative to cwd at creation time.
        target = relpath(src, os.path.dirname(dest))
        os.symlink(target, dest)
    else:
        copy_if_different(src, dest)
# util.string
def ucfirst(string):
    """Return *string* with its first character uppercased; the rest of
    the string is left untouched. Empty strings pass through."""
    if not string:
        return string
    return string[0].upper() + string[1:]
# util.time
# From http://bugs.python.org/issue7584#msg96917
def rfc_3339_timestamp(dt):
    """Format a datetime as an RFC 3339 timestamp. Naive datetimes get
    the '-00:00' (unknown offset) suffix."""
    if dt.tzinfo is None:
        offset = "-00:00"
    else:
        z = dt.strftime("%z")
        # turn "+0100" into "+01:00"
        offset = "%s:%s" % (z[:-2], z[-2:])
    return dt.strftime("%Y-%m-%dT%H:%M:%S") + offset
# more-or-less the same as a HTTP-date, eg "Mon, 4 Aug 1997 02:14:00
# EST". Converts to an UTC-localized (naive) datetime object (eg
# datetime.datetime(1997, 8, 4, 7, 14) since EST is 5 hours behind UTC)
def parse_rfc822_date(httpdate):
    """Convert an RFC 822/1123 date string (eg "Mon, 4 Aug 1997
    02:14:00 EST") to a naive, UTC-normalized datetime object (eg
    datetime.datetime(1997, 8, 4, 7, 14) since EST is 5 hours behind
    UTC).

    BUGFIX: the original sliced ``parsed_date[:7]``, which passed
    parsedate_tz's (unused) weekday field as datetime's *microsecond*
    argument; ``[:6]`` passes exactly (year, month, day, hour, minute,
    second).
    """
    # FIXME: the deprecated rfc822 module call was already replaced with
    # email.utils.parsedate_tz
    parsed_date = parsedate_tz(httpdate)
    # index 9 is the UTC offset in seconds; subtract it to normalize
    return (datetime.datetime(*parsed_date[:6]) -
            datetime.timedelta(seconds=parsed_date[9]))
# util.file
def readfile(filename, mode="r"):
    """Read and return the entire contents of *filename*."""
    with open(filename, mode=mode) as source:
        data = source.read()
    return data
# util.file
def writefile(filename, contents, encoding="utf-8"):
    """Write *contents* (text) to *filename*, creating any missing
    parent directories first."""
    ensure_dir(filename)
    with codecs.open(filename, "w", encoding=encoding) as sink:
        sink.write(contents)
# util.string
def extract_text(html, start, end, decode_entities=True, strip_tags=True):
    """Return the text between the first occurrence of *start* and the
    last occurrence of *end* in *html*, optionally decoding named HTML
    entities and stripping simple tags."""
    begin = html.index(start) + len(start)
    finish = html.rindex(end)
    text = html[begin:finish]
    if decode_entities:
        from html.entities import name2codepoint
        entity_re = re.compile("&(\\w+?);")
        text = entity_re.sub(lambda m: chr(name2codepoint[m.group(1)]), text)
    if strip_tags:
        # http://stackoverflow.com/a/1732454
        tag_re = re.compile("</?\\w+>")
        text = tag_re.sub('', text)
    return text
# util.string
def md5sum(filename):
    """Return the hexadecimal MD5 digest of the contents of
    *filename*."""
    digest = hashlib.md5()
    with open(filename, 'rb') as source:
        digest.update(source.read())
    return digest.hexdigest()
def merge_dict_recursive(base, other):
    """Recursively merge *other* into *base*, mutating and returning
    *base*. Values from other win, except that two dict values are
    merged rather than replaced."""
    for key, value in list(other.items()):
        both_are_dicts = (isinstance(value, dict) and
                          key in base and
                          isinstance(base[key], dict))
        if both_are_dicts:
            base[key] = merge_dict_recursive(base[key], value)
        else:
            base[key] = value
    return base
def resource_extract(resource_name, outfile, params={}):
    """Copy a text resource from the ferenda package to *outfile*,
    optionally %-interpolating *params* into it.

    BUGFIX: the pkg_resources stream was never closed; it is now closed
    deterministically.

    NOTE(review): codecs.open is called without an explicit encoding,
    so the output is written with the default codec -- confirm whether
    utf-8 was intended.
    """
    fp = pkg_resources.resource_stream('ferenda', resource_name)
    try:
        resource = fp.read().decode('utf-8')
    finally:
        fp.close()
    if params:
        resource = resource % params
    ensure_dir(outfile)
    with codecs.open(outfile, "w") as fp:
        fp.write(resource)
# http://stackoverflow.com/a/7142094
def print_open_fds():
    '''
    Print the open file descriptors of the current process.
    .. warning: will only work on UNIX-like os-es (requires lsof).
    '''
    import subprocess
    import os
    output = subprocess.check_output(
        ["lsof", '-w', '-Ff', "-p", str(os.getpid())]).decode('utf-8')
    # lsof -Ff emits one field per line; fd lines look like "f3"
    fds = [line for line in output.split('\n')
           if line and line[0] == 'f' and line[1:].isdigit()]
    print("Open file descriptors: " + ", ".join(fds))
# Copied from rdfextras.utils.pathutils
def uri_leaf(uri):
    """
    Get the "leaf" - fragment id or last segment - of a URI. Useful e.g. for
    getting a term from a "namespace like" URI.

    Returns None when the URI ends in a separator (nothing after it) or
    contains no separator at all.
    """
    for candidate in ('#', '/', ':'):
        if uri.endswith(candidate):
            # trailing separator: there is no leaf to extract
            break
        if candidate not in uri:
            continue
        leaf = uri.rsplit(candidate)[-1]
        if leaf:
            return leaf
# context mgr that logs elapsed time. use like so:
#
# with util.logtime(log.debug, "Basefile %(basefile)s took %(elapsed).3f s", {'basefile':'foo'}):
# do_stuff_that_takes_some_time()
#
# results in a call like log.debug("Basefile foo took 1.324 s")
@contextmanager
def logtime(method, format="The operation took %(elapsed).3f sec", values=None):
    """Context manager that measures the wall-clock time of its block
    and reports it by calling *method* with the %-formatted message.

    :param method: callable taking one string argument (eg log.debug)
    :param format: %-style template; gets an 'elapsed' float key
    :param values: extra mapping keys available to *format*

    BUGFIX: *values* used a mutable default dict that was shared (and
    mutated) across calls; a fresh dict is now created per call.
    """
    if values is None:
        values = {}
    start = time.time()
    yield
    values['elapsed'] = time.time() - start
    method(format % values)
# Example code from http://www.diveintopython.org/
def from_roman(s):
    """convert Roman numeral to integer"""
    numeral_values = (('M', 1000), ('CM', 900), ('D', 500), ('CD', 400),
                      ('C', 100), ('XC', 90), ('L', 50), ('XL', 40),
                      ('X', 10), ('IX', 9), ('V', 5), ('IV', 4),
                      ('I', 1))
    total = 0
    pos = 0
    for numeral, value in numeral_values:
        # consume as many copies of this numeral as appear at pos
        while s.startswith(numeral, pos):
            total += value
            pos += len(numeral)
    return total
| Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
class URIFormatter(object):
    """Maps pyparsing result names to formatter functions that
    construct URIs from parsed citation data."""

    def __init__(self, *formatters, **kwargs):
        self._formatters = dict(formatters)

    def format(self, parseresult):
        """Return the URI for *parseresult*, or None when no formatter
        is registered for its name."""
        func = self.formatterfor(parseresult.getName())
        return func(dict(parseresult)) if func else None

    def addformatter(self, key, func):
        """Register *func* as the formatter for results named *key*."""
        self._formatters[key] = func

    def formatterfor(self, key):
        """Return the formatter registered for *key*, or None."""
        return self._formatters.get(key)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
class ParseError(Exception):
    """Signals that :py:meth:`~ferenda.DocumentRepository.parse` failed
    in some way."""
class FSMStateError(ParseError):
    """Signals that a :py:class:`~ferenda.FSMParser` configuration has
    no transition defined for the current state and current symbol."""
class DocumentRemovedError(Exception):
    """Signals that a particular document has been found to be removed.

    This can happen either during
    :py:meth:`~ferenda.DocumentRepository.download` or
    :py:meth:`~ferenda.DocumentRepository.parse` (the latter when a
    physical document exists but its contents are essentially a
    placeholder saying the document has been removed)."""
class PatchError(ParseError):
    """Raised when an error occurs while applying a patch during parse
    (NOTE(review): inferred from the class name and its ParseError base
    -- confirm)."""
    pass
class AttachmentNameError(ValueError):
    """Signals that an invalid attachment name was used with a method
    of :py:class:`~ferenda.DocumentStore`."""
class AttachmentPolicyError(ValueError):
    """Signals an attempt to store an attachment through
    :py:class:`~ferenda.DocumentStore` while ``storage_policy`` is not
    set to ``dir``."""
class ArchivingError(Exception):
    """Signals that archiving a document version through
    :py:meth:`~ferenda.DocumentStore.archive` failed (for example
    because that archive version already exists)."""
class ValidationError(Exception):
    """Signals that a created document failed to validate against the
    appropriate schema."""
class TransformError(Exception):
    """Signals that an XSLT transformation failed, for whatever
    reason."""
class ExternalCommandError(Exception):
    """Signals that invoking an external command failed for any reason
    (including the command-line program not existing)."""
class ConfigurationError(Exception):
    """Signals that a configuration file could not be found in its
    expected location, or could not be used due to corruption, file
    permissions or other reasons."""
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pyparsing
import six
from ferenda.elements import LinkSubject, serialize
class CitationParser(object):
    """Finds citations to documents and other resources in text
    strings. Each type of citation is specified by a
    :py:mod:`pyparsing` grammar, and for each found citation a URI
    can be constructed using a :py:class:`~ferenda.URIFormatter`
    object.

    :param grammars: The grammar(s) for the citations that this
                     parser should find, in order of priority.
    :type grammars: list of :py:class:`pyparsing.ParserElement`

    Usage:

    >>> from pyparsing import Word,nums
    >>> rfc_grammar = ("RFC " + Word(nums).setResultsName("rfcnumber")).setResultsName("rfccite")
    >>> pep_grammar = ("PEP" + Word(nums).setResultsName("pepnumber")).setResultsName("pepcite")
    >>> citparser = CitationParser(rfc_grammar, pep_grammar)
    >>> res = citparser.parse_string("The WSGI spec (PEP 333) references RFC 2616 (The HTTP spec)")
    >>> # res is a list of strings and/or pyparsing.ParseResult objects
    >>> from ferenda import URIFormatter
    >>> from ferenda.elements import Link
    >>> f = URIFormatter(('rfccite',
    ...                   lambda p: "http://www.rfc-editor.org/rfc/rfc%(rfcnumber)s" % p),
    ...                  ('pepcite',
    ...                   lambda p: "http://www.python.org/dev/peps/pep-0%(pepnumber)s/" % p))
    >>> citparser.set_formatter(f)
    >>> res = citparser.parse_recursive(["The WSGI spec (PEP 333) references RFC 2616 (The HTTP spec)"])
    >>> res == ['The WSGI spec (', Link('PEP 333',uri='http://www.python.org/dev/peps/pep-0333/'), ') references ', Link('RFC 2616',uri='http://www.rfc-editor.org/rfc/rfc2616'), ' (The HTTP spec)']
    True
    """

    def __init__(self, *grammars):
        self._grammars = []
        for grammar in grammars:
            self.add_grammar(grammar)
        self._formatter = None

    def set_formatter(self, formatter):
        """Specify how found citations are to be formatted when using
        :py:meth:`~ferenda.CitationParser.parse_recursive`

        :param formatter: The formatter object to use for all citations
        :type formatter: :py:class:`~ferenda.URIFormatter`
        """
        self._formatter = formatter

    def add_grammar(self, grammar):
        """Add another grammar.

        :param grammar: The grammar to add
        :type grammar: :py:class:`pyparsing.ParserElement`
        """
        self._grammars.append(grammar)

    def parse_string(self, string):
        """Find any citations in a text string, using the configured grammars.

        :param string: Text to parse for citations
        :type string: str
        :returns: strings (for parts of the input text that do not contain
                  any citation) and/or tuples (for found citation) consisting
                  of (string, :py:class:`pyparsing.ParseResult`)
        :rtype: list
        """
        nodes = [string]
        for grammar in self._grammars:
            res = []
            for node in nodes:
                # tuples produced by an earlier grammar pass through
                # untouched -- only plain text is re-scanned
                if not isinstance(node, six.text_type):
                    res.append(node)
                    continue
                matches = grammar.scanString(node)
                start = 0
                after = 0
                for match, before, after in matches:
                    if before > start:
                        res.append(node[start:before])
                    res.append((node[before:after], match))
                    start = after
                if after < len(node):
                    # trailing text after the last match (or the whole
                    # node if there were no matches)
                    res.append(node[after:])
            nodes = list(res)
        # BUGFIX: return nodes, not res -- res was only bound if at
        # least one grammar was configured, so a parser without
        # grammars raised NameError here
        return nodes

    def parse_recursive(self, part):
        """Traverse a nested tree of elements, finding citations in
        any strings contained in the tree. Found citations are marked
        up as :py:class:`~ferenda.elements.Link` elements with the uri
        constructed by the :py:class:`~ferenda.URIFormatter` set by
        :py:meth:`~ferenda.CitationParser.set_formatter`.

        :param part: The root element of the structure to parse
        :type part: list
        :returns: a correspondingly nested structure.
        :rtype: list"""
        res = []
        if not isinstance(part, six.text_type):
            for subpart in part:
                if isinstance(subpart, six.text_type):
                    res.extend(self.parse_recursive(subpart))
                else:
                    res.append(self.parse_recursive(subpart))
            part[:] = res[:]  # replace our exising subparts/children with the combined result of parse_recursive
            return part
        else:  # ok, simple string
            # FIXME: We need to keep track of the URI for the part
            # of the document we're in, so that we can resolve
            # partial/relative references
            # splits a string into a list of string and ParseResult objects
            nodes = self.parse_string(part)
            for node in nodes:
                if isinstance(node, six.text_type):
                    res.append(node)
                elif isinstance(node, tuple):
                    (text, parseresult) = node
                    # node = self.resolve_relative(node,currentloc)
                    uri = self._formatter.format(parseresult)
                    if uri:
                        res.append(LinkSubject(
                            text, uri=uri, predicate="dct:references"))
                    else:
                        res.append(node)
            # FIXME: concatenate adjacent str nodes
            return res
| Python |
"""General ready-made grammars for use with CitationParser."""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from pyparsing import *
################################################################
#
# ferenda.citationpatterns.url
#
# Adapted from http://pyparsing.wikispaces.com/file/view/urlparse_.py/31853197/urlparse_.py
url_scheme = oneOf("http https ftp")
url_netloc = delimitedList(
Word(printables, excludeChars="/."), ".", combine=True)
# To avoid matching trailing punctuation, eg "(see http://foo.org/)":
url_tokens_not_at_end = Word(",).")
url_path_tokens = Word(printables, excludeChars="?#,).")
url_path = delimitedList(url_path_tokens, url_tokens_not_at_end, combine=True)
url_query_tokens = Word(printables, excludeChars="#,).")
url_query = delimitedList(
url_query_tokens, url_tokens_not_at_end, combine=True)
url_fragment_tokens = Word(printables, excludeChars=",).")
url_fragment = delimitedList(
url_fragment_tokens, url_tokens_not_at_end, combine=True)
url = (url_scheme.setResultsName("scheme") + Literal("://").suppress() +
url_netloc.setResultsName("netloc") +
Optional(url_path).setResultsName("path") +
Optional(Literal("?").suppress() + url_query).setResultsName("query") +
Optional(Literal("#").suppress() + url_fragment).setResultsName("fragment")).setResultsName("url")
"""Matchs any URL like 'http://example.com/ or
'https://example.org/?key=value#fragment' (note: only the
schemes/protocols 'http', 'https' and 'ftp' are supported)"""
################################################################
#
# ferenda.citationpatterns.eulaw
#
LongYear = Word(nums, exact=4)
ShortYear = Word(nums, exact=2)
Month = oneOf("januari februari mars april maj juni juli augusti september oktober november december")
DayInMonth = Word(nums, max=2)
Date = (DayInMonth + Month + LongYear)
Ordinal = Word(nums).setResultsName("Ordinal")
Year = (ShortYear | LongYear).setResultsName("Year")
Association = oneOf("EG EEG").setResultsName("Association")
Institution = Literal('rådets') | Literal(
'Europaparlamentets och rådets') | Literal('kommissionens')
LegalactType = oneOf("direktiv förordning").setResultsName("LegalactType")
Directive = Group(
Year + "/" + Ordinal + "/" + Association).setResultsName("Directive")
Regulation = Group("(" + Association + ")" + "nr" + Ordinal + "/" + Year)
# "artikel 42.1" => Article: 42, Subarticle: 1
Article = "artikel" + Word(nums).setResultsName(
"ArticleID") + Optional("." + Word(nums).setResultsName("SubarticleID"))
Legalact = Institution + LegalactType + (Directive | Regulation) + \
"av den" + Date
ArticleLegalact = Article + "i" + Legalact
eulaw = MatchFirst(
[ArticleLegalact, Legalact, Article]).setResultsName("EULegislation")
"""Matches EU Legislation references like 'direktiv 2007/42/EU'."""
| Python |
import re
from datetime import datetime
import locale
from ferenda import util
from ferenda import Describer, DocumentRepository
class W3Standards(DocumentRepository):
    """Document repository for W3C Recommendations, scraped from the TR
    status index.

    BUGFIX: the abstract lookup used ``soup.find(_class="abstract")``,
    which filters on an HTML attribute literally named ``_class`` (and
    so matched nothing); BeautifulSoup's keyword for filtering on CSS
    class is ``class_``.
    """
    alias = "w3c"
    start_url = "http://www.w3.org/TR/tr-status-all"
    document_url_regex = "http://www.w3.org/TR/(?P<year>\d{4})/REC-(?P<basefile>.*)-(?P<date>\d+)"
    document_url_template = None  # no simple way of creating a url
                                  # from a basefile alone (we also need
                                  # the published date)
    basefile_regex = None  # Link text on index page do not contain basefile
    parse_content_selector="body"
    parse_filter_selectors=["div.toc", "div.head"]

    def parse_metadata_from_soup(self, soup, doc):
        """Extract dct:title, dct:identifier, dct:abstract, dct:issued
        and dct:editor from the document HTML into doc.meta, then
        verify the required properties are present exactly once."""
        d = Describer(doc.meta, doc.uri)
        dct = self.ns['dct']
        # dct:title
        d.value(dct.title,soup.find("title").string, lang=doc.lang)
        d.value(dct.identifier,doc.basefile)
        # dct:abstract -- was soup.find(_class="abstract")
        abstract = soup.find(class_="abstract")
        if abstract:
            d.value(dct['abstract'],abstract.string, lang=doc.lang)
        # dct:published
        datehdr = soup.find(lambda x: x.name in ('h2','h3')
                            and re.search("W3C\s+Recommendation,?\s+",x.text))
        # from pudb import set_trace; set_trace()
        if datehdr:
            datestr = " ".join(datehdr.text.split())
            m = re.search("(\d+) (\w+),? (\d{4})", datestr)
            if not m:
                self.log.warning("%s: Couldn't parse datestr %s" %
                                 (doc.basefile, datestr))
            else:
                datestr = " ".join(m.groups())
                date = None
                # FIXME: should this contrived workaround to get default
                # (english) locale for strptime be put in a util class?
                l = locale.getlocale(locale.LC_ALL)
                locale.setlocale(locale.LC_ALL,'C')
                try:
                    # 17 December 1996
                    date = datetime.strptime(datestr,"%d %B %Y")
                except ValueError:
                    try:
                        # 17 Dec 1996
                        date = datetime.strptime(datestr,"%d %b %Y")
                    except ValueError:
                        self.log.warning("%s: Could not parse datestr %s" %
                                         (doc.basefile, datestr))
                finally:
                    # always restore the process-wide locale
                    locale.setlocale(locale.LC_ALL,l)
                if date:
                    d.value(dct.issued, date)
        # dct:editor -- one <dd> per editor after the "Editors:" <dt>
        editors = soup.find("dt", text=re.compile("Editors?:"))
        if editors:
            for editor in editors.find_next_siblings("dd"):
                editor_string = " ".join(editor.stripped_strings)
                editor_name = editor_string.split(", ")[0]
                d.value(dct.editor, editor_name)
        # assure we got exactly one of each of the required properties
        for required in (dct.title,dct.issued):
            d.getvalue(required) # throws KeyError if not found (or more than one)
| Python |
from ferenda import DocumentRepository
class PEP(DocumentRepository):
    # NOTE(review): this class reads like an unfinished sketch -- see
    # the notes in download() below; it will not run as written.
    module_dir = "pep"
    start_url = "http://hg.python.org/peps"
    document_url_template = "http://hg.python.org/peps/file/tip/pep-%(basefile)s.txt"
    def download(self):
        """Clone/update the PEP mercurial repository and extract new
        PEP files (sketch -- see NOTE(review) comments)."""
        # NOTE(review): str.join takes a single iterable argument;
        # os.sep.join(a, b, c) raises TypeError -- os.path.join was
        # presumably intended. Also, neither 'os' nor 'util' is
        # imported in this module.
        hg_clone_path = os.sep.join(self.config.datadir, self.alias, 'clone')
        if os.path.exists(hg_clone_path):
            self.log.debug("Pulling latest changes")
            util.runcmd("hg pull",cwd=hg_clone_path)
            self.log.debug("Updating local clone")
            util.runcmd("hg update",cwd=hg_clone_path)
        else:
            # same os.sep.join problem as above
            hg_clone_parent = os.sep.join(self.config.datadir, self.alias)
            util.runcmd("hg clone %s clone" % self.start_url,
                        cwd=hg_clone_parent)
            pass
        new_last_rev = None
        cmd = "LANGUAGE=C hg log -v"
        # NOTE(review): this iterates over the *characters* of the
        # string literal, not over revisions; presumably the parsed
        # output of running `cmd` was meant here (rev.id/rev.files do
        # not exist on a str)
        for rev in "LANGUAGE=C hg log -v":
            if not new_last_rev:
                new_last_rev = rev.id
            if rev > self.config.last_rev:
                for f in rev.files: # rev.files only contain proper pep files
                    # NOTE(review): bare expression -- builds a tuple
                    # containing a formatted string and discards it; no
                    # command is actually executed
                    "hg cat -r %s > downloaded/%s-r%s.txt" % f, basefile(f), rev.id
            else:
                self.config.last_rev = new_last_rev
                break
| Python |
# flake8: noqa
from .w3c import W3Standards
from .rfc import RFC
from .pep import PEP
| Python |
# -*- coding: utf-8 -*-
import re
import os
from datetime import datetime, date
from itertools import islice
import requests
import requests.exceptions
import six
from rdflib import Graph,Literal
from ferenda import DocumentRepository
from ferenda.errors import DocumentRemovedError, ParseError
from ferenda.decorators import recordlastdownload, managedparsing
from ferenda import TextReader, Describer, FSMParser, CitationParser, URIFormatter
from ferenda.elements import Body, Heading, Preformatted, Paragraph, UnorderedList, ListItem, Section, Subsection, Subsubsection, UnicodeElement, CompoundElement, serialize
# Marker element types produced by the RFC text parser below
class RFCHeader(UnicodeElement): pass    # the two-column header block at the top of an RFC
class DocTitle(UnicodeElement): pass     # the document title paragraph
class Pagebreak(CompoundElement): pass   # a form-feed page break (may carry a shorttitle)
class PreambleSection(CompoundElement):
    """A titled section occurring before the first numbered section of
    an RFC (eg "Abstract", "Status of This Memo"). Rendered as a div
    with RDFa metadata."""
    tagname = "div"
    def _get_classname(self):
        # eg "preamblesection", used as the HTML class attribute
        return self.__class__.__name__.lower()
    classname = property(_get_classname)
    def as_xhtml(self, uri):
        # decorate the default XHTML rendering with RDFa attributes
        # (title + bibo:DocumentPart type)
        element = super(PreambleSection, self).as_xhtml(uri)
        element.set('property', 'dct:title')
        element.set('content', self.title)
        element.set('typeof', 'bibo:DocumentPart')
        return element
class RFC(DocumentRepository):
alias = "rfc"
start_url = "http://www.ietf.org/download/rfc-index.txt"
document_url_template = "http://tools.ietf.org/rfc/rfc%(basefile)s.txt"
document_url_regex = "http://tools.ietf.org/rfc/rfc(?P<basefile>\w+).txt"
downloaded_suffix = ".txt"
namespaces = ('rdf', # always needed
'dct', # title, identifier, etc (could be replaced by equiv bibo prop?)
'bibo', # Standard and DocumentPart classes, chapter prop
'xsd', # datatypes
('rfc','http://example.org/ontology/rfc/')
)
    @recordlastdownload
    def download(self, basefile=None):
        """Download RFCs: a single one if *basefile* is given, otherwise
        every RFC listed in the rfc-index at start_url (skipping those
        already present on disk)."""
        if basefile and self.document_url_template:
            return self.download_single(basefile)
        res = requests.get(self.start_url)
        indextext = res.text
        reader = TextReader(ustring=indextext,linesep=TextReader.UNIX) # see TextReader class
        iterator = reader.getiterator(reader.readparagraph)
        # honour a download cap from config or environment (useful for
        # testing against the full index)
        if 'downloadmax' in self.config or 'FERENDA_DOWNLOADMAX' in os.environ:
            if 'downloadmax' in self.config:
                maxdoc = int(self.config.downloadmax)
            else:
                maxdoc = int(os.environ['FERENDA_DOWNLOADMAX'])
            self.log.info("Only downloading max %s documents" % maxdoc)
            links = islice(self.download_get_basefiles(iterator), maxdoc)
        else:
            links = self.download_get_basefiles(iterator)
        for (basefile,url) in links:
            try:
                if not os.path.exists(self.store.downloaded_path(basefile)):
                    self.download_single(basefile)
            except requests.exceptions.HTTPError as e:
                if e.response.status_code == 404:
                    # create a empty dummy file in order to
                    # avoid looking for it over and over again:
                    with open(self.store.downloaded_path(basefile), "w"):
                        pass
def download_get_basefiles(self, source):
for p in reversed(list(source)):
if re.match("^(\d{4}) ",p): # looks like a RFC number
if not "Not Issued." in p: # Skip RFC known to not exist
basefile = str(int(p[:4])) # eg. '0822' -> '822'
yield (basefile, None)
@staticmethod # so as to be easily called from command line
def get_parser():
# recognizers, constructors and helpers are created as nested
# ordinary functions, but could just as well be staticmethods
# (or module-global functions)
def is_rfcheader(parser,chunk=None):
if not chunk:
chunk = parser.reader.peek()
(leftlines, rightlines, linelens) = _splitcolumns(chunk)
# all rfc headers are at least 3 lines long
if len(linelens) < 3:
return False
targetlen = linelens[0]
for (idx, length) in enumerate(linelens):
if rightlines[idx] == "" and length > 40:
return False
elif rightlines[idx] != "" and length != targetlen:
return False
return True
# FIXME: use this in parse_header as well
def _splitcolumns(chunk):
linelens = []
leftlines = []
rightlines = []
for line in chunk.split("\n"):
linelens.append(len(line))
if " " in line:
(left,right) = line.split(" ",1)
else:
(left, right) = line, ""
leftlines.append(left)
rightlines.append(right)
return (leftlines, rightlines, linelens)
def is_doctitle(parser,chunk=None): return True
def is_pagebreak(parser,chunk=None):
if not chunk:
chunk = parser.reader.peek()
return ('\f' in chunk)
def is_header(parser,chunk=None):
if not chunk:
chunk = parser.reader.peek()
stripchunk = chunk.strip()
# a header should be non-emtpy, be on a single line, not
# end with "." and not start with an indent.
if ((stripchunk != "") and
(len(stripchunk.split("\n")) == 1) and
(not stripchunk.endswith('.')) and
(not chunk.startswith(' '))):
return True
def is_section(parser, chunk=None):
(ordinal,title) = analyze_sectionstart(parser,chunk)
return section_segments_count(ordinal) == 1
def is_subsection(parser, chunk=None):
(ordinal,title) = analyze_sectionstart(parser,chunk)
return section_segments_count(ordinal) == 2
def is_subsubsection(parser, chunk=None):
(ordinal,title) = analyze_sectionstart(parser,chunk)
return section_segments_count(ordinal) == 3
def is_preformatted(parser, chunk=None):
if not chunk:
chunk = parser.reader.peek()
# all paragraphs start with a three space indent -- start
# by removing this
stripped = "\n".join([x[3:] for x in chunk.split("\n")])
# replace double spaces after end of sentences to avoid
# false positives:
stripped = stripped.replace(". ", ". ")
# If any double spaces left, probably preformatted text
# (eg. tables etc). Same if several periods are present
# (indicative of leaders in TOCs)
return (" " in stripped or
"...." in stripped or
". . . " in stripped)
def is_bnf(parser,chunk=None):
if not chunk:
chunk = parser.reader.peek()
return (is_preformatted(parser,chunk) and " = " in chunk)
def is_paragraph(parser, chunk=None):
return True
def is_ul_listitem(parser, chunk=None):
if not chunk:
chunk = parser.reader.peek()
return chunk.strip().startswith("o ")
def is_definition_title(parser, chunk=None):
# looks like header but starts indented
return False
def is_definition(parser, chunk=None):
# entire p is indented 6 spaces instead of 3. But if it
# follows a ul li, problably continuation of that.
return False
def make_body(parser):
return p.make_children(Body())
setattr(make_body,'newstate','body')
def make_preamble_section(parser):
s = PreambleSection(title=parser.reader.next())
return p.make_children(s)
setattr(make_preamble_section,'newstate','preamble-section')
def skip_pagebreak(parser):
chunk = parser.reader.next()
lastline = chunk.split("\n")[-1]
parts = re.split(" +", lastline)
if len(parts) > 2:
return Pagebreak(shorttitle=parts[1])
else:
return None
def make_header(parser):
chunk = parser.reader.next()
h = Heading(chunk.strip())
return h
def make_paragraph(parser):
chunk = p.reader.next()
return Paragraph([" ".join(chunk.split())])
def make_preformatted(parser):
chunk = p.reader.next()
return Preformatted([chunk])
def make_bnf(parser):
chunk = p.reader.next()
return Preformatted([chunk],**{'class':'bnf'})
def make_section(parser):
(secnumber, title) = analyze_sectionstart(parser,parser.reader.next())
s = Section(ordinal=secnumber,title=title,uri=None,meta=None)
return parser.make_children(s)
setattr(make_section,'newstate','section')
def make_subsection(parser):
(secnumber, title) = analyze_sectionstart(parser,parser.reader.next())
s = Subsection(ordinal=secnumber,title=title,uri=None,meta=None)
return parser.make_children(s)
setattr(make_subsection,'newstate','subsection')
def make_subsubsection(parser):
(secnumber, title) = analyze_sectionstart(parser,parser.reader.next())
s = Subsubsection(ordinal=secnumber,title=title,uri=None,meta=None)
return parser.make_children(s)
setattr(make_subsubsection,'newstate','subsubsection')
def make_unordered_list(parser):
(listtype,ordinal,separator,rest) = analyze_listitem(parser.reader.peek())
ol = UnorderedList(type=listtype) # should
ol.append(parser.make_child(make_listitem,"listitem"))
return parser.make_children(ol)
setattr(make_unordered_list,'newstate','unorderedlist')
def make_listitem(parser):
chunk = parser.reader.next()
(listtype,ordinal,separator,rest) = analyze_listitem(chunk)
li = ListItem(ordinal=ordinal)
li.append(rest)
return parser.make_children(li)
setattr(make_listitem,'newstate','listitem')
def make_rfcheader(parser):
headerchunk = parser.reader.next()
if is_rfcheader(parser):
headerchunk += "\n" + parser.reader.next()
return RFCHeader(headerchunk)
def make_doctitle(parser):
return DocTitle(parser.reader.next())
# Some helpers for the above
def section_segments_count(s):
return ((s is not None) and
len(list(filter(None,s.split(".")))))
# Matches
# "1 Blahonga" => ("1","Blahonga")
# "1.2.3. This is a subsubsection" => ("1.2.3", "This is a subsection")
re_sectionstart = re.compile("^(\d[\.\d]+) +(.*[^\.])$").match
def analyze_sectionstart(parser,chunk=None):
if not chunk:
chunk = parser.reader.peek()
m = re_sectionstart(chunk)
if m:
return (m.group(1).rstrip("."), m.group(2))
else:
return (None,chunk)
def analyze_listitem(chunk):
    """Classify a chunk as a list item.

    Returns (listtype, ordinal, separator, rest) where listtype is the
    same as list-style-type in CSS2.1, sans 'georgian', 'armenian' and
    'greek', plus 'dashed'. All four are None (with the chunk returned
    unchanged) when the chunk is not recognized as a list item.
    """
    listtype = ordinal = separator = rest = None
    # FIXME: Tighten these patterns to RFC conventions
    # match "1. Foo..." or "14) bar..." but not "4 This is a heading"
    # m = re.match('^(\d+)([\.\)]) +',chunk)
    # if m:
    #     if chunk.startswith("0"):
    #         listtype="decimal-leading-zero"
    #     else:
    #         listtype="decimal"
    #     (ordinal,separator) = m.groups()
    #     rest = chunk[m.end():]
    #     return (listtype,ordinal,separator,rest)
    #
    # # match "IX. Foo… or "vii) bar…" but not "vi is a sucky
    # # editor" or "MMXIII is the current year"
    # m = re.match('^([IVXivx]+)([\.\)]) +', chunk)
    # if m:
    #     if chunk[0].islower():
    #         listtype = 'lower-roman'
    #     else:
    #         listtype = 'upper-roman'
    #     (ordinal,separator) = m.groups()
    #     rest = chunk[m.end():]
    #     return (listtype,ordinal,separator,rest)
    #
    # # match "a. Foo… or "z) bar…" but not "to. Next sentence…"
    # m = re.match('^([A-Za-z])([\.\)]) +', chunk)
    # if m:
    #     if chunk[0].islower():
    #         listtype = 'lower-alpha'
    #     else:
    #         listtype = 'upper-alpha'
    #     (ordinal,separator) = m.groups()
    #     rest = chunk[m.end():]
    #     return (listtype,ordinal,separator,rest)
    #
    # NOTE(review): the marker literal reads " o " (4 chars) but the
    # slice chunk[6:] implies a six-character prefix; whitespace runs
    # were probably collapsed in transit -- confirm the literal against
    # the upstream source.
    if chunk.startswith(" o "):
        return ("disc",None,None,chunk[6:])
    return (listtype,ordinal,separator,chunk)  # None * 3
# Assemble the FSM parser: recognizer predicates classify each incoming
# text chunk, and transitions map (state, recognizer) pairs to
# (constructor, newstate) pairs. A constructor of False closes the
# current construct without consuming the chunk.
p = FSMParser()
p.set_recognizers(is_pagebreak,
                  is_rfcheader,
                  is_doctitle,
                  is_section,
                  is_subsection,
                  is_subsubsection,
                  is_header,
                  is_ul_listitem,
                  is_preformatted,
                  is_definition_title,
                  is_definition,
                  is_paragraph)
# start_state: "body" or "rfcheader", then "title", then
# "preamble" (consisting of preamblesections that has title
# (eg "Abstract", "Status of This Memo" + content), then "section".
commonstates = ("section","subsection","subsubsection")
p.set_transitions({("body", is_rfcheader):(make_rfcheader,"doctitle"),
                   ("doctitle", is_doctitle):(make_doctitle,"preamble"),
                   ("preamble", is_header):(make_preamble_section,"preamble-section"),
                   ("preamble-section", is_paragraph):(make_paragraph,None),
                   ("preamble-section", is_header):(False,None),
                   ("preamble-section", is_pagebreak):(skip_pagebreak,None),
                   ("preamble-section", is_section):(False,"after-preamble"),
                   ("after-preamble", is_section):(make_section, "section"),
                   ("section", is_subsection): (make_subsection,"subsection"),
                   ("section", is_section): (False,None),
                   ("subsection", is_subsubsection): (make_subsubsection, "subsubsection"),
                   ("subsection", is_subsection):(False,None),
                   ("subsection", is_section):(False,None),
                   ("subsubsection", is_subsubsection):(False,None),
                   ("subsubsection", is_subsection):(False,None),
                   ("subsubsection", is_section):(False,None),
                   (commonstates, is_ul_listitem):(make_unordered_list, "ul-list"),
                   ("ul-list", is_ul_listitem):(make_listitem, "listitem"),
                   ("ul-list", is_paragraph):(False,None),
                   ("listitem", is_paragraph):(False,None),
                   (commonstates, is_bnf):(make_bnf,None),
                   (commonstates, is_preformatted):(make_preformatted,None),
                   (commonstates, is_paragraph):(make_paragraph,None),
                   (commonstates, is_pagebreak):(skip_pagebreak,None),
                   })
p.initial_state = "body"
p.initial_constructor = make_body
return p
@managedparsing
def parse(self, doc):
    """Parse a downloaded RFC text file into doc.body and RDF metadata."""
    reader = TextReader(self.store.downloaded_path(doc.basefile),
                        linesep=TextReader.UNIX)
    # Some more preprocessing: Remove the faux-bold formatting
    # used in some RFCs (using repetitions of characters
    # interleaved with backspace control sequences). Note: that
    # is '\b' as in backspace, not r'\b' as in word boundary
    # docstring = re.sub('.\b','',docstring)
    cleanparagraphs = (re.sub('.\b','',x) for x in
                       reader.getiterator(reader.readparagraph))
    parser = self.get_parser()
    self.config.fsmdebug = 'FERENDA_FSMDEBUG' in os.environ
    parser.debug = self.config.fsmdebug
    doc.body = parser.parse(cleanparagraphs)
    # The first two body children are the masthead and the title line.
    header = doc.body.pop(0)  # body.findByClass(RFCHeader)
    title = " ".join(doc.body.pop(0).split())  # body.findByClass(DocHeader)
    # Drop the table of contents section entirely.
    for part in doc.body:
        if isinstance(part,PreambleSection) and part.title == "Table of Contents":
            doc.body.remove(part)
            break
    # create (RDF) metadata for document Note: The provided
    # basefile may be incorrect -- let whatever is in the header
    # override
    realid = self.get_rfc_num(header)
    doc.uri = self.canonical_uri(realid)
    desc = Describer(doc.meta, doc.uri)
    desc.rdftype(self.ns['bibo'].Standard)
    desc.value(self.ns['dct'].title, title, lang="en")
    self.parse_header(header,desc)
    doc.lang = "en"
    # process body - remove the temporary Pagebreak objects, after
    # having extracted the shortTitle found in them
    shorttitle = self.cleanup_body(doc.body)
    if shorttitle and (desc.getvalue(self.ns['dct'].title) != shorttitle):
        desc.value(self.ns['bibo'].shortTitle, shorttitle, lang="en")
    # process body - add good metadata
    from pyparsing import Word,alphanums
    # Bibliographic references look like "[RFC2119]"; link each one to
    # the matching bibliography anchor within this document.
    bibref_cite = ("[" + Word(alphanums).setResultsName("ref") + "]").setResultsName("bibref")
    citparser = CitationParser(bibref_cite)
    citparser.set_formatter(URIFormatter(("bibref", lambda p: "#bib-%(ref)s" % p),
                                         ))
    doc.body = citparser.parse_recursive(doc.body)
    self.decorate_bodyparts(doc.body,doc.uri)
def decorate_bodyparts(self, part, baseuri):
    """Recursively attach URIs and RDF metadata to Section-like nodes."""
    if isinstance(part, six.text_type):
        # Plain text leaves carry no metadata.
        return
    if isinstance(part, (Section, Subsection, Subsubsection)):
        part.uri = "%s#S%s" % (baseuri, part.ordinal)
        part.meta = self.make_graph()
        d = Describer(part.meta, part.uri)
        d.rdftype(self.ns['bibo'].DocumentPart)
        d.value(self.ns['dct'].title, Literal(part.title, lang="en"))
        d.value(self.ns['bibo'].chapter, part.ordinal)
        # dct:isPartOf is implied by the document structure
    for child in part:
        self.decorate_bodyparts(child, baseuri)
def cleanup_body(self, part):
    """Strip Pagebreak children in place; return the first shorttitle found."""
    shorttitle = None
    kept = []  # children that survive the cleanup
    for child in part:
        if isinstance(child, Pagebreak):
            # Remember the short title, drop the pagebreak itself.
            shorttitle = child.shorttitle
            continue
        if not isinstance(child, six.text_type):
            # Recurse into compound children; keep the first shorttitle.
            found = self.cleanup_body(child)
            if shorttitle is None:
                shorttitle = found
        kept.append(child)
    part[:] = kept
    return shorttitle
def get_rfc_num(self, header):
    """Extract the RFC number from the masthead header text.

    Scans the left-hand column of each header line for a
    "Request for Comments: NNNN" pair and returns the value.
    Raises ParseError when no such line is found.
    """
    lines = header.split("\n")
    # Left-hand column: the part of each line before the column separator.
    # NOTE(review): splitting on a single space would truncate multi-word
    # keys ("Request for Comments" -> "Request"); the original separator
    # was probably a run of spaces that was collapsed in transit --
    # confirm against the upstream source.
    left = [x.split(" ",1)[0].strip() for x in lines]
    for line in left[1:]:
        if ":" not in line:
            continue
        # NOTE(review): raises ValueError if a line contains ": " twice.
        (key,val) = line.split(": ")
        if key == "Request for Comments":
            return val
    raise ParseError("Couldn't find RFC number in header")
def parse_header(self,header,desc):
    """Extract RDF metadata (publisher, identifiers, relations, authors,
    issue date) from the two-column RFC masthead into desc."""
    # split header in left-hand and right-hand side, and line by line
    # NOTE(review): splitting on a single space cannot isolate the two
    # columns or keep multi-word keys intact; the original separator was
    # probably a run of spaces collapsed in transit -- confirm upstream.
    lines = header.split("\n")
    left = [x.split(" ",1)[0].strip() for x in lines]
    right= [x.split(" ",1)[1].strip() for x in lines if " " in x]
    # first line of lefthand side is publishing organization (?)
    desc.value(self.ns['dct'].publisher, left[0])
    # following lefthand side are key-value headers
    for line in left[1:]:
        if line.strip() == "":
            continue
        if ": " not in line:
            self.log.warning("Cannot treat %r as a key-value header" % line)
            continue
        (key,value) = line.split(": ")
        if key == "Request for Comments":
            desc.value(self.ns['dct'].identifier, "RFC %s" % value)
        elif key == "Category":
            desc.value(self.ns['dct'].category, value)
        elif key == "ISSN":
            desc.value(self.ns['dct'].issn, value)
        elif key == "Updates":
            for valuepart in value.split(", "):
                uri = self.canonical_uri(valuepart)
                desc.rel(self.ns['dct'].updates, uri)
        elif key == "Obsoletes":
            # NOTE(review): this branch reuses the dct:updates predicate
            # from the "Updates" branch -- looks like a copy-paste bug
            # (perhaps dct:replaces was intended); confirm before changing.
            for valuepart in value.split(", "):
                uri = self.canonical_uri(valuepart)
                desc.rel(self.ns['dct'].updates, uri)
        elif key == "BCP":
            desc.value(self.ns['rfc'].BCP, value)
        elif key == "STD":
            desc.value(self.ns['rfc'].STD, value)
        elif key == "FYI":
            desc.value(self.ns['rfc'].FYI, value)
        else:
            # Anything unrecognized is logged rather than dropped silently.
            self.log.warning("Unknown header key %s (value %s)" % (key,value))
    # For right hand side, any line beginning with a single letter
    # followed by '. ' is probably a name
    for line in right:
        if re.match("[A-Z]\. ",line):
            desc.value(self.ns['dct'].creator, line)
        elif re.match("\w+ \d{4}$",line):
            # NOTE: this requires english locale!
            dt = datetime.strptime(line, "%B %Y")
            d = date(dt.year,dt.month,dt.day)
            desc.value(self.ns['dct'].issued, d)
        else:
            # company affiliation - include that separate from
            # personal author identity
            desc.value(self.ns['dct'].rightsHolder, line)
| Python |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
import sys
import os
import re
import datetime
from operator import attrgetter
from ferenda import DocumentRepository
from ferenda import util
from ferenda import legaluri
# from ferenda import LegalRef
__version__ = (0, 1)
__author__ = "Staffan Malmgren <staffan@tomtebo.org>"
class SKVFS(DocumentRepository):
    """Downloads regulations (SKVFS/RSFS) from the Swedish Tax Agency."""
    module_dir = "skvfs"
    source_encoding = "utf-8"
    # start_url = "http://www.skatteverket.se/rattsinformation/foreskrifter/tidigarear.4.1cf57160116817b976680001670.html"
    # This url contains slightly more (older) links (and a different layout)?
    start_url = "http://www.skatteverket.se/rattsinformation/lagrummet/foreskriftergallande/aldrear.4.19b9f599116a9e8ef3680003547.html"
    # also consolidated versions
    # http://www.skatteverket.se/rattsinformation/lagrummet/foreskrifterkonsoliderade/aldrear.4.19b9f599116a9e8ef3680004242.html
    downloaded_suffix = ".pdf"

    # URL's are highly unpredictable. We must find the URL for every
    # resource we want to download, we cannot transform the resource
    # id into a URL
    def download_everything(self, usecache=False):
        """Download documents for every year linked from the start page."""
        self.log.info("Starting at %s" % self.start_url)
        self.browser.open(self.start_url)
        years = {}
        for link in sorted(self.browser.links(text_regex=r'^\d{4}$'),
                           key=attrgetter('text')):
            year = int(link.text)
            # Documents for the years 1985-2003 are all on one page
            # (with links leading to different anchors). To avoid
            # re-downloading stuff when usecache=False, make sure we
            # haven't seen this url (sans fragment) before
            url = link.absolute_url.split("#")[0]
            if year not in years and url not in list(years.values()):
                self.download_year(year, url, usecache=usecache)
                years[year] = url

    # just download the most recent year
    def download_new(self):
        """Download only the most recent year's documents."""
        self.log.info("Starting at %s" % self.start_url)
        self.browser.open(self.start_url)
        link = sorted(self.browser.links(text_regex=r'^\d{4}$'),
                      key=attrgetter('text'), reverse=True)[0]
        self.download_year(int(link.text), link.absolute_url, usecache=True)

    def download_year(self, year, url, usecache=False):
        """Download every regulation linked from one year's listing page."""
        self.log.info("Downloading year %s from %s" % (year, url))
        self.browser.open(url)
        for link in (self.browser.links(text_regex=r'FS \d+:\d+')):
            if "bilaga" in link.text:  # "bilaga" = attachment
                self.log.warning("Skipping attachment in %s" % link.text)
                continue
            # sanitize trailing junk
            linktext = re.match("\w+FS \d+:\d+", link.text).group(0)
            # something like skvfs/2010/23 or rsfs/1996/9
            basefile = linktext.strip(
            ).lower().replace(" ", "/").replace(":", "/")
            self.download_single(
                basefile, link.absolute_url, usecache=usecache)

    def download_single(self, basefile, url, usecache=False):
        """Download one document and, from 2007 onwards, its PDF version.

        Returns True when anything new was fetched.
        """
        self.log.info("Downloading %s from %s" % (basefile, url))
        self.document_url = url + "#%s"
        html_downloaded = super(
            SKVFS, self).download_single(basefile, usecache)
        year = int(basefile.split("/")[1])
        if year >= 2007:  # download pdf as well
            filename = self.downloaded_path(basefile)
            pdffilename = os.path.splitext(filename)[0] + ".pdf"
            if not usecache or not os.path.exists(pdffilename):
                soup = self.soup_from_basefile(basefile)
                pdflink = soup.find(href=re.compile('\.pdf$'))
                if not pdflink:
                    self.log.debug("No PDF file could be found")
                    return html_downloaded
                pdftext = util.element_text(pdflink)
                # NOTE(review): urllib is not imported at module level in
                # this file; this line raises NameError when reached --
                # add "import urllib.parse" at the top.
                pdfurl = urllib.parse.urljoin(url, pdflink['href'])
                self.log.debug("Found %s at %s" % (pdftext, pdfurl))
                pdf_downloaded = self.download_if_needed(pdfurl, pdffilename)
                return html_downloaded and pdf_downloaded
            else:
                return False
        else:
            return html_downloaded
# CLI entry point: dispatch to the repository's default command runner.
if __name__ == "__main__":
    SKVFS.run()
| Python |
# -*- coding: utf-8 -*-
from ferenda import DocumentRepository
import re
class Kommitte(DocumentRepository):
    """Downloads descriptions of government committees from a TRIPS
    database instance."""
    module_dir = "komm"
    start_url = "http://62.95.69.15/cgi-bin/thw?${HTML}=komm_lst&${OOHTML}=komm_doc&${SNHTML}=komm_err&${MAXPAGE}=26&${TRIPSHOW}=format%3DTHW&${BASE}=KOMM"
    source_encoding = "iso-8859-1"
    document_url = "http://62.95.69.15/cgi-bin/thw?${HTML}=komm_lst&${OOHTML}=komm_doc&${TRIPSHOW}=format=THW&${APPL}=KOMM&${BASE}=KOMM&BET=%s"

    # this is almost identical to DirTrips. Can we refactor similar
    # things to a TripsRepository base class?
    re_basefile = re.compile(r'(\w+ \d{4}:\w+)', re.UNICODE)

    def download_everything(self, usecache=False):
        """Walk every paginated result page, downloading each document."""
        self.log.info("Starting at %s" % self.start_url)
        self.browser.open(self.start_url)
        done = False
        pagecnt = 1
        while not done:
            self.log.info('Result page #%s' % pagecnt)
            for link in self.browser.links(text_regex=self.re_basefile):
                basefile = self.re_basefile.search(link.text).group(1)
                if not isinstance(basefile, str):
                    # Decode bytes link text using the source encoding.
                    basefile = str(basefile, encoding=self.source_encoding)
                # NOTE(review): urllib is not imported in this module;
                # this raises NameError when reached -- add
                # "import urllib.parse" at the top.
                url = self.document_url % urllib.parse.quote(link.text)
                self.download_single(basefile, usecache=usecache, url=url)
            try:
                # "Fler poster" = "More results" (next-page link)
                self.browser.follow_link(text='Fler poster')
                pagecnt += 1
            # NOTE(review): LinkNotFoundError (from mechanize) is not
            # imported here either -- confirm the import exists upstream.
            except LinkNotFoundError:
                self.log.info(
                    'No next page link found, this was the last page')
                done = True

    def parse_from_soup(self, soup, basefile):
        """Dump the raw text of the last <pre> block (real parsing TODO)."""
        pre = soup.findAll("pre")[-1]
        text = ''.join(pre.findAll(text=True))
        print(text)
        # End result something like this
        #
        # <http://rinfo.lagrummet.se/komm/a/1991:03> a :Kommittebeskrivning
        #     dct:identifier "A 1991:03" ;
        #     :tillkalladAr "1991" ;
        #     :lopnummer "03";
        #     :kommittestatus "Avslutad";
        #     :avslutadAr "1993";
        #     :departement <http://rinfo.lagrummet.se/publ/org/Arbetsmarknadsdepartementet>;
        #     :kommittedirektiv <http://rinfo.lagrummet.se/publ/dir/1991:75> ,
        #         <http://rinfo.lagrummet.se/publ/dir/1992:33> ,
        #     :betankanden <http://rinfo.lagrummet.se/publ/bet/sou/1993:81> .
        #
        # <http://rinfo.lagrummet.se/publ/bet/sou/1993:81> dct:title "Översyn av arbetsmiljölagen";
# CLI entry point: dispatch to the repository's default command runner.
if __name__ == "__main__":
    Kommitte.run()
| Python |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
import re
from . import Regeringen
# are there other sources? www.sou.gov.se directs here,
# anyway. Possibly
# https://www.riksdagen.se/Webbnav/index.aspx?nid=3282, but it's
# unsure whether they have any more information, or they just import
# from regeringen.se (the data quality suggests some sort of auto
# import). Some initial comparisons cannot find data that riksdagen.se
# has that regeringen.se doesn't
class SOU(Regeringen):
    """Statens offentliga utredningar (SOU), fetched via regeringen.se."""
    module_dir = "sou"
    # strict: requires the "SOU" prefix; lax: accepts a bare "yyyy:nn"
    re_basefile_strict = re.compile(r'SOU (\d{4}:\d+)')
    re_basefile_lax = re.compile(r'(?:SOU|) ?(\d{4}:\d+)', re.IGNORECASE)

    def __init__(self, options):
        super(SOU, self).__init__(options)
        # Regeringen filters its document listing by this type constant.
        self.document_type = self.SOU
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals,print_function
"""Hanterar (konsoliderade) f\xf6rfattningar i SFS fr\xe5n Regeringskansliet
r\xe4ttsdatabaser.
"""
# system libraries (+ six)
from collections import defaultdict
from datetime import datetime, date
from tempfile import mktemp
from time import time, sleep
import cgi
import codecs
import difflib
import logging
import os
import re
import sys
import xml.etree.ElementTree as PET
import xml.etree.cElementTree as ET
import six
from six.moves import html_parser
if six.PY3:
from urllib.parse import quote, unquote
else:
from urllib import quote, unquote # NOQA
try:
from collections import OrderedDict
except ImportError:
# if on python 2.6
from ordereddict import OrderedDict
# 3rdparty libs
from rdflib import Graph, Namespace, URIRef, RDF, RDFS, Literal
import requests
import lxml.html
from lxml.builder import ElementMaker
import bs4
# my own libraries
from ferenda import util
from ferenda import decorators
from ferenda.elements import CompoundElement
from ferenda.elements import DateElement
from ferenda.elements import MapElement
from ferenda.elements import OrdinalElement
from ferenda.elements import PredicateType
from ferenda.elements import TemporalElement
from ferenda.elements import UnicodeElement
from ferenda.legalref import LegalRef, Link, LinkSubject
from ferenda import DocumentEntry, DocumentStore
from ferenda import TextReader, Describer
from ferenda.errors import DocumentRemovedError, ParseError
from . import SwedishLegalSource, RPUBL
E = ElementMaker(namespace="http://www.w3.org/1999/xhtml")
__shortdesc__ = "F\xf6rfattningar i SFS"
# Objektmodellen f\xf6r en f\xf6rfattning \xe4r uppbyggd av massa byggstenar
# (kapitel, paragrafen, stycken m.m.) d\xe4r de allra flesta \xe4r n\xe5gon
# form av lista. \xc4ven stycken \xe4r listor, dels d\xe5 de kan inneh\xe5lla
# lagrumsh\xe4nvisningar i den l\xf6pande texten, som uttrycks som
# Link-objekt mellan de vanliga unicodetextobjekten, dels d\xe5 de kan
# inneh\xe5lla en punkt- eller nummerlista.
#
# Alla klasser \xe4rver fr\xe5n antingen CompoundElement (som \xe4r en list
# med lite extraegenskaper), UnicodeElement (som \xe4r en unicode med
# lite extraegenskaper) eller MapElement (som \xe4r ett dict med lite
# extraegenskaper).
#
# De kan \xe4ven \xe4rva fr\xe5n TemporalElement om det \xe4r ett objekt som kan
# upph\xe4vas eller tr\xe4da ikraft (exv paragrafer och rubriker, men inte
# enskilda stycken) och/eller OrdinalElement om det \xe4r ett objekt
# som har n\xe5n sorts l\xf6pnummer, dvs kan sorteras p\xe5 ett meningsfullt
# s\xe4tt (exv kapitel och paragrafer, men inte rubriker).
class Forfattning(CompoundElement, TemporalElement):
    """Base class for a consolidated statute text."""
    tagname = "body"
    classname = "konsolideradforfattning"
# Rubrik is one of the few building blocks that really cannot contain
# anything else (a heading "never" contains a reference in its text).
# It therefore derives from UnicodeElement, not CompoundElement.
class Rubrik(UnicodeElement, TemporalElement):
    """A heading of some kind -- a main heading or subheading in the
    running text, a chapter heading, or something else."""
    fragment_label = "R"

    def _get_tagname(self):
        # Subheadings render as <h3>, every other heading as <h2>.
        if 'type' in self and self.type == "underrubrik":
            return "h3"
        else:
            return "h2"
    # BUG FIX: property()'s second positional argument is the *setter*,
    # not the docstring; passing the docstring positionally made any
    # assignment to .tagname attempt to call a string. Pass it via doc=.
    tagname = property(_get_tagname, doc="XHTML tag name for this heading")

    def __init__(self, *args, **kwargs):
        self.id = kwargs.get("id", None)
        self.uri = kwargs.get("uri", None)
        super(Rubrik, self).__init__(*args, **kwargs)
class Stycke(CompoundElement):
    # A paragraph of running text; may contain Link objects and nested lists.
    fragment_label = "S"
    tagname = "p"
    typeof = "rinfoex:Stycke"  # not defined by the rpubl vocab

    def __init__(self, *args, **kwargs):
        self.id = kwargs.get("id", None)
        self.uri = kwargs.get("uri", None)
        super(Stycke, self).__init__(*args, **kwargs)


class Strecksatslista (CompoundElement):
    # Dashed (bullet) list
    tagname = "ul"
    classname = "strecksatslista"


class NumreradLista (CompoundElement):
    # Numbered list
    tagname = "ul"  # These lists are not always monotonically
                    # increasing, which an <ol> requires
    classname = "numreradlista"


class Bokstavslista (CompoundElement):
    # Lettered list ("a)", "b)" ...)
    tagname = "ul"  # See above
    classname = "bokstavslista"


# class Preformatted(UnicodeElement):
#     pass

class Tabell(CompoundElement):
    # Table
    tagname = "table"


class Tabellrad(CompoundElement, TemporalElement):
    # Table row; temporal, since rows can be repealed/commence individually
    tagname = "tr"


class Tabellcell(CompoundElement):
    # Table cell
    tagname = "td"


class Avdelning(CompoundElement, OrdinalElement):
    # Division ("avdelning"): the largest structural unit of a statute
    tagname = "div"
    fragment_label = "A"

    def __init__(self, *args, **kwargs):
        self.id = kwargs.get("id", None)
        self.uri = kwargs.get("uri", None)
        super(Avdelning, self).__init__(*args, **kwargs)
class UpphavtKapitel(UnicodeElement, OrdinalElement):
    """An UpphavtKapitel (repealed chapter) differs from a repealed
    Kapitel in that none of the actual statute text remains, only a
    placeholder."""
    pass
class Kapitel(CompoundElement, OrdinalElement):
    """A chapter of a statute."""
    fragment_label = "K"
    # BUG FIX: this attribute was spelled 'dagname' -- a typo for
    # 'tagname' (every sibling element class defines tagname), which
    # left Kapitel without an explicit tag when serialized.
    tagname = "div"
    typeof = "rpubl:Kapitel"  # FIXME: This is qname string, not
                              # rdflib.URIRef (which would be better),
                              # since as_xhtml doesn't have access to
                              # a graph with namespace bindings, which
                              # is required to turn a URIRef to a
                              # qname

    def __init__(self, *args, **kwargs):
        self.id = kwargs.get("id", None)
        self.uri = kwargs.get("uri", None)
        super(Kapitel, self).__init__(*args, **kwargs)
class UpphavdParagraf(UnicodeElement, OrdinalElement):
    # Placeholder for a repealed paragraph (only its number remains).
    pass


# a paragraph has no "own" value, only a number and one or more
# subsections (stycken)
class Paragraf(CompoundElement, OrdinalElement):
    fragment_label = "P"
    tagname = "div"
    typeof = "rpubl:Paragraf"  # FIXME: see above

    def __init__(self, *args, **kwargs):
        self.id = kwargs.get("id", None)
        self.uri = kwargs.get("uri", None)
        super(Paragraf, self).__init__(*args, **kwargs)
# may contain nested numbered lists
class Listelement(CompoundElement, OrdinalElement):
    # One item of a Strecksatslista/NumreradLista/Bokstavslista
    fragment_label = "N"
    tagname = "li"

    def __init__(self, *args, **kwargs):
        self.id = kwargs.get("id", None)
        self.uri = kwargs.get("uri", None)
        super(Listelement, self).__init__(*args, **kwargs)


class Overgangsbestammelser(CompoundElement):
    # Container for all transitional provisions of a statute.
    def __init__(self, *args, **kwargs):
        self.rubrik = kwargs.get('rubrik', '\xd6verg\xe5ngsbest\xe4mmelser')
        super(Overgangsbestammelser, self).__init__(*args, **kwargs)


class Overgangsbestammelse(CompoundElement, OrdinalElement):
    # One transitional provision (typically per amendment SFS number).
    tagname = "div"
    fragment_label = "L"

    def __init__(self, *args, **kwargs):
        self.id = kwargs.get("id", None)
        self.uri = kwargs.get("uri", None)
        super(Overgangsbestammelse, self).__init__(*args, **kwargs)
class Bilaga(CompoundElement):
    # Appendix to a statute.
    fragment_label = "B"

    def __init__(self, *args, **kwargs):
        self.id = kwargs.get("id", None)
        self.uri = kwargs.get("uri", None)
        super(Bilaga, self).__init__(*args, **kwargs)


class Register(CompoundElement):
    """Contains some metadata about a base statute and its subsequent
    amendment statutes."""
    tagname = "div"
    classname = "register"

    def __init__(self, *args, **kwargs):
        self.rubrik = kwargs.get('rubrik', None)
        super(Register, self).__init__(*args, **kwargs)

    def as_xhtml(self, uri=None):
        # Prepend the heading (rubrik) to the rendered children.
        res = super(Register, self).as_xhtml()
        res.insert(0, E('h1', self.rubrik))
        return res
# class Registerpost(MapElement):
class Registerpost(CompoundElement):
    """Metadata for a particular Grundforfattning or Andringsforfattning in the form of a rdflib graph, optionally with a Overgangsbestammelse."""
    tagname = "div"
    classname = "registerpost"

    def __init__(self, *args, **kwargs):
        self.id = kwargs.get("id", None)
        self.uri = kwargs.get("uri", None)
        super(Registerpost, self).__init__(*args, **kwargs)

    def as_xhtml(self, uri=None):
        # FIXME: Render this better (particularly the rpubl:andring
        # property -- should be parsed and linked)
        return super(Registerpost, self).as_xhtml()
class IckeSFS(ParseError):
    """Raised when a document that is not an actual SFS statute is
    parsed."""
    pass


class UpphavdForfattning(DocumentRemovedError):
    # Raised for a repealed statute.
    pass


class IdNotFound(ParseError):
    pass


# Namespace objects for the vocabularies used in the metadata graphs.
DCT = Namespace(util.ns['dct'])
XSD = Namespace(util.ns['xsd'])
RINFOEX = Namespace("http://lagen.nu/terms#")
class SFSDocumentStore(DocumentStore):
    # Maps SFS numbers ("1999:175") to path fragments ("1999/175") and back.
    def basefile_to_pathfrag(self, basefile):
        """Turn an SFS number into a URL-quoted path fragment."""
        return quote(basefile.replace(":", "/"))

    def pathfrag_to_basefile(self, pathfrag):
        """Inverse of basefile_to_pathfrag; tolerates Windows separators."""
        return unquote(pathfrag.replace("\\","/").replace("/", ":"))

    def register_path(self, basefile):
        """Path to the downloaded SFSR register page for basefile."""
        return self.path(basefile, "register", ".html")
class SFS(SwedishLegalSource):
    """
    Documentation to come.
    A note about logging:
    There are four additional loggers available ('paragraf', 'tabell', 'numlist' and 'rubrik'). By default, manager.py turns them off unless config.trace.[logname] is set. Do something like
    ./ferenda-build.py sfs parse 2009:924 --force --sfs-trace-rubrik
    (sets the sfs.rubrik logger level to DEBUG) or
    ./ferenda-build.py sfs parse 2009:924 --force --sfs-trace-tabell=INFO
    """
    alias = "sfs"
    # TRIPS search listing all SFS documents from 1600 to the current year.
    start_url = ("http://rkrattsbaser.gov.se/cgi-bin/thw?${HTML}=sfsr_lst&"
                 "${OOHTML}=sfsr_dok&${SNHTML}=sfsr_err&"
                 "${MAXPAGE}=26&${BASE}=SFSR&${FORD}=FIND&"
                 "%%C5R=FR%%C5N+%(start)s&%%C5R=TILL+%(end)s" % {'start':1600,
                                                                 'end':datetime.today().year})
    # Consolidated statute text (SFST) for a single basefile.
    document_url_template = ("http://rkrattsbaser.gov.se/cgi-bin/thw?${OOHTML}=sfst_dok&"
                             "${HTML}=sfst_lst&${SNHTML}=sfst_err&${BASE}=SFST&"
                             "${TRIPSHOW}=format=THW&BET=%(basefile)s")
    # Register page (SFSR) listing amendments for a single basefile.
    document_sfsr_url_template = ("http://rkrattsbaser.gov.se/cgi-bin/thw?${OOHTML}=sfsr_dok&"
                                  "${HTML}=sfst_lst&${SNHTML}=sfsr_err&${BASE}=SFSR&"
                                  "${TRIPSHOW}=format=THW&BET=%(basefile)s")
    documentstore_class = SFSDocumentStore
def __init__(self, **kwargs):
    """Set up trace loggers, legal-reference parsers and parse state."""
    super(SFS, self).__init__(**kwargs)
    # One sub-logger per trace channel; manager.py controls their levels.
    self.trace = {}
    for channel in ('paragraf', 'tabell', 'numlist', 'rubrik'):
        self.trace[channel] = logging.getLogger('%s.%s' % (self.alias, channel))
    self.lagrum_parser = LegalRef(LegalRef.LAGRUM,
                                  LegalRef.EGLAGSTIFTNING)
    self.forarbete_parser = LegalRef(LegalRef.FORARBETEN)
    # Parser state: section ordinal and current heading level
    # (0 = unknown, 1 = normal, 2 = sub).
    self.current_section = '0'
    self.current_headline_level = 0
def get_default_options(self):
    """Extend the inherited defaults with SFS-specific options."""
    options = super(SFS, self).get_default_options()
    options.update(keepexpired=False)
    return options
def canonical_uri(self, basefile, konsolidering=False):
    """Canonical URI for a statute.

    konsolidering may be False (base document), True (latest
    consolidation) or a specific version identifier.
    """
    baseuri = "https://lagen.nu/sfs"  # should(?) be hardcoded
    if not konsolidering:
        return "%s/%s" % (baseuri, basefile)
    # Deliberately == True (not mere truthiness) so that a version
    # string selects the dated consolidation URI below.
    if konsolidering == True:
        return "%s/%s/konsolidering" % (baseuri, basefile)
    return "%s/%s/konsolidering/%s" % (baseuri, basefile, konsolidering)
def download(self):
    """Fetch every SFS document, starting from the search result page
    and following pagination via download_get_basefiles()."""
    self.log.info("Getting first page of results")
    resp = requests.get(self.start_url)
    tree = lxml.html.document_fromstring(resp.text)
    tree.make_links_absolute(self.start_url, resolve_base_href=True)
    links = self.download_get_basefiles(tree.iterlinks())
    for (basefile, link) in links:
        self.download_single(basefile)
    # Record the highest SFS number seen so download_new can resume.
    self._set_last_sfsnr()
def download_get_basefiles(self, source):
    """Yield (basefile, url) pairs from result-page links, following
    "Fler poster" ("More results") pagination links."""
    done = False
    while not done:
        nextpage = False
        # NOTE(review): pagecount is reset on every outer iteration, so
        # the log line below always reports page #2; indentation may
        # have been mangled in transit -- confirm whether this belongs
        # before the while loop.
        pagecount = 1
        for (element, attribute, link, pos) in source:
            if element.text is None:
                continue
            # SFS numbers look like "1999:175"; "N"-prefixed entries
            # are skipped.
            m = re.search("\d+:\d+", element.text)
            if m and not element.text.startswith("N"):
                yield(element.text, link)
            else:
                m = re.match("Fler poster", element.text)
                if m:
                    nextpage = link
        if nextpage:
            pagecount += 1
            self.log.info("Getting page #%s of results" % pagecount)
            resp = requests.get(nextpage)
            tree = lxml.html.document_fromstring(resp.text)
            tree.make_links_absolute(self.start_url, resolve_base_href=True)
            source = tree.iterlinks()
        else:
            done = True
def _set_last_sfsnr(self, last_sfsnr=None):
    """Persist the highest SFS number seen (config.next_sfsnr) so that
    download_new knows where to resume. With no argument, scan every
    downloaded file for the highest number."""
    maxyear = datetime.today().year
    if not last_sfsnr:
        self.log.info("Setting last SFS-nr")
        last_sfsnr = "1600:1"
        # for f in util.list_dirs("%s/sfst" % self.download_dir, ".html"):
        for basefile in self.store.list_basefiles_for("parse"):
            f = self.store.downloaded_path(basefile)
            tmp = self._find_uppdaterad_tom(basefile, f)
            tmpyear = int(tmp.split(":")[0])
            if tmpyear > maxyear:
                # A "future" year is almost certainly a parse artifact.
                self.log.warning('%s is probably not correct, '
                                 'ignoring (%s)' % (tmp, basefile))
                continue
            if util.numcmp(tmp, last_sfsnr) > 0:
                self.log.info('%s > %s (%s)' % (tmp, last_sfsnr, basefile))
                last_sfsnr = tmp
    self.config.next_sfsnr = last_sfsnr
    self.config.write()
def download_new(self):
    """Download statutes published since the last recorded SFS number,
    probing ahead for gaps and for a possible year rollover."""
    if not 'next_sfsnr' in self.config:
        self._set_last_sfsnr()
    (year, nr) = [int(
        x) for x in self.config.next_sfsnr.split(":")]
    done = False
    real_last_sfs_nr = False
    while not done:
        wanted_sfs_nr = '%s:%s' % (year, nr)
        self.log.info('S\xf6ker efter SFS nr %s' % wanted_sfs_nr)
        base_sfsnr_list = self._check_for_sfs(year, nr)
        if base_sfsnr_list:
            self.download_log.info(
                "%s:%s [%s]" % (year, nr, ", ".join(base_sfsnr_list)))
            # usually only a 1-elem list
            for base_sfsnr in base_sfsnr_list:
                # NOTE(review): no _downloadSingle method is defined in
                # this class -- probably should be self.download_single;
                # confirm against upstream.
                uppdaterad_tom = self._downloadSingle(base_sfsnr)
            if base_sfsnr_list[0] == wanted_sfs_nr:
                # An original base statute: neither "Uppdaterad
                # T.O.M." (updated through) nor "Upph\xe4vd av"
                # (repealed by) should be set.
                pass
            elif util.numcmp(uppdaterad_tom, wanted_sfs_nr) < 0:
                self.log.warning(" Texten uppdaterad t.o.m. %s, "
                                 "inte %s" %
                                 (uppdaterad_tom, wanted_sfs_nr))
            if not real_last_sfs_nr:
                real_last_sfs_nr = wanted_sfs_nr
            nr = nr + 1
        else:
            # Peek one number ahead in case of a gap in the sequence.
            self.log.info('tjuvkikar efter SFS nr %s:%s' % (year, nr + 1))
            base_sfsnr_list = self._check_for_sfs(year, nr + 1)
            if base_sfsnr_list:
                if not real_last_sfs_nr:
                    real_last_sfs_nr = wanted_sfs_nr
                nr = nr + 1  # actual downloading next loop
            elif datetime.today().year > year:
                # Maybe the publication year has rolled over.
                self.log.info(' \xc4r det dags att byta \xe5r?')
                base_sfsnr_list = self._check_for_sfs(
                    datetime.today().year, 1)
                if base_sfsnr_list:
                    year = datetime.today().year
                    nr = 1  # actual downloading next loop
                else:
                    self.log.info(' Vi \xe4r klara')
                    done = True
            else:
                self.log.info(' Vi \xe4r klara')
                done = True
    # Persist where the next run should resume.
    if real_last_sfs_nr:
        self._set_last_sfsnr(real_last_sfs_nr)
    else:
        self._set_last_sfsnr("%s:%s" % (year, nr))
def _check_for_sfs(self, year, nr):
    """Given an SFS number, return a list of the SFS numbers of its
    base statutes. Normally an amendment statute has a single base
    statute, but some (e.g. 2008:605) have several. If the SFS number
    does not exist at all, return an empty list."""
    # First, look for a base statute ("grundf\xf6rfattning").
    self.log.info(' Letar efter grundf\xf6rfattning')
    grundforf = []
    url = ("http://62.95.69.15/cgi-bin/thw?${HTML}=sfsr_lst&"
           "${OOHTML}=sfsr_dok&${SNHTML}=sfsr_err&${MAXPAGE}=26&"
           "${BASE}=SFSR&${FORD}=FIND&${FREETEXT}=&BET=%s:%s&"
           "\xC4BET=&ORG=" % (year, nr))
    # FIXME: consider using mechanize
    tmpfile = mktemp()
    self.browser.retrieve(url, tmpfile)
    t = TextReader(tmpfile, encoding="iso-8859-1")
    try:
        # Marker text: "The search gave no hits!"
        t.cue("<p>S\xf6kningen gav ingen tr\xe4ff!</p>")
    except IOError:  # hurra! -- the marker was absent, the number exists
        grundforf.append("%s:%s" % (year, nr))
        return grundforf
    # Then look for an amendment statute ("\xe4ndringsf\xf6rfattning").
    self.log.info(' Letar efter \xe4ndringsf\xf6rfattning')
    url = ("http://62.95.69.15/cgi-bin/thw?${HTML}=sfsr_lst&"
           "${OOHTML}=sfsr_dok&${SNHTML}=sfsr_err&${MAXPAGE}=26&"
           "${BASE}=SFSR&${FORD}=FIND&${FREETEXT}=&BET=&"
           "\xC4BET=%s:%s&ORG=" % (year, nr))
    self.browser.retrieve(url, tmpfile)
    # maybe this is better done through mechanize?
    t = TextReader(tmpfile, encoding="iso-8859-1")
    try:
        t.cue("<p>S\xf6kningen gav ingen tr\xe4ff!</p>")
        self.log.info(' Hittade ingen \xe4ndringsf\xf6rfattning')
        return grundforf
    except IOError:
        t.seek(0)
        try:
            # The base statute's number is carried in a hidden form field.
            t.cuepast('<input type="hidden" name="BET" value="')
            grundforf.append(t.readto("$"))
            self.log.debug(' Hittade \xe4ndringsf\xf6rfattning (till %s)' %
                           grundforf[-1])
            return grundforf
        except IOError:
            # Fall back to scraping every SFS-number-shaped link.
            t.seek(0)
            page = t.read(sys.maxsize)
            for m in re.finditer('>(\d+:\d+)</a>', page):
                grundforf.append(m.group(1))
                self.log.debug(' Hittade \xe4ndringsf\xf6rfattning (till %s)'
                               % grundforf[-1])
            return grundforf
def download_single(self, basefile):
    """Download the most recent consolidated version of the statute
    with the given SFS number, archiving any earlier version on disk.

    NOTE(review): the original (Swedish) docstring said this returns
    the SFS number the statute has been updated through, but the code
    returns the boolean `updated` flag -- confirm the intended contract.
    """
    self.log.debug('Attempting to download %s' % (basefile))
    sfst_url = self.document_url_template % {'basefile': basefile.replace(" ", "+")}
    sfsr_url = self.document_sfsr_url_template % {'basefile': basefile.replace(" ", "+")}
    # FIXME: a lot of code duplication compared to
    # DocumentRepository.download_single. Maybe particularly the
    # DocumentEntry juggling should go into download_if_needed()?
    created = not os.path.exists(self.store.downloaded_path(basefile))
    if self.download_if_needed(sfst_url, basefile):
        if created:
            self.log.info("%s: downloaded from %s" % (basefile, sfst_url))
        else:
            self.log.info(
                "%s: downloaded new version from %s" % (basefile, sfst_url))
        updated = True
    # NOTE(review): when download_if_needed() returns False, `updated`
    # is never bound and the references below raise NameError --
    # indentation may have been mangled in transit; confirm upstream.
    # using the attachmend functionality makes some sense, but
    # requires that self.store.storage_policy = "dir"
    # regfilename= self.store.downloaded_path(basefile,attachment="register")
    regfilename = self.store.register_path(basefile)
    self.download_if_needed(sfsr_url, basefile, archive=False, filename=regfilename)
    entry = DocumentEntry(self.store.documententry_path(basefile))
    now = datetime.now()
    entry.orig_url = sfst_url
    if created:
        entry.orig_created = now
    if updated:
        entry.orig_updated = now
    checked = True
    if checked:
        entry.orig_checked = now
    entry.save()
    return updated
# FIXME: This doesn't work at all
    def get_archive_version(self,basefile):
        """Archive the current downloaded file for *basefile* if a newer
        version has been fetched.

        NOTE(review): this method is known-broken (see FIXME above and
        below): ``sfst_tempfile`` is never defined in this scope,
        ``doc`` is referenced but not a parameter, ``self.browser`` is
        not set up here, and ``old_uppdaterad_tom`` may be unbound when
        checksums match. It appears to be legacy code awaiting a
        rewrite; do not call it as-is.
        """
        sfst_file = self.store.downloaded_path(basefile)
        # FIXME: Implement get_archive_version
        if os.path.exists(sfst_file):
            # NOTE(review): sfst_tempfile is undefined -- presumably a
            # freshly downloaded copy was meant to be created above
            old_checksum = self._checksum(sfst_file)
            new_checksum = self._checksum(sfst_tempfile)
            upphavd_genom = self._find_upphavts_genom(sfst_tempfile)
            uppdaterad_tom = self._find_uppdaterad_tom(basefile, sfst_tempfile)
            if (old_checksum != new_checksum):
                old_uppdaterad_tom = self._find_uppdaterad_tom(
                    basefile, sfst_file)
                uppdaterad_tom = self._find_uppdaterad_tom(
                    basefile, sfst_tempfile)
                if uppdaterad_tom != old_uppdaterad_tom:
                    self.log.info(' %s har \xe4ndrats (%s -> %s)' % (
                        basefile, old_uppdaterad_tom, uppdaterad_tom))
                    self._archive(sfst_file, basefile, old_uppdaterad_tom)
                else:
                    self.log.info(' %s har \xe4ndrats (gammal '
                                  'checksum %s)' % (basefile, old_checksum))
                    self._archive(sfst_file,
                                  basefile, old_uppdaterad_tom, old_checksum)
                # replace the current file, regardless of wheter
                # we've updated it or not
                util.robust_rename(sfst_tempfile, sfst_file)
            elif upphavd_genom:
                self.log.info(' %s har upph\xe4vts' % (basefile))
            else:
                self.log.debug(' %s har inte \xe4ndrats (gammal '
                               'checksum %s)' % (basefile, old_checksum))
        else:
            util.robust_rename(sfst_tempfile, sfst_file)
        sfsr_url = ("http://62.95.69.15/cgi-bin/thw?${OOHTML}=sfsr_dok&"
                    "${HTML}=sfst_lst&${SNHTML}=sfsr_err&${BASE}=SFSR&"
                    "${TRIPSHOW}=format=THW&BET=%s" % basefile.replace(" ", "+"))
        # NOTE(review): "doc" is undefined here -- probably basefile was meant
        sfsr_file = self.store.register_path(doc.basefile)
        # NOTE(review): old_uppdaterad_tom may be unbound on this path
        if (old_uppdaterad_tom and
                old_uppdaterad_tom != uppdaterad_tom):
            self._archive(sfsr_file, basefile, old_uppdaterad_tom)
        util.ensure_dir(sfsr_file)
        sfsr_tempfile = mktemp()
        # NOTE(review): self.browser is not initialized anywhere in view
        self.browser.retrieve(sfsr_url, sfsr_tempfile)
        util.replace_if_different(sfsr_tempfile, sfsr_file)
        if upphavd_genom:
            self.log.info(
                ' %s \xe4r upph\xe4vd genom %s' % (basefile, upphavd_genom))
            return upphavd_genom
        elif uppdaterad_tom:
            self.log.info(
                ' %s \xe4r uppdaterad tom %s' % (basefile, uppdaterad_tom))
            return uppdaterad_tom
        else:
            self.log.info(
                ' %s \xe4r varken uppdaterad eller upph\xe4vd' % (basefile))
            return None
def _find_uppdaterad_tom(self, sfsnr, filename=None, reader=None):
if not reader:
reader = TextReader(filename, encoding='iso-8859-1')
try:
reader.cue("Ändring införd:<b> t.o.m. SFS")
l = reader.readline()
m = re.search('(\d+:\s?\d+)', l)
if m:
return m.group(1)
else:
# if m is None, the SFS id is using a non-standard
# formatting (eg 1996/613-first-version) -- interpret
# it as if it didn't exist
return sfsnr
except IOError:
return sfsnr # the base SFS nr
def _find_upphavts_genom(self, filename):
reader = TextReader(filename, encoding='iso-8859-1')
try:
reader.cue("upphävts genom:<b> SFS")
l = reader.readline()
m = re.search('(\d+:\s?\d+)', l)
if m:
return m.group(1)
else:
return None
except IOError:
return None
def _checksum(self, filename):
"""MD5-checksumman f\xf6r den angivna filen"""
import hashlib
c = hashlib.md5()
try:
plaintext = self.extract_sfst([filename])
# for some insane reason, hashlib:s update method can't seem
# to handle ordinary unicode strings
c.update(plaintext.encode('iso-8859-1'))
except:
self.log.warning("Could not extract plaintext from %s" % filename)
return c.hexdigest()
re_SimpleSfsId = re.compile(r'(\d{4}:\d+)\s*$')
re_SearchSfsId = re.compile(r'\((\d{4}:\d+)\)').search
re_ChangeNote = re.compile(r'(Lag|F\xf6rordning) \(\d{4}:\d+\)\.?$')
re_ChapterId = re.compile(r'^(\d+( \w|)) [Kk]ap.').match
re_DivisionId = re.compile(r'^AVD. ([IVX]*)').match
re_SectionId = re.compile(
r'^(\d+ ?\w?) §[ \.]') # used for both match+sub
re_SectionIdOld = re.compile(
r'^§ (\d+ ?\w?).') # as used in eg 1810:0926
re_DottedNumber = re.compile(r'^(\d+ ?\w?)\. ')
re_Bullet = re.compile(r'^(\-\-?|\x96) ')
re_NumberRightPara = re.compile(r'^(\d+)\) ').match
re_Bokstavslista = re.compile(r'^(\w)\) ')
re_ElementId = re.compile(
r'^(\d+) mom\.') # used for both match+sub
re_ChapterRevoked = re.compile(r'^(\d+( \w|)) [Kk]ap. (upph\xe4vd|har upph\xe4vts) genom (f\xf6rordning|lag) \([\d\:\. s]+\)\.?$').match
re_SectionRevoked = re.compile(r'^(\d+ ?\w?) §[ \.]([Hh]ar upph\xe4vts|[Nn]y beteckning (\d+ ?\w?) §) genom ([Ff]\xf6rordning|[Ll]ag) \([\d\:\. s]+\)\.$').match
re_RevokeDate = re.compile(
r'/(?:Rubriken u|U)pph\xf6r att g\xe4lla U:(\d+)-(\d+)-(\d+)/')
re_RevokeAuthorization = re.compile(
r'/Upph\xf6r att g\xe4lla U:(den dag regeringen best\xe4mmer)/')
re_EntryIntoForceDate = re.compile(
r'/(?:Rubriken t|T)r\xe4der i kraft I:(\d+)-(\d+)-(\d+)/')
re_EntryIntoForceAuthorization = re.compile(
r'/Tr\xe4der i kraft I:(den dag regeringen best\xe4mmer)/')
re_dehyphenate = re.compile(r'\b- (?!(och|eller))', re.UNICODE).sub
re_definitions = re.compile(r'^I (lagen|f\xf6rordningen|balken|denna lag|denna f\xf6rordning|denna balk|denna paragraf|detta kapitel) (avses med|betyder|anv\xe4nds f\xf6ljande)').match
re_brottsdef = re.compile(r'\b(d\xf6ms|d\xf6mes)(?: han)?(?:,[\w§ ]+,)? f\xf6r ([\w ]{3,50}) till (b\xf6ter|f\xe4ngelse)', re.UNICODE).search
re_brottsdef_alt = re.compile(r'[Ff]\xf6r ([\w ]{3,50}) (d\xf6ms|d\xf6mas) till (b\xf6ter|f\xe4ngelse)', re.UNICODE).search
re_parantesdef = re.compile(r'\(([\w ]{3,50})\)\.', re.UNICODE).search
re_loptextdef = re.compile(r'^Med ([\w ]{3,50}) (?:avses|f\xf6rst\xe5s) i denna (f\xf6rordning|lag|balk)', re.UNICODE).search
# use this custom matcher to ensure any strings you intend to convert
# are legal roman numerals (simpler than having from_roman throwing
# an exception)
re_roman_numeral_matcher = re.compile(
'^M?M?M?(CM|CD|D?C?C?C?)(XC|XL|L?X?X?X?)(IX|IV|V?I?I?I?)$').match
@decorators.action
@decorators.managedparsing
def parse(self, doc):
# 3 ways of getting a proper doc.uri (like
# https://lagen.nu/sfs/2008:388/konsolidering/2013:411):
# 1. use self._find_uppdaterad_tom(sfst_file, doc.basefile). Note
# that this value is often wrong (particularly typos are common).
# 2. call self.parse_sfsr(sfsr_file) and find the last
# value. Note that SFSR might be updated before SFST and so
# the last sfs no might be later than what's really in the SFS file.
# 3. analyse all text looking for all end-of-section markers
# like "Lag (2013:411).", then picking the last (sane) one.
# Ideally, we'd like to have doc.uri available early, since
# it's input for steps 2 and 3. Therefore we go for method 1,
# but maybe incorporate warnings (at least later on).
sfst_file = self.store.downloaded_path(doc.basefile)
sfsr_file = self.store.register_path(doc.basefile)
docentry_file = self.store.documententry_path(doc.basefile)
# workaround to fit into the RepoTester framework
if not os.path.exists(sfsr_file):
sfsr_file = sfst_file.replace("/downloaded/", "/register/")
if not os.path.exists(docentry_file):
docentry_file = sfst_file.replace("/downloaded/", "/entries/").replace(".html",".json")
# legacy code -- try to remove this by providing doc.basefile
# to all methods that need it
self.id = doc.basefile
# Check to see if this might not be a proper SFS at all
# (from time to time, other agencies publish their stuff
# in SFS - this seems to be handled by giving those
# documents a SFS nummer on the form "N1992:31". Filter
# these out.
if doc.basefile.startswith('N'):
raise IckeSFS()
# Check to see if the F\xf6rfattning has been revoked (using
# plain fast string searching, no fancy HTML parsing and
# traversing)
t = TextReader(sfst_file, encoding="iso-8859-1")
if not self.config.keepexpired:
try:
t.cuepast('<i>F\xf6rfattningen \xe4r upph\xe4vd/skall upph\xe4vas: ')
datestr = t.readto('</i></b>')
if datetime.strptime(datestr, '%Y-%m-%d') < datetime.today():
self.log.debug('%s: Expired' % doc.basefile)
raise UpphavdForfattning()
except IOError:
pass
# Find out last uppdaterad_tom value
t.seek(0)
uppdaterad_tom = self._find_uppdaterad_tom(doc.basefile,reader=t)
# now we can set doc.uri for reals
doc.uri = self.canonical_uri(doc.basefile, uppdaterad_tom)
doc.lang = "sv"
desc = Describer(doc.meta, doc.uri)
registry = self.parse_sfsr(sfsr_file, doc.uri)
#for uri, graph in registry.items():
# print("==== %s ====" % uri)
# print(graph.serialize(format="turtle").decode("utf-8"))
try:
plaintext = self.extract_sfst(sfst_file)
plaintextfile = self.store.path(doc.basefile, "intermediate", ".txt")
util.writefile(plaintextfile, plaintext, encoding="iso-8859-1")
(plaintext, patchdesc) = self.patch_if_needed(doc.basefile, plaintext)
if patchdesc:
desc.value(self.ns['rinfoex'].patchdescription,
patchdescription)
self.parse_sfst(plaintext, doc)
except IOError:
self.log.warning("%s: Fulltext saknas" % self.id)
# extractSFST misslyckades, d\xe5 det fanns n\xe5gon post i
# SFST-databasen (det h\xe4nder alltf\xf6r ofta att bara
# SFSR-databasen \xe4r uppdaterad).
desc.value(self.ns['dct'].title,
registry.value(URIRef(docuri),
self.ns['dct'].title))
desc.rel(self.ns['dct'].publisher,
self.lookup_resource("Regeringskansliet"))
desc.value(self.ns['dct'].identifier, "SFS " + basefile)
doc.body = Forfattning([Style(['Lagtext saknas'],
id='S1')])
# At this point, we basic metadata and a almost complete body
# structure. Enhance the metadata:
desc.value(self.ns['rdfs'].comment, 'Räksmörgås')
doc.meta.add((URIRef(doc.uri), self.ns['rdfs'].label, Literal('Räksmörgås')))
for uri in registry.keys():
desc.rel(self.ns['rpubl'].konsolideringsunderlag, uri)
desc.rdftype(self.ns['rpubl'].KonsolideradGrundforfattning)
desc.rev(self.ns['owl'].sameAs, self.canonical_uri(doc.basefile, True)) # FIXME: make this part of head metadata
desc.rel(self.ns['rpubl'].konsoliderar, self.canonical_uri(doc.basefile))
de = DocumentEntry(docentry_file)
desc.value(self.ns['rinfoex'].senastHamtad, de.orig_updated)
desc.value(self.ns['rinfoex'].senastKontrollerad, de.orig_checked)
# find any established abbreviation -- FIXME: simplifize, most
# code should be in SwedishLegalSource (c.f. lookup_resource)
g = Graph()
resource_path = "../../../res/etc/sfs-extra.n3"
if not resource_path.startswith(os.sep):
resource_path = os.path.normpath(
os.path.dirname(__file__) + os.sep + resource_path)
g.load(resource_path, format="n3")
grf_uri = self.canonical_uri(doc.basefile)
v = g.value(URIRef(grf_uri), self.ns['dct'].alternate, any=True)
if v:
desc.value(self.ns['dct'].alternate, v)
# Finally: the dct:published property for this
# rpubl:KonsolideradGrundforfattning isn't readily
# available. The true value is only found by parsing PDF files
# in another docrepo. There are three general ways of finding
# it out.
published = None
# 1. if registry contains a single value (ie a
# Grundförfattning that hasn't been amended yet), we can
# assume that dct:published == rpubl:utfardandedatum
if len(registry) == 1:
published = desc.getvalue(self.ns['rpubl'].utfardandedatum)
else:
# 2. if the last post in registry contains a
# rpubl:utfardandedatum, assume that this version of the
# rpubl:KonsolideradGrundforfattning has the same dct:published date
last_post_uri = list(registry.keys())[-1]
last_post_graph = registry[last_post_uri]
published = last_post_graph.value(URIRef(last_post_uri),
self.ns['rpubl'].utfardandedatum).toPython()
if not published:
# 3. general fallback: Use the corresponding orig_updated
# on the DocumentEntry. This is not correct (as it
# represents the date we fetched the document, not the
# date the document was made available), but it's as close
# as we can get.
published = de.orig_updated.date()
assert isinstance(published, date)
desc.value(self.ns['dct'].published, published)
rinfo_sameas = "http://rinfo.lagrummet.se/publ/sfs/%s/konsolidering/%s" % (doc.basefile, published.strftime("%Y-%m-%d"))
desc.rel(self.ns['owl'].sameAs, rinfo_sameas)
# finally, combine data from the registry with any possible
# overgangsbestammelser, and append them at the end of the
# document.
obs = {}
obsidx = None
for idx, p in enumerate(doc.body):
if isinstance(p, Overgangsbestammelser):
for ob in p:
assert isinstance(p,Overgangsbestammelse)
obs[self.canonical_url(ob.sfsid)] = ob
obsidx = idx
break
if obs:
del doc.body[obsidx]
reg = Register(rubrik='Ändringar och övergångsbestämmelser')
else:
reg = Register(rubrik='Ändringar')
for uri,graph in registry.items():
identifier = graph.value(URIRef(uri),self.ns['dct'].identifier)
identifier = identifier.replace("SFS ", "L")
rp = Registerpost(uri=uri, meta=graph, id=identifier)
reg.append(rp)
if uri in obs:
rp.append(obs[uri])
doc.body.append(reg)
def _forfattningstyp(self, forfattningsrubrik):
if (forfattningsrubrik.startswith('Lag ') or
(forfattningsrubrik.endswith('lag') and not forfattningsrubrik.startswith('F\xf6rordning')) or
forfattningsrubrik.endswith('balk')):
return self.ns['rpubl'].Lag
else:
return self.ns['rpubl'].Forordning
def _dict_to_graph(self, d, graph, uri):
mapping = {'SFS nr': self.ns['rpubl'].fsNummer,
'Rubrik': self.ns['dct'].title,
'Senast h\xe4mtad': self.ns['rinfoex'].senastHamtad,
'Utf\xe4rdad': self.ns['rpubl'].utfardandedatum,
'Utgivare': self.ns['dct'].publisher,
'Departement/ myndighet': self.ns['dct'].creator
}
desc = Describer(graph, uri)
for (k,v) in d.items():
if k in mapping:
if hasattr(v, 'uri'):
desc.rel(mapping[k],v.uri)
else:
desc.value(mapping[k], v)
# # metadataf\xe4lt (kan f\xf6rekomma i b\xe5de SFST-header och SFSR-datat)
# # som bara har ett enda v\xe4rde
# labels = {'SFS-nummer': RPUBL['fsNummer'],
# 'SFS nr': RPUBL['fsNummer'],
# 'Ansvarig myndighet': DCT['creator'],
# 'Departement/ myndighet': DCT['creator'],
# 'Utgivare': DCT['publisher'],
# 'Rubrik': DCT['title'],
# 'Utf\xe4rdad': RPUBL['utfardandedatum'],
# 'Ikraft': RPUBL['ikrafttradandedatum'],
# 'Observera': RDFS.comment, # FIXME: hitta b\xe4ttre predikat
# '\xd6vrigt': RDFS.comment, # FIXME: hitta b\xe4ttre predikat
# 'Tidsbegr\xe4nsad': RINFOEX['tidsbegransad'],
# 'Omtryck': RINFOEX['omtryck'], # subtype av RPUBL['fsNummer']
# '\xc4ndring inf\xf6rd': RPUBL['konsolideringsunderlag'],
# 'F\xf6rfattningen har upph\xe4vts genom':
# RINFOEX['upphavdAv'],
# # ska vara owl:inverseOf
# # rinfo:upphaver
# 'Upph\xe4vd': RINFOEX['upphavandedatum']
# }
#
# # metadataf\xe4lt som kan ha flera v\xe4rden (kommer representeras som
# # en lista av unicodeobjekt och LinkSubject-objekt)
# multilabels = {'F\xf6rarbeten': RPUBL['forarbete'],
# 'CELEX-nr': RPUBL['forarbete'],
# 'Omfattning': RPUBL['andrar'], # ocks\xe5 RPUBL['ersatter'], RPUBL['upphaver'], RPUBL['inforsI']
# }
def parse_sfsr(self, filename, docuri):
"""Parsear ut det SFSR-registret som inneh\xe5ller alla \xe4ndringar
i lagtexten fr\xe5n HTML-filer"""
all_attribs = []
with codecs.open(filename, encoding="iso-8859-1") as fp:
soup = bs4.BeautifulSoup(fp.read(),"lxml")
d = OrderedDict()
#g = Graph()
#desc = Describer(g, docuri)
rubrik = soup.body('table')[2].text.strip()
changes = soup.body('table')[3:-2]
for table in changes:
sfsnr = table.find(text="SFS-nummer:").find_parent("td").find_next_sibling("td").text.strip()
# FIXME: canonical uri for this docrepo is consolidated
# documents. we need the uri for the base document. Either
# create a helper docrepo (ferenda.legal.se.SFSPrint) or
# implement a helper method.
docuri = self.canonical_uri(sfsnr)
g = self.make_graph() # to get proper namespace bindings
d[docuri] = g
desc = Describer(g,docuri)
rowdict = {}
for row in table('tr'):
key = row.td.text.strip()
if key.endswith(":"):
key = key[:-1] # trim ending ":"
elif key == '':
continue
val = row('td')[1].text.replace('\xa0', ' ').strip()
if val == "":
continue
rowdict[key] = val
# first change does not contain a "Rubrik" key. Fake it.
if 'Rubrik' not in rowdict and rubrik:
rowdict['Rubrik'] = rubrik
rubrik = None
for key,val in rowdict.items():
if key == 'SFS-nummer':
(arsutgava, lopnummer) = val.split(":")
desc.value(self.ns['dct'].identifier, "SFS " + val)
desc.value(self.ns['rpubl'].arsutgava, arsutgava)
desc.value(self.ns['rpubl'].lopnummer, lopnummer)
# desc.value("rpubl:lopnummer", lopnummer)
elif key == 'Ansvarig myndighet':
try:
authrec = self.lookup_resource(val)
desc.rel(self.ns['rpubl'].departement, authrec)
except Exception as e:
desc.value(self.ns['rpubl'].departement, val)
elif key == 'Rubrik':
if not self.id in val:
self.log.warning("%s: Base SFS %s not found in title %r" % (self.id, self.id, val))
desc.value(self.ns['dct'].title, Literal(val, lang="sv"))
desc.rdftype(self._forfattningstyp(val))
elif key == 'Observera':
if not self.config.keepexpired:
if 'F\xf6rfattningen \xe4r upph\xe4vd/skall upph\xe4vas: ' in val:
if datetime.strptime(val[41:51], '%Y-%m-%d') < datetime.today():
raise UpphavdForfattning()
desc.value(self.ns['rdfs'].comment, val)
elif key == 'Ikraft':
desc.value(self.ns['rpubl'].ikrafttradandedatum,
datetime.strptime(val[:10], '%Y-%m-%d').date())
elif key == 'Omfattning':
# First, create rdf statements for every
# single modified section we can find
for changecat in val.split('; '):
if (changecat.startswith('\xe4ndr.') or
changecat.startswith('\xe4ndr ') or
changecat.startswith('\xe4ndring ')):
pred = self.ns['rpubl'].ersatter
elif (changecat.startswith('upph.') or
changecat.startswith('utg\xe5r')):
pred = self.ns['rpubl'].upphaver
elif (changecat.startswith('ny') or
changecat.startswith('ikrafttr.') or
changecat.startswith('ikrafftr.') or
changecat.startswith('ikraftr.') or
changecat.startswith('ikrafttr\xe4d.') or
changecat.startswith('till\xe4gg')):
pred = self.ns['rpubl'].inforsI
elif (changecat.startswith('nuvarande') or
changecat in ('begr. giltighet','Omtryck',
'omtryck', 'forts.giltighet',
'forts. giltighet',
'forts. giltighet av vissa best.')):
pred = None
else:
self.log.warning("%s: Ok\xe4nd omfattningstyp %r" % (self.id, changecat))
pred = None
for node in self.lagrum_parser.parse(changecat, docuri, pred):
if hasattr(node,'predicate'):
desc.rel(node.predicate, node.uri)
# Secondly, preserve the entire text
desc.value(self.ns['rpubl'].andrar, val)
elif key == 'F\xf6rarbeten':
for node in self.forarbete_parser.parse(val):
if hasattr(node, 'uri'):
with desc.rel(self.ns['rpubl'].forarbete,
node.uri):
desc.value(self.ns['dct'].identifier,
six.text_type(node))
elif key == 'CELEX-nr':
for celex in re.findall('3\d{2,4}[LR]\d{4}', val):
celexuri = "http://rinfo.lagrummet.se/ext/eur-lex/%s" % celex
with desc.rel(self.ns['rpubl'].genomforDirektiv,
celexuri):
desc.value(self.ns['rpubl'].celexNummer, celex)
elif key == 'Tidsbegr\xe4nsad':
expdate = datetime.strptime(val[:10], '%Y-%m-%d')
desc.value(self.ns['rinfoex'].tidsbegransad, expdate)
if expdate < datetime.today():
if not self.config.keepexpired:
raise UpphavdForfattning()
else:
self.log.warning(
'%s: Obekant nyckel [\'%s\']' % self.id, key)
# finally, add some properties not directly found in the
# registry, but which are always present for SFSes, or deducible
desc.rel(self.ns['dct'].publisher,
self.lookup_resource("Regeringskansliet"))
desc.rel(self.ns['rpubl'].beslutadAv,
self.lookup_resource("Regeringskansliet"))
desc.rel(self.ns['rpubl'].forfattningssamling,
"http://rinfo.lagrummet.se/serie/fs/sfs")
desc.rel(self.ns['owl'].sameAs,
"http://rinfo.lagrummet.se/publ/sfs/" + sfsnr)
utfardandedatum = self._find_utfardandedatum(sfsnr)
if utfardandedatum:
desc.value(self.ns['rpubl'].utfardandedatum, utfardandedatum)
return d
def _find_utfardandedatum(self, sfsnr):
# FIXME: Code to instantiate a SFSTryck object and muck about goes here
fake = {'2013:363': date(2013,5,23),
'2008:344': date(2008,5,22),
'2009:1550': date(2009,12,17),
'2013:411': date(2013,5,30),
}
return fake.get(sfsnr, None)
def extract_sfst(self, filename):
"""Plockar fram plaintextversionen av den konsoliderade
lagtexten fr\xe5n nedladdade HTML-filer"""
t = TextReader(filename, encoding="iso-8859-1")
t.cuepast('<pre>')
# remove ä et al
hp = html_parser.HTMLParser()
txt = hp.unescape(t.readto('</pre>'))
if not '\r\n' in txt:
txt = txt.replace('\n', '\r\n')
re_tags = re.compile("</?\w{1,3}>")
txt = re_tags.sub('', txt)
return txt
def _term_to_subject(self, term):
capitalized = term[0].upper() + term[1:]
return 'http://lagen.nu/concept/%s' % capitalized.replace(' ', '_')
# Post-processar dokumenttr\xe4det rekursivt och g\xf6r tre saker:
#
# Hittar begreppsdefinitioner i l\xf6ptexten
#
# Hittar adresserbara enheter (delresurser som ska ha unika URI:s,
# dvs kapitel, paragrafer, stycken, punkter) och konstruerar id's
# f\xf6r dem, exv K1P2S3N4 f\xf6r 1 kap. 2 § 3 st. 4 p
#
# Hittar lagrumsh\xe4nvisningar i l\xf6ptexten
    def _construct_ids(self, element, prefix, baseuri, skip_fragments=[], find_definitions=False):
        """Recursively post-process the document tree, doing three things:

        * find term definitions in the running text
        * find addressable units (subresources that should get unique
          URIs: chapters, sections, stycken, items) and construct ids
          for them, eg "K1P2S3N4" for 1 kap. 2 \u00a7 3 st. 4 p
        * find statute references in the running text

        NOTE(review): skip_fragments=[] is a mutable default argument;
        it is only ever read here, so it is harmless in practice, but a
        None or tuple default would be safer.
        """
        find_definitions_recursive = find_definitions
        counters = defaultdict(int)
        if isinstance(element, CompoundElement):
            # Find term definitions
            if isinstance(element, Paragraf):
                # check whether the first stycke contains text that
                # suggests that definitions follow
                # self.log.debug("Testing %r against some regexes" % element[0][0])
                if self.re_definitions(element[0][0]):
                    find_definitions = "normal"
                if (self.re_brottsdef(element[0][0]) or
                    self.re_brottsdef_alt(element[0][0])):
                    find_definitions = "brottsrubricering"
                if self.re_parantesdef(element[0][0]):
                    find_definitions = "parantes"
                if self.re_loptextdef(element[0][0]):
                    find_definitions = "loptext"
                find_definitions_recursive = find_definitions
            # Find statute references + definitions
            if (isinstance(element, Stycke)
                or isinstance(element, Listelement)
                or isinstance(element, Tabellcell)):
                nodes = []
                term = None
                # self.log.debug("handling text %s, find_definitions %s" % (element[0],find_definitions))
                if find_definitions:
                    elementtext = element[0]
                    termdelimiter = ":"
                    if isinstance(element, Tabellcell):
                        if elementtext != "Beteckning":
                            term = elementtext
                            self.log.debug(
                                '"%s" \xe4r nog en definition (1)' % term)
                    elif isinstance(element, Stycke):
                        # Case 1: "antisladdsystem: ett tekniskt st\xf6dsystem"
                        # Sometimes, : is not the delimiter between
                        # the term and the definition, but even in
                        # those cases, : might figure in the
                        # definition itself, usually as part of the
                        # SFS number. Do some hairy heuristics to find
                        # out what delimiter to use
                        if find_definitions == "normal":
                            if not self.re_definitions(elementtext):
                                if " - " in elementtext:
                                    if (":" in elementtext and
                                        (elementtext.index(":") < elementtext.index(" - "))):
                                        termdelimiter = ":"
                                    else:
                                        termdelimiter = " - "
                                m = self.re_SearchSfsId(elementtext)
                                if termdelimiter == ":" and m and m.start() < elementtext.index(":"):
                                    termdelimiter = " "
                                if termdelimiter in elementtext:
                                    term = elementtext.split(termdelimiter)[0]
                                    self.log.debug('"%s" \xe4r nog en definition (2.1)' % term)
                        # case 2: "Den som ber\xf6var annan livet, d\xf6ms
                        # f\xf6r mord till f\xe4ngelse"
                        m = self.re_brottsdef(elementtext)
                        if m:
                            term = m.group(2)
                            self.log.debug(
                                '"%s" \xe4r nog en definition (2.2)' % term)
                        # case 3: "F\xf6r milj\xf6brott d\xf6ms till b\xf6ter"
                        m = self.re_brottsdef_alt(elementtext)
                        if m:
                            term = m.group(1)
                            self.log.debug(
                                '"%s" \xe4r nog en definition (2.3)' % term)
                        # case 4: "Inteckning f\xe5r p\xe5 ans\xf6kan av
                        # fastighets\xe4garen d\xf6das (d\xf6dning)."
                        m = self.re_parantesdef(elementtext)
                        if m:
                            term = m.group(1)
                            self.log.debug(
                                '"%s" \xe4r nog en definition (2.4)' % term)
                        # case 5: "Med detaljhandel avses i denna lag
                        # f\xf6rs\xe4ljning av l\xe4kemedel"
                        m = self.re_loptextdef(elementtext)
                        if m:
                            term = m.group(1)
                            self.log.debug(
                                '"%s" \xe4r nog en definition (2.5)' % term)
                    elif isinstance(element, Listelement):
                        # remove the leading bullet/number marker
                        for rx in (self.re_Bullet,
                                   self.re_DottedNumber,
                                   self.re_Bokstavslista):
                            elementtext = rx.sub('', elementtext)
                        term = elementtext.split(termdelimiter)[0]
                        self.log.debug('"%s" \xe4r nog en definition (3)' % term)
                    # Longest legitimate term found "Valutav\xe4xling,
                    # betalnings\xf6verf\xf6ring och annan finansiell
                    # verksamhet"
                    if term and len(term) < 68:
                        # this results in empty/hidden links -- might
                        # be better to hchange sfs.template.xht2 to
                        # change these to <span rel="" href=""/>
                        # instead. Or use another object than LinkSubject.
                        term = util.normalize_space(term)
                        termnode = LinkSubject(term, uri=self._term_to_subject(
                            term), predicate="dct:subject")
                        find_definitions_recursive = False
                    else:
                        term = None
                for p in element: # normally only one, but can be more
                    # if the Stycke has a NumreradLista
                    # or similar
                    if isinstance(p, six.text_type): # look for stuff
                        # normalize and convert some characters
                        s = " ".join(p.split())
                        s = s.replace("\x96", "-")
                        # Make all links have a dct:references
                        # predicate -- not that meaningful for the
                        # XHTML2 code, but needed to get useful RDF
                        # triples in the RDFa output
                        # print "Parsing %s" % " ".join(p.split())
                        # print "Calling parse w %s" % baseuri+"#"+prefix
                        parsednodes = self.lagrum_parser.parse(s,
                                                               baseuri +
                                                               prefix,
                                                               "dct:references")
                        for n in parsednodes:
                            # py2 compat FIxme
                            if term and isinstance(n, six.text_type) and term in n:
                                (head, tail) = n.split(term, 1)
                                nodes.extend((head, termnode, tail))
                            else:
                                nodes.append(n)
                        idx = element.index(p)
                        element[idx:idx + 1] = nodes
            # Construct ids
            for p in element:
                counters[type(p)] += 1
                if hasattr(p, 'fragment_label'):
                    elementtype = p.fragment_label
                    if hasattr(p, 'ordinal'):
                        elementordinal = p.ordinal.replace(" ", "")
                    elif hasattr(p, 'sfsnr'):
                        elementordinal = p.sfsnr
                    else:
                        elementordinal = counters[type(p)]
                    fragment = "%s%s%s" % (prefix, elementtype, elementordinal)
                    p.id = fragment
                    p.uri = baseuri + "#" + fragment
                else:
                    fragment = prefix
                if ((hasattr(p, 'fragment_label') and
                     p.fragment_label in skip_fragments)):
                    self._construct_ids(p, prefix, baseuri, skip_fragments,
                                        find_definitions_recursive)
                else:
                    self._construct_ids(p, fragment, baseuri, skip_fragments,
                                        find_definitions_recursive)
                # After the first table cell in a row has been handled,
                # avoid looking for definitions in cells 2, 3, 4...
                if isinstance(element, Tabellrad):
                    # disable the definition-finding flag
                    find_definitions_recursive = False
def _count_elements(self, element):
counters = defaultdict(int)
if isinstance(element, CompoundElement):
for p in element:
if hasattr(p, 'fragment_label'):
counters[p.fragment_label] += 1
if hasattr(p, 'ordinal'):
counters[p.fragment_label + p.ordinal] += 1
subcounters = self._count_elements(p)
for k in subcounters:
counters[k] += subcounters[k]
return counters
def parse_sfst(self, text, doc):
# self.reader = TextReader(ustring=lawtext,linesep=TextReader.UNIX)
self.reader = TextReader(ustring=text, linesep=TextReader.DOS)
self.reader.autostrip = True
desc = Describer(doc.meta, doc.uri)
self.make_header(desc)
doc.body = self.makeForfattning()
elements = self._count_elements(doc.body)
if 'K' in elements and elements['P1'] < 2:
skipfragments = ['A', 'K']
else:
skipfragments = ['A']
self._construct_ids(doc.body, '',
self.canonical_uri(doc.basefile),
skipfragments)
#----------------------------------------------------------------
#
# SFST-PARSNING
def make_header(self, desc):
subreader = self.reader.getreader(
self.reader.readchunk, self.reader.linesep * 4)
re_sfs = self.re_SimpleSfsId.search
for line in subreader.getiterator(subreader.readparagraph):
if ":" in line:
(key, val) = [util.normalize_space(x)
for x in line.split(":", 1)]
# Simple string literals
if key == 'Rubrik':
desc.value(self.ns['dct'].title, Literal(val, lang="sv"))
elif key == '\xd6vrigt':
desc.value(self.ns['rdfs'].comment, Literal(val, lang="sv"))
elif key == 'SFS nr':
identifier = "SFS " + val
# delay actual writing to graph, since we may need to
# amend this
# date literals
elif key == 'Utf\xe4rdad':
desc.value(self.ns['rpubl'].utfardandedatum,
datetime.strptime(val[:10], '%Y-%m-%d').date())
elif key == 'Tidsbegr\xe4nsad':
desc.value(self.ns['rinfoex'].tidsbegransad,
datetime.strptime(val[:10], '%Y-%m-%d').date())
elif key == 'Upph\xe4vd':
d = datetime.strptime(val[:10], '%Y-%m-%d')
desc.value(self.ns['rpubl'].upphavandedatum, d)
if not self.config.keepexpired and d < datetime.today():
raise UpphavdForfattning()
# urirefs
elif key == 'Departement/ myndighet':
authrec = self.lookup_resource(val)
desc.rel(self.ns['dct'].creator, authrec)
elif (key == '\xc4ndring inf\xf6rd' and re_sfs(val)):
uppdaterad = re_sfs(val).group(1)
# not sure we need to add this, since parse_sfsr catches same
desc.rel(self.ns['rpubl'].konsolideringsunderlag,
self.canonical_uri(uppdaterad))
if identifier and identifier != "SFS " + uppdaterad:
identifier += " i lydelse enligt SFS " + uppdaterad
elif (key == 'Omtryck' and re_sfs(val)):
desc.rel(self.ns['rinfoex'].omtryck,
self.canonical_uri(re_sfs(val).group(1)))
elif (key == 'F\xf6rfattningen har upph\xe4vts genom' and
re_sfs(val)):
desc.rel(self.ns['rinfoex'].upphavdAv,
self.canonical_uri(re_sfs(val).group(1)))
else:
self.log.warning(
'%s: Obekant nyckel [\'%s\']' % (self.id, key))
desc.value(self.ns['dct'].identifier, identifier)
desc.rel(self.ns['dct'].publisher,
self.lookup_resource("Regeringskansliet"))
if not desc.getvalue(self.ns['dct'].title):
self.log.warning("%s: Rubrik saknas" % self.id)
def makeForfattning(self):
while self.reader.peekline() == "":
self.reader.readline()
self.log.debug('F\xf6rsta raden \'%s\'' % self.reader.peekline())
(line, upphor, ikrafttrader) = self.andringsDatum(
self.reader.peekline())
if ikrafttrader:
self.log.debug(
'F\xf6rfattning med ikrafttr\xe4dandedatum %s' % ikrafttrader)
b = Forfattning(ikrafttrader=ikrafttrader,
uri=self.canonical_uri(self.id))
self.reader.readline()
else:
self.log.debug('F\xf6rfattning utan ikrafttr\xe4dandedatum')
b = Forfattning(uri=self.canonical_uri(self.id))
while not self.reader.eof():
state_handler = self.guess_state()
# special case - if a Overgangsbestammelse is encountered
# without the preceeding headline (which would normally
# set state_handler to makeOvergangsbestammelser (notice
# the plural)
if state_handler == self.makeOvergangsbestammelse:
res = self.makeOvergangsbestammelser(rubrik_saknas=True)
else:
res = state_handler()
if res is not None:
b.append(res)
return b
def makeAvdelning(self):
avdelningsnummer = self.idOfAvdelning()
p = Avdelning(rubrik=self.reader.readline(),
ordinal=avdelningsnummer,
underrubrik=None)
if (self.reader.peekline(1) == "" and
self.reader.peekline(3) == "" and
not self.isKapitel(self.reader.peekline(2))):
self.reader.readline()
p.underrubrik = self.reader.readline()
self.log.debug(" Ny avdelning: '%s...'" % p.rubrik[:30])
while not self.reader.eof():
state_handler = self.guess_state()
if state_handler in (self.makeAvdelning, # Strukturer som signalerar att denna avdelning \xe4r slut
self.makeOvergangsbestammelser,
self.makeBilaga):
self.log.debug(" Avdelning %s f\xe4rdig" % p.ordinal)
return p
else:
res = state_handler()
if res is not None:
p.append(res)
# if eof is reached
return p
def makeUpphavtKapitel(self):
kapitelnummer = self.idOfKapitel()
c = UpphavtKapitel(self.reader.readline(),
ordinal=kapitelnummer)
self.log.debug(" Upph\xe4vt kapitel: '%s...'" % c[:30])
return c
def makeKapitel(self):
kapitelnummer = self.idOfKapitel()
para = self.reader.readparagraph()
(line, upphor, ikrafttrader) = self.andringsDatum(para)
kwargs = {'rubrik': util.normalize_space(line),
'ordinal': kapitelnummer}
if upphor:
kwargs['upphor'] = upphor
if ikrafttrader:
kwargs['ikrafttrader'] = ikrafttrader
k = Kapitel(**kwargs)
self.current_headline_level = 0
self.current_section = '0'
self.log.debug(" Nytt kapitel: '%s...'" % line[:30])
while not self.reader.eof():
state_handler = self.guess_state()
if state_handler in (self.makeKapitel, # Strukturer som signalerar slutet p\xe5 detta kapitel
self.makeUpphavtKapitel,
self.makeAvdelning,
self.makeOvergangsbestammelser,
self.makeBilaga):
self.log.debug(" Kapitel %s f\xe4rdigt" % k.ordinal)
return (k)
else:
res = state_handler()
if res is not None:
k.append(res)
# if eof is reached
return k
def makeRubrik(self):
para = self.reader.readparagraph()
(line, upphor, ikrafttrader) = self.andringsDatum(para)
self.log.debug(" Ny rubrik: '%s...'" % para[:30])
kwargs = {}
if upphor:
kwargs['upphor'] = upphor
if ikrafttrader:
kwargs['ikrafttrader'] = ikrafttrader
if self.current_headline_level == 2:
kwargs['type'] = 'underrubrik'
elif self.current_headline_level == 1:
self.current_headline_level = 2
h = Rubrik(line, **kwargs)
return h
def makeUpphavdParagraf(self):
paragrafnummer = self.idOfParagraf(self.reader.peekline())
p = UpphavdParagraf(self.reader.readline(),
ordinal=paragrafnummer)
self.current_section = paragrafnummer
self.log.debug(" Upph\xe4vd paragraf: '%s...'" % p[:30])
return p
def makeParagraf(self):
paragrafnummer = self.idOfParagraf(self.reader.peekline())
self.current_section = paragrafnummer
firstline = self.reader.peekline()
self.log.debug(" Ny paragraf: '%s...'" % firstline[:30])
# L\xe4s f\xf6rbi paragrafnumret:
self.reader.read(len(paragrafnummer) + len(' § '))
# some really old laws have sections split up in "elements"
# (moment), eg '1 § 1 mom.', '1 § 2 mom.' etc
match = self.re_ElementId.match(firstline)
if self.re_ElementId.match(firstline):
momentnummer = match.group(1)
self.reader.read(len(momentnummer) + len(' mom. '))
else:
momentnummer = None
(fixedline, upphor, ikrafttrader) = self.andringsDatum(firstline)
# L\xe4s f\xf6rbi '/Upph\xf6r [...]/' och '/Ikrafttr\xe4der [...]/'-str\xe4ngarna
self.reader.read(len(firstline) - len(fixedline))
kwargs = {'ordinal': paragrafnummer}
if upphor:
kwargs['upphor'] = upphor
if ikrafttrader:
kwargs['ikrafttrader'] = ikrafttrader
if momentnummer:
kwargs['moment'] = momentnummer
p = Paragraf(**kwargs)
state_handler = self.makeStycke
res = self.makeStycke()
p.append(res)
while not self.reader.eof():
state_handler = self.guess_state()
if state_handler in (self.makeParagraf,
self.makeUpphavdParagraf,
self.makeKapitel,
self.makeUpphavtKapitel,
self.makeAvdelning,
self.makeRubrik,
self.makeOvergangsbestammelser,
self.makeBilaga):
self.log.debug(" Paragraf %s f\xe4rdig" % paragrafnummer)
return p
elif state_handler == self.blankline:
state_handler() # Bara att sl\xe4nga bort
elif state_handler == self.makeOvergangsbestammelse:
self.log.debug(" Paragraf %s f\xe4rdig" % paragrafnummer)
self.log.warning("%s: Avskiljande rubrik saknas mellan f\xf6rfattningstext och \xf6verg\xe5ngsbest\xe4mmelser" % self.id)
return p
else:
assert state_handler == self.makeStycke, "guess_state returned %s, not makeStycke" % state_handler.__name__
#if state_handler != self.makeStycke:
# self.log.warning("behandlar '%s...' som stycke, inte med %s" % (self.reader.peekline()[:30], state_handler.__name__))
res = self.makeStycke()
p.append(res)
# eof occurred
return p
def makeStycke(self):
self.log.debug(
" Nytt stycke: '%s...'" % self.reader.peekline()[:30])
s = Stycke([util.normalize_space(self.reader.readparagraph())])
while not self.reader.eof():
#self.log.debug(" makeStycke: calling guess_state ")
state_handler = self.guess_state()
#self.log.debug(" makeStycke: guess_state returned %s " % state_handler.__name__)
if state_handler in (self.makeNumreradLista,
self.makeBokstavslista,
self.makeStrecksatslista,
self.makeTabell):
res = state_handler()
s.append(res)
elif state_handler == self.blankline:
state_handler() # Bara att sl\xe4nga bort
else:
#self.log.debug(" makeStycke: ...we're done")
return s
return s
    def makeNumreradLista(self):
        """Parse a numbered list into a NumreradLista; letter/dash sub-lists
        are appended to the most recent list item."""
        n = NumreradLista()
        while not self.reader.eof():
            # First of all, assume that the next paragraph is another list
            # item (some conceivable paragraphs could also match tabell etc.)
            if self.isNumreradLista():
                state_handler = self.makeNumreradLista
            else:
                state_handler = self.guess_state()
            if state_handler not in (self.blankline,
                                     self.makeNumreradLista,
                                     self.makeBokstavslista,
                                     self.makeStrecksatslista):
                return n
            elif state_handler == self.blankline:
                state_handler()
            else:
                if state_handler == self.makeNumreradLista:
                    self.log.debug("          Ny punkt: '%s...'" %
                                   self.reader.peekline()[:30])
                    listelement_ordinal = self.idOfNumreradLista()
                    li = Listelement(ordinal=listelement_ordinal)
                    p = self.reader.readparagraph()
                    li.append(p)
                    n.append(li)
                else:
                    # this must be a sublist
                    # NOTE(review): if a sublist appeared before any numbered
                    # item, n[-1] here and listelement_ordinal below would be
                    # invalid/unbound -- presumably guess_state guarantees the
                    # first paragraph is a numbered item; confirm.
                    res = state_handler()
                    n[-1].append(res)
                self.log.debug(
                    "          Punkt %s avslutad" % listelement_ordinal)
        return n
def makeBokstavslista(self):
n = Bokstavslista()
while not self.reader.eof():
state_handler = self.guess_state()
if state_handler not in (self.blankline, self.makeBokstavslista):
return n
elif state_handler == self.blankline:
res = state_handler()
else:
self.log.debug(" Ny underpunkt: '%s...'" %
self.reader.peekline()[:30])
listelement_ordinal = self.idOfBokstavslista()
li = Listelement(ordinal=listelement_ordinal)
p = self.reader.readparagraph()
li.append(p)
n.append(li)
self.log.debug(" Underpunkt %s avslutad" %
listelement_ordinal)
return n
def makeStrecksatslista(self):
n = Strecksatslista()
cnt = 0
while not self.reader.eof():
state_handler = self.guess_state()
if state_handler not in (self.blankline, self.makeStrecksatslista):
return n
elif state_handler == self.blankline:
res = state_handler()
else:
self.log.debug(" Ny strecksats: '%s...'" %
self.reader.peekline()[:60])
cnt += 1
p = self.reader.readparagraph()
li = Listelement(ordinal=str(cnt))
li.append(p)
n.append(li)
self.log.debug(" Strecksats #%s avslutad" % cnt)
return n
def blankline(self):
self.reader.readline()
return None
def eof(self):
return None
    def makeOvergangsbestammelser(self, rubrik_saknas=False):  # Swedish: transitional provisions
        """Parse the block of transitional provisions at the end of the act.

        rubrik_saknas: True when the introducing headline is missing, in
        which case a placeholder heading is synthesised instead of read."""
        # It is debatable whether these should be seen as part of the
        # consolidated statute text at all, but it seems customary to include
        # at least those that may be relevant for current law.
        self.log.debug("    Ny \xd6verg\xe5ngsbest\xe4mmelser")
        if rubrik_saknas:
            rubrik = "[\xd6verg\xe5ngsbest\xe4mmelser]"
        else:
            rubrik = self.reader.readparagraph()
        obs = Overgangsbestammelser(rubrik=rubrik)
        while not self.reader.eof():
            state_handler = self.guess_state()
            if state_handler == self.makeBilaga:
                return obs
            res = state_handler()
            if res is not None:
                if state_handler != self.makeOvergangsbestammelse:
                    # assume these are the initial transitional provisions;
                    # they carry no SFS number of their own, so borrow one
                    if hasattr(self, 'id') and '/' in self.id:
                        sfsnr = self.id
                        self.log.warning("%s: \xd6verg\xe5ngsbest\xe4mmelsen saknar SFS-nummer - antar [%s]" % (self.id, sfsnr))
                    else:
                        sfsnr = '0000:000'
                        self.log.warning("(unknown): \xd6verg\xe5ngsbest\xe4mmelsen saknar ett SFS-nummer - antar %s" % (sfsnr))
                    obs.append(Overgangsbestammelse([res], sfsnr=sfsnr))
                else:
                    obs.append(res)
        return obs
def makeOvergangsbestammelse(self):
p = self.reader.readline()
self.log.debug(" Ny \xd6verg\xe5ngsbest\xe4mmelse: %s" % p)
ob = Overgangsbestammelse(sfsnr=p)
while not self.reader.eof():
state_handler = self.guess_state()
if state_handler in (self.makeOvergangsbestammelse,
self.makeBilaga):
return ob
res = state_handler()
if res is not None:
ob.append(res)
return ob
def makeBilaga(self): # svenska: bilaga
rubrik = self.reader.readparagraph()
(rubrik, upphor, ikrafttrader) = self.andringsDatum(rubrik)
kwargs = {'rubrik': rubrik}
if upphor:
kwargs['upphor'] = upphor
if ikrafttrader:
kwargs['ikrafttrader'] = ikrafttrader
b = Bilaga(**kwargs)
self.log.debug(" Ny bilaga: %s" % rubrik)
while not self.reader.eof():
state_handler = self.guess_state()
if state_handler in (self.makeBilaga,
self.makeOvergangsbestammelser):
return b
res = state_handler()
if res is not None:
b.append(res)
return b
def andringsDatum(self, line, match=False):
# Hittar \xe4ndringsdatumdirektiv i line. Om match, matcha fr\xe5n str\xe4ngens b\xf6rjan, annars s\xf6k i hela str\xe4ngen.
dates = {'ikrafttrader': None,
'upphor': None}
for (regex, key) in list({self.re_RevokeDate: 'upphor',
self.re_RevokeAuthorization: 'upphor',
self.re_EntryIntoForceDate: 'ikrafttrader',
self.re_EntryIntoForceAuthorization: 'ikrafttrader'}.items()):
if match:
m = regex.match(line)
else:
m = regex.search(line)
if m:
if len(m.groups()) == 3:
dates[key] = datetime(int(m.group(1)),
int(m.group(2)),
int(m.group(3)))
else:
dates[key] = m.group(1)
line = regex.sub('', line)
return (line.strip(), dates['upphor'], dates['ikrafttrader'])
def guess_state(self):
# sys.stdout.write(" Guessing for '%s...'" % self.reader.peekline()[:30])
try:
if self.reader.peekline() == "":
handler = self.blankline
elif self.isAvdelning():
handler = self.makeAvdelning
elif self.isUpphavtKapitel():
handler = self.makeUpphavtKapitel
elif self.isUpphavdParagraf():
handler = self.makeUpphavdParagraf
elif self.isKapitel():
handler = self.makeKapitel
elif self.isParagraf():
handler = self.makeParagraf
elif self.isTabell():
handler = self.makeTabell
elif self.isOvergangsbestammelser():
handler = self.makeOvergangsbestammelser
elif self.isOvergangsbestammelse():
handler = self.makeOvergangsbestammelse
elif self.isBilaga():
handler = self.makeBilaga
elif self.isNumreradLista():
handler = self.makeNumreradLista
elif self.isStrecksatslista():
handler = self.makeStrecksatslista
elif self.isBokstavslista():
handler = self.makeBokstavslista
elif self.isRubrik():
handler = self.makeRubrik
else:
handler = self.makeStycke
except IOError:
handler = self.eof
# sys.stdout.write("%r\n" % handler)
return handler
def isAvdelning(self):
# The start of a part ("avdelning") should be a single line
if '\n' in self.reader.peekparagraph() != "":
return False
return self.idOfAvdelning() is not None
def idOfAvdelning(self):
# There are four main styles of parts ("Avdelning") in swedish law
#
# 1998:808: "F\xd6RSTA AVDELNINGEN\n\n\xd6VERGRIPANDE BEST\xc4MMELSER"
# (also in 1932:130, 1942:740, 1956:623, 1957:297, 1962:381, 1962:700,
# 1970:988, 1970:994, 1971:235 (revoked), 1973:370 (revoked),
# 1977:263 (revoked), 1987:230, 1992:300 (revoked), 1994:200,
# 1998:674, 2000:192, 2005:104 and 2007:528 -- not always in all
# uppercase. However, the initial line "F\xd6RSTA AVDELNININGEN"
# (in any casing) is always followed by another line that
# describes/labels the part.)
#
# 1979:1152: "Avd. 1. Best\xe4mmelser om taxering av fastighet"
# (also in 1979:1193 (revoked))
#
# 1994:1009: "Avdelning I Fartyg"
#
# 1999:1229: "AVD. I INNEH\XE5LL OCH DEFINITIONER"
#
# 2009:400: "AVDELNING I. INLEDANDE BEST\xc4MMELSER"
#
# and also "1 avd." (in 1959:287 (revoked), 1959:420 (revoked)
#
# The below code checks for all these patterns in turn
#
# The variant "Avdelning 1" has also been found, but only in
# appendixes
p = self.reader.peekline()
if p.lower().endswith("avdelningen") and len(p.split()) == 2:
ordinal = p.split()[0]
return str(self._swedish_ordinal(ordinal))
elif p.startswith("AVD. ") or p.startswith("AVDELNING "):
roman = re.split(r'\s+', p)[1]
if roman.endswith("."):
roman = roman[:-1]
if self.re_roman_numeral_matcher(roman):
return str(util.from_roman(roman))
elif p.startswith("Avdelning "):
roman = re.split(r'\s+', p)[1]
if self.re_roman_numeral_matcher(roman):
return str(util.from_roman(roman))
elif p[2:6] == "avd.":
if p[0].isdigit():
return p[0]
elif p.startswith("Avd. "):
idstr = re.split(r'\s+', p)[1]
if idstr.isdigit():
return idstr
return None
def isUpphavtKapitel(self):
match = self.re_ChapterRevoked(self.reader.peekline())
return match is not None
def isKapitel(self, p=None):
return self.idOfKapitel(p) is not None
    def idOfKapitel(self, p=None):
        """Return the chapter ordinal ("1", "1 a", ...) if *p* (default: the
        upcoming paragraph, newlines flattened) starts a chapter heading,
        else None. Several filters weed out lines that merely *reference*
        chapters."""
        if not p:
            p = self.reader.peekparagraph().replace("\n", " ")
        # '1 a kap.' -- almost always a headline, regardless if it
        # streches several lines but there are always special cases
        # (1982:713 1 a kap. 7 §)
        #m = re.match(r'^(\d+( \w|)) [Kk]ap.',p)
        m = self.re_ChapterId(p)
        if m:
            # even though something might look like the start of a chapter, it's often just the
            # start of a paragraph in a section that lists the names of chapters. These following
            # attempts to filter these out by looking for some typical line endings for those cases
            if (p.endswith(",") or
                p.endswith(";") or
                # p.endswith(")") or # but in some cases, a chapter actually ends in ), eg 1932:131
                p.endswith(" och") or # in unlucky cases, a chapter heading might span two lines in a way that the first line ends with "och" (eg 1998:808 kap. 3)
                p.endswith(" om") or
                p.endswith(" samt") or
                (p.endswith(".") and not
                 (m.span()[1] == len(p) or # if the ENTIRE p is eg "6 kap." (like it is in 1962:700)
                  p.endswith(" m.m.") or
                  p.endswith(" m. m.") or
                  p.endswith(" m.fl.") or
                  p.endswith(" m. fl.") or
                  self.re_ChapterRevoked(p)))): # If the entire chapter's
                                                # been revoked, we still
                                                # want to count it as a
                                                # chapter
                # sys.stdout.write("chapter_id: '%s' failed second check" % p)
                return None
            # sometimes (2005:1207) it's a headline, referencing a
            # specific section somewhere else - if the "1 kap. " is
            # immediately followed by "5 § " then that's probably the
            # case
            if (p.endswith(" §") or
                p.endswith(" §§") or
                (p.endswith(" stycket") and " § " in p)):
                return None
            # "If it looks like a table it is probably not a chapter
            # heading" -- removed: it triggered no regression test and
            # caused bug 168
            #if self.isTabell(p, requireColumns=True):
            #    return None
            else:
                return m.group(1)
        else:
            # sys.stdout.write("chapter_id: '%s' failed first check" % p[:40])
            return None
    def isRubrik(self, p=None):
        """True iff *p* (default: the upcoming paragraph) looks like a
        headline.

        A series of negative filters is applied first (casing, length,
        looks-like-paragraph/list, suspicious endings); finally the
        following paragraph is inspected, but only when *p* was peeked
        directly (indirect calls skip that step to avoid infinite
        recursion). As a side effect, a headline followed by another
        headline promotes current_headline_level to 1."""
        if p is None:
            p = self.reader.peekparagraph()
            indirect = False
        else:
            indirect = True
        self.trace['rubrik'].debug("isRubrik (%s): indirect=%s" % (
            p[:50], indirect))
        if len(p) > 0 and p[0].lower() == p[0] and not p.startswith("/Rubriken"):
            self.trace['rubrik'].debug(
                "isRubrik (%s): starts with lower-case" % (p[:50]))
            return False
        # self.trace['rubrik'].debug("isRubrik: p=%s" % p)
        if len(p) > 110:  # it shouldn't be too long, but some headlines are insanely verbose
            self.trace['rubrik'].debug("isRubrik (%s): too long" % (p[:50]))
            return False
        # A headline should not look like the start of a paragraph or a numbered list
        if self.isParagraf(p):
            self.trace['rubrik'].debug(
                "isRubrik (%s): looks like para" % (p[:50]))
            return False
        if self.isNumreradLista(p):
            self.trace['rubrik'].debug(
                "isRubrik (%s): looks like numreradlista" % (p[:50]))
            return False
        if self.isStrecksatslista(p):
            self.trace['rubrik'].debug(
                "isRubrik (%s): looks like strecksatslista" % (p[:50]))
            return False
        if (p.endswith(".") and  # a headline never ends with a period, unless it ends with "m.m." or similar
            not (p.endswith("m.m.") or
                 p.endswith("m. m.") or
                 p.endswith("m.fl.") or
                 p.endswith("m. fl."))):
            self.trace['rubrik'].debug(
                "isRubrik (%s): ends with period" % (p[:50]))
            return False
        if (p.endswith(",") or  # a headline never ends with these characters
            p.endswith(":") or
            p.endswith("samt") or
            p.endswith("eller")):
            self.trace['rubrik'].debug(
                "isRubrik (%s): ends with comma/colon etc" % (p[:50]))
            return False
        if self.re_ChangeNote.search(p):  # eg 1994:1512 8 §
            return False
        if p.startswith("/") and p.endswith("./"):
            self.trace['rubrik'].debug(
                "isRubrik (%s): Seems like a comment" % (p[:50]))
            return False
        try:
            nextp = self.reader.peekparagraph(2)
        except IOError:
            nextp = ''
        # finally, it should be followed by a paragraph - but this
        # test is only done if this check is not indirect (to avoid
        # infinite recursion)
        if not indirect:
            if (not self.isParagraf(nextp)) and (not self.isRubrik(nextp)):
                self.trace['rubrik'].debug("isRubrik (%s): is not followed by a paragraf or rubrik" % (p[:50]))
                return False
        # if this headline is followed by a second headline, that
        # headline and all subsequent headlines should be regardes as
        # sub-headlines
        if (not indirect) and self.isRubrik(nextp):
            self.current_headline_level = 1
        # ok, all tests passed, this might be a headline!
        self.trace['rubrik'].debug(
            "isRubrik (%s): All tests passed!" % (p[:50]))
        return True
def isUpphavdParagraf(self):
match = self.re_SectionRevoked(self.reader.peekline())
return match is not None
def isParagraf(self, p=None):
if not p:
p = self.reader.peekparagraph()
self.trace['paragraf'].debug(
"isParagraf: called w/ '%s' (peek)" % p[:30])
else:
self.trace['paragraf'].debug("isParagraf: called w/ '%s'" % p[:30])
paragrafnummer = self.idOfParagraf(p)
if paragrafnummer is None:
self.trace['paragraf'].debug(
"isParagraf: '%s': no paragrafnummer" % p[:30])
return False
if paragrafnummer == '1':
self.trace['paragraf'].debug(
"isParagraf: paragrafnummer = 1, return true")
return True
# now, if this sectionid is less than last section id, the
# section is probably just a reference and not really the
# start of a new section. One example of that is
# /1991:1469#K1P7S1.
if util.numcmp(paragrafnummer, self.current_section) < 0:
self.trace['paragraf'].debug("isParagraf: section numbering compare failed (%s <= %s)" % (paragrafnummer, self.current_section))
return False
# a similar case exists in 1994:260 and 2007:972, but there
# the referenced section has a number larger than last section
# id. Try another way to detect this by looking at the first
# character in the paragraph - if it's in lower case, it's
# probably not a paragraph.
firstcharidx = (len(paragrafnummer) + len(' § '))
# print "%r: %s" % (p, firstcharidx)
if ((len(p) > firstcharidx) and
(p[len(paragrafnummer) + len(' § ')].islower())):
self.trace['paragraf'].debug("isParagraf: section '%s' did not start with uppercase" % p[len(paragrafnummer) + len(' § '):30])
return False
return True
def idOfParagraf(self, p):
match = self.re_SectionId.match(p)
if match:
return match.group(1)
else:
match = self.re_SectionIdOld.match(p)
if match:
return match.group(1)
else:
return None
# Om assumeTable \xe4r True \xe4r testerna n\xe5got gener\xf6sare \xe4n
# annars. Den \xe4r False f\xf6r den f\xf6rsta raden i en tabell, men True
# f\xf6r de efterf\xf6ljande.
#
# Om requireColumns \xe4r True kr\xe4vs att samtliga rader \xe4r
# spaltuppdelade
def isTabell(self, p=None, assumeTable=False, requireColumns=False):
shortline = 55
shorterline = 52
if not p:
p = self.reader.peekparagraph()
# Vissa snedformatterade tabeller kan ha en h\xf6gercell som g\xe5r
# ned en rad f\xf6r l\xe5ngt gentemot n\xe4sta rad, som har en tom
# h\xf6gercell:
# xxx xxx xxxxxx xxxx xx xxxxxx xx
# xxxxx xx xx x xxxxxx xxx xxx x
# xx xxx xxx xxx
# xxx xx xxxxx xx
# xx xxx xx x xx
# dvs n\xe5got som egentligen \xe4r tv\xe5 stycken l\xe4ses in som
# ett. F\xf6rs\xf6k hitta s\xe5dana fall, och titta i s\xe5 fall endast p\xe5
# f\xf6rsta stycket
lines = []
emptyleft = False
for l in p.split(self.reader.linesep):
if l.startswith(' '):
emptyleft = True
lines.append(l)
else:
if emptyleft:
self.trace['tabell'].debug("isTabell('%s'): Snedformatterade tabellrader" % (p[:20]))
break
else:
lines.append(l)
numlines = len(lines)
# Heuristiken f\xf6r att gissa om detta stycke \xe4r en tabellrad:
# Om varje rad
# 1. \xc4r kort (indikerar en tabellrad med en enda v\xe4nstercell)
self.trace['tabell'].debug("assumeTable: %s numlines: %s requireColumns: %s " % (assumeTable, numlines, requireColumns))
if (assumeTable or numlines > 1) and not requireColumns:
matches = [l for l in lines if len(l) < shortline]
if numlines == 1 and ' ' in lines[0]:
self.trace['tabell'].debug("isTabell('%s'): Endast en rad, men tydlig kolumnindelning" % (p[:20]))
return True
if len(matches) == numlines:
self.trace['tabell'].debug("isTabell('%s'): Alla rader korta, unders\xf6ker undantag" % (p[:20]))
# generellt undantag: Om en tabells f\xf6rsta rad har
# enbart v\xe4nsterkolumn M\XE5STE den f\xf6ljas av en
# spaltindelad rad - annars \xe4r det nog bara tv\xe5 korta
# stycken, ett kort stycke f\xf6ljt av kort rubrik, eller
# liknande.
try:
p2 = self.reader.peekparagraph(2)
except IOError:
p2 = ''
try:
p3 = self.reader.peekparagraph(3)
except IOError:
p3 = ''
if not assumeTable and not self.isTabell(p2,
assumeTable=True,
requireColumns=True):
self.trace['tabell'].debug("isTabell('%s'): generellt undantag fr\xe5n alla rader korta-regeln" % (p[:20]))
return False
elif numlines == 1:
# Om stycket har en enda rad *kan* det vara en kort
# rubrik -- kolla om den f\xf6ljs av en paragraf, is\xe5fall
# \xe4r nog tabellen slut
# FIXME: Kolla om inte generella undantaget borde
# f\xe5nga det h\xe4r. Testfall
# regression-tabell-foljd-av-kort-rubrik.txt och
# temporal-paragraf-med-tabell.txt
if self.isParagraf(p2):
self.trace['tabell'].debug("isTabell('%s'): Specialundantag: f\xf6ljs av Paragraf, inte Tabellrad" % (p[:20]))
return False
if self.isRubrik(p2) and self.isParagraf(p3):
self.trace['tabell'].debug("isTabell('%s'): Specialundantag: f\xf6ljs av Rubrik och sedan Paragraf, inte Tabellrad" % (p[:20]))
return False
# Om stycket \xe4r *exakt* detta signalerar det nog
# \xf6verg\xe5ngen fr\xe5n tabell (kanske i slutet p\xe5 en
# bilaga, som i SekrL) till \xf6verg\xe5ngsbest\xe4mmelserna
if self.isOvergangsbestammelser():
self.trace['tabell'].debug("isTabell('%s'): Specialundantag: \xd6verg\xe5ngsbest\xe4mmelser" % (p[:20]))
return False
if self.isBilaga():
self.trace['tabell'].debug("isTabell('%s'): Specialundantag: Bilaga" % (p[:20]))
return False
# Detta undantag beh\xf6ves f\xf6rmodligen inte n\xe4r genererella undantaget anv\xe4nds
#elif (numlines == 2 and
# self.isNumreradLista() and (
# lines[1].startswith('F\xf6rordning (') or
# lines[1].startswith('Lag ('))):
#
# self.trace['tabell'].debug("isTabell('%s'): Specialundantag: ser ut som nummerpunkt f\xf6ljd av \xe4ndringsf\xf6rfattningsh\xe4nvisning" % (p[:20]))
# return False
# inget av undantagen till\xe4mpliga, huvudregel 1 g\xe4ller
self.trace['tabell'].debug("isTabell('%s'): %s rader, alla korta" % (p[:20], numlines))
return True
# 2. Har mer \xe4n ett mellanslag i f\xf6ljd p\xe5 varje rad (spaltuppdelning)
matches = [l for l in lines if ' ' in l]
if numlines > 1 and len(matches) == numlines:
self.trace['tabell'].debug("isTabell('%s'): %s rader, alla spaltuppdelade" % (p[:20], numlines))
return True
# 3. \xc4r kort ELLER har spaltuppdelning
self.trace['tabell'].debug("test 3")
if (assumeTable or numlines > 1) and not requireColumns:
self.trace['tabell'].debug("test 3.1")
matches = [l for l in lines if ' ' in l or len(l) < shorterline]
if len(matches) == numlines:
self.trace['tabell'].debug("isTabell('%s'): %s rader, alla korta eller spaltuppdelade" % (p[:20], numlines))
return True
# 3. \xc4r enrading med TYDLIG tabelluppdelning
if numlines == 1 and ' ' in l:
self.trace['tabell'].debug("isTabell('%s'): %s rader, alla spaltuppdelade" % (p[:20], numlines))
return True
self.trace['tabell'].debug("isTabell('%s'): %s rader, inga test matchade (aT:%r, rC: %r)" % (p[:20], numlines, assumeTable, requireColumns))
return False
    def makeTabell(self):
        """Parse a table: consecutive table-row paragraphs, optionally
        interleaved with change-date marker lines that tag the following
        rows with upphor/ikrafttrader attributes."""
        # pcnt tracks marker state: > 0 while rows are covered by an
        # "upphor" marker, < 0 while covered by an "ikrafttrader" marker,
        # 0 when unmarked.
        # NOTE(review): "pcnt = -pcnt + 1" below yields +1 when pcnt == 0,
        # which looks inconsistent with the sign convention -- confirm.
        pcnt = 0
        t = Tabell()
        # disable whitespace stripping while reading; column positions matter
        autostrip = self.reader.autostrip
        self.reader.autostrip = False
        p = self.reader.readparagraph()
        self.trace['tabell'].debug("makeTabell: 1st line: '%s'" % p[:30])
        (trs, tabstops) = self.makeTabellrad(p)
        t.extend(trs)
        while (not self.reader.eof()):
            (l, upphor, ikrafttrader) = self.andringsDatum(
                self.reader.peekline(), match=True)
            if upphor:
                current_upphor = upphor
                self.reader.readline()
                pcnt = 1
            elif ikrafttrader:
                current_ikrafttrader = ikrafttrader
                current_upphor = None
                self.reader.readline()
                pcnt = -pcnt + 1
            elif self.isTabell(assumeTable=True):
                kwargs = {}
                if pcnt > 0:
                    kwargs['upphor'] = current_upphor
                    pcnt += 1
                elif pcnt < 0:
                    kwargs['ikrafttrader'] = current_ikrafttrader
                    pcnt += 1
                elif pcnt == 0:
                    current_ikrafttrader = None
                p = self.reader.readparagraph()
                if p:
                    # reuse the tabstops detected on the previous row
                    (trs, tabstops) = self.makeTabellrad(
                        p, tabstops, kwargs=kwargs)
                    t.extend(trs)
            else:
                # next paragraph is not a table row: the table has ended
                self.reader.autostrip = autostrip
                return t
        self.reader.autostrip = autostrip
        return t
    def makeTabellrad(self, p, tabstops=None, kwargs={}):
        """Split paragraph *p* into one or more table rows (Tabellrad) of
        cells (Tabellcell).

        tabstops carries the column positions detected on the previous row
        (None means: detect from scratch); kwargs supplies extra attributes
        for every produced Tabellrad. Returns (list of Tabellrad, tabstops).
        """
        # NOTE(review): mutable default for *kwargs* -- it is only read here,
        # never mutated, so it is harmless today, but fragile; confirm before
        # relying on it.
        # The algorithm is adapted to handle tables where the text is not
        # always neatly arranged in columns, as is the case with the SFSR
        # data (presumably because of some broken tab-to-space conversion
        # somewhere).
        def makeTabellcell(text):
            # dehyphenate multi-character cell text, then normalize spacing
            if len(text) > 1:
                text = self.re_dehyphenate("", text)
            return Tabellcell([util.normalize_space(text)])
        cols = ['', '', '', '', '', '', '', '']
        # No table will ever have more than eight columns
        if tabstops:
            statictabstops = True  # Use the tabstop positions we got from the previous row
        else:
            statictabstops = False  # Build new tabstop positions from scratch
            self.trace['tabell'].debug("rebuilding tabstops")
            tabstops = [0, 0, 0, 0, 0, 0, 0, 0]
        lines = p.split(self.reader.linesep)
        numlines = len([x for x in lines if x])
        potentialrows = len(
            [x for x in lines if x and (x[0].isupper() or x[0].isdigit())])
        linecount = 0
        self.trace['tabell'].debug(
            "numlines: %s, potentialrows: %s" % (numlines, potentialrows))
        if (numlines > 1 and numlines == potentialrows):
            # every line starts a new logical table row
            self.trace['tabell'].debug(
                'makeTabellrad: Detta verkar vara en tabellrad-per-rad')
            singlelinemode = True
        else:
            singlelinemode = False
        rows = []
        emptyleft = False
        for l in lines:
            if l == "":
                continue
            linecount += 1
            charcount = 0
            spacecount = 0
            lasttab = 0
            colcount = 0
            if singlelinemode:
                cols = ['', '', '', '', '', '', '', '']
            if l[0] == ' ':
                # empty left-hand cell: this line continues the current row
                emptyleft = True
            else:
                if emptyleft:
                    # back to a filled left-hand cell: start a new row
                    self.trace['tabell'].debug('makeTabellrad: skapar ny tabellrad pga snedformatering')
                    rows.append(cols)
                    cols = ['', '', '', '', '', '', '', '']
                    emptyleft = False
            for c in l:
                charcount += 1
                if c == ' ':
                    spacecount += 1
                else:
                    if spacecount > 1:  # we have hit a new table cell
                                        # - fill in the old one
                        # Add a newline to replace the one we cut off -
                        # surplus whitespace is trimmed later
                        cols[colcount] += '\n' + l[
                            lasttab:charcount - (spacecount + 1)]
                        lasttab = charcount - 1
                        # for handling of empty left-hand cells
                        if linecount > 1 or statictabstops:
                            if tabstops[colcount + 1] + 7 < charcount:  # allow a misalignment of at most seven characters
                                if len(tabstops) <= colcount + 2:
                                    tabstops.append(0)
                                    cols.append('')
                                self.trace['tabell'].debug('colcount is %d, # of tabstops is %d' % (colcount, len(tabstops)))
                                self.trace['tabell'].debug('charcount shoud be max %s, is %s - adjusting to next tabstop (%s)' % (tabstops[colcount + 1] + 5, charcount, tabstops[colcount + 2]))
                                if tabstops[colcount + 2] != 0:
                                    self.trace['tabell'].debug(
                                        'safe to advance colcount')
                                    colcount += 1
                        colcount += 1
                        if len(tabstops) <= charcount:
                            tabstops.append(0)
                            cols.append('')
                        tabstops[colcount] = charcount
                        self.trace['tabell'].debug(
                            "Tabstops now: %r" % tabstops)
                    spacecount = 0
            # flush the tail of the line into the current cell
            cols[colcount] += '\n' + l[lasttab:charcount]
            self.trace['tabell'].debug("Tabstops: %r" % tabstops)
            if singlelinemode:
                self.trace['tabell'].debug(
                    'makeTabellrad: skapar ny tabellrad')
                rows.append(cols)
        if not singlelinemode:
            rows.append(cols)
        self.trace['tabell'].debug(repr(rows))
        res = []
        for r in rows:
            tr = Tabellrad(**kwargs)
            emptyok = True
            for c in r:
                # keep leading (possibly empty) cells; once a non-blank cell
                # has been seen, skip the remaining empty ones
                if c or emptyok:
                    tr.append(makeTabellcell(c.replace("\n", " ")))
                    if c.strip() != '':
                        emptyok = False
            res.append(tr)
        return (res, tabstops)
def isFastbredd(self):
return False
def makeFastbredd(self):
return None
def isNumreradLista(self, p=None):
return self.idOfNumreradLista(p) is not None
def idOfNumreradLista(self, p=None):
if not p:
p = self.reader.peekline()
self.trace['numlist'].debug(
"idOfNumreradLista: called directly (%s)" % p[:30])
else:
self.trace['numlist'].debug(
"idOfNumreradLista: called w/ '%s'" % p[:30])
match = self.re_DottedNumber.match(p)
if match is not None:
self.trace['numlist'].debug(
"idOfNumreradLista: match DottedNumber")
return match.group(1).replace(" ", "")
else:
match = self.re_NumberRightPara(p)
if match is not None:
self.trace['numlist'].debug(
"idOfNumreradLista: match NumberRightPara")
return match.group(1).replace(" ", "")
self.trace['numlist'].debug("idOfNumreradLista: no match")
return None
def isStrecksatslista(self, p=None):
if not p:
p = self.reader.peekline()
return (p.startswith("- ") or
p.startswith("\x96 ") or
p.startswith("--"))
def isBokstavslista(self):
return self.idOfBokstavslista() is not None
def idOfBokstavslista(self):
p = self.reader.peekline()
match = self.re_Bokstavslista.match(p)
if match is not None:
return match.group(1).replace(" ", "")
return None
    def isOvergangsbestammelser(self):
        """True iff the next line is the headline that introduces the block
        of transitional provisions.

        Near-misses (typos) of the known separator strings are accepted
        with a warning; a separator followed by an ordinary section is
        treated as a plain headline instead."""
        separators = ['\xd6verg\xe5ngsbest\xe4mmelser',
                      'Ikrafttr\xe4dande- och \xf6verg\xe5ngsbest\xe4mmelser',
                      '\xd6verg\xe5ngs- och ikrafttr\xe4dandebest\xe4mmelser']
        l = self.reader.peekline()
        if l not in separators:
            # accept a close fuzzy match, but warn about it; note that a
            # successful fuzzy match falls through to the check below
            fuzz = difflib.get_close_matches(l, separators, 1, 0.9)
            if fuzz:
                self.log.warning("%s: Antar att '%s' ska vara '%s'?" %
                                 (self.id, l, fuzz[0]))
            else:
                return False
        try:
            # if the separator "Overgangsbestammelser" (or similar) is
            # followed by a regular paragraph, it was probably not a
            # separator but an ordinary headline (occurs in a few law
            # texts)
            np = self.reader.peekparagraph(2)
            if self.isParagraf(np):
                return False
        except IOError:
            pass
        return True
def isOvergangsbestammelse(self):
return self.re_SimpleSfsId.match(self.reader.peekline())
def isBilaga(self):
(line, upphor, ikrafttrader) = self.andringsDatum(
self.reader.peekline())
return (line in ("Bilaga", "Bilaga*", "Bilaga *",
"Bilaga 1", "Bilaga 2", "Bilaga 3",
"Bilaga 4", "Bilaga 5", "Bilaga 6"))
_document_name_cache = {}
def _generateAnnotations(self, annotationfile, basefile):
p = LegalRef(LegalRef.LAGRUM)
# sfsnr = FilenameToSFSnr(basefile)
baseuri = self.canonical_uri(basefile)
start = time()
# Putting togeher a (non-normalized) RDF/XML file, suitable
# for XSLT inclusion in six easy steps
stuff = {}
#
# 1. all rinfo:Rattsfallsreferat that has baseuri as a
# rinfo:lagrum, either directly or through a chain of
# dct:isPartOf statements
start = time()
rattsfall = self._store_run_query(
"sparql/sfs_rattsfallsref.sq", uri=baseuri)
self.log.debug('%s: Orig: Selected %d legal cases (%.3f sec)',
basefile, len(rattsfall), time() - start)
stuff[baseuri] = {}
stuff[baseuri]['rattsfall'] = []
specifics = {}
for row in rattsfall:
if 'lagrum' not in row:
lagrum = baseuri
else:
# truncate 1998:204#P7S2 to just 1998:204#P7
if "S" in row['lagrum']:
lagrum = row['lagrum'][:row['lagrum'].index("S")]
else:
lagrum = row['lagrum']
specifics[row['id']] = True
# we COULD use a tricky defaultdict for stuff instead of
# this initializing code, but defauldicts don't pprint
# so pretty...
if not lagrum in stuff:
stuff[lagrum] = {}
if not 'rattsfall' in stuff[lagrum]:
stuff[lagrum]['rattsfall'] = []
record = {'id': row['id'],
'desc': row['desc'],
'uri': row['uri']}
# if one case references two or more paragraphs in a
# particular section (ie "6 kap 1 § 1 st. och 6 kap 1 § 2
# st.") we will get duplicates that we can't (easily)
# filter out in the SPARQL query. Filter them out here
# instead.
if not record in stuff[lagrum]['rattsfall']:
stuff[lagrum]['rattsfall'].append(record)
# remove cases that refer to the law itself and a specific
# paragraph (ie only keep cases that only refer to the law
# itself)
filtered = []
for r in stuff[baseuri]['rattsfall']:
if r['id'] not in specifics:
filtered.append(r)
stuff[baseuri]['rattsfall'] = filtered
# 2. all law sections that has a dct:references that matches this (using dct:isPartOf).
start = time()
#inboundlinks = self._store_run_query("sparql/sfs_inboundlinks_orig.sq",uri=baseuri)
#self.log.debug('%s: Orig: Selected %d inbound links (%.3f sec)', basefile, len(inboundlinks), time()-start)
start = time()
inboundlinks = self._store_run_query(
"sparql/sfs_inboundlinks.sq", uri=baseuri)
self.log.debug('%s: New: Selected %d inbound links (%.3f sec)',
basefile, len(inboundlinks), time() - start)
# inboundlinks = []
self.log.debug('%s: Selected %d inbound links (%.3f sec)',
basefile, len(inboundlinks), time() - start)
stuff[baseuri]['inboundlinks'] = []
# mapping <http://rinfo.lagrummet.se/publ/sfs/1999:175> =>
# "R\xe4ttsinformationsf\xf6rordning (1999:175)"
doctitles = {}
specifics = {}
for row in inboundlinks:
if 'lagrum' not in row:
lagrum = baseuri
else:
# truncate 1998:204#P7S2 to just 1998:204#P7
if "S" in row['lagrum']:
lagrum = row['lagrum'][:row['lagrum'].index("S")]
else:
lagrum = row['lagrum']
lagrum = row['lagrum']
specifics[row['uri']] = True
# we COULD use a tricky defaultdict for stuff instead of
# this initializing code, but defauldicts don't pprint
# so pretty...
if not lagrum in stuff:
stuff[lagrum] = {}
if not 'inboundlinks' in stuff[lagrum]:
stuff[lagrum]['inboundlinks'] = []
#print "adding %s under %s" % (row['id'],lagrum)
stuff[lagrum]['inboundlinks'].append({'uri': row['uri']})
# remove inbound links that refer to the law itself plus at
# least one specific paragraph (ie only keep cases that only
# refer to the law itself)
filtered = []
for r in stuff[baseuri]['inboundlinks']:
if r['uri'] not in specifics:
filtered.append(r)
stuff[baseuri]['inboundlinks'] = filtered
# pprint (stuff)
# 3. all wikientries that dct:description this
start = time()
#wikidesc = self._store_run_query("sparql/sfs_wikientries_orig.sq",uri=baseuri)
#self.log.debug('%s: Orig: Selected %d wiki comments (%.3f sec)', basefile, len(wikidesc), time()-start)
start = time()
wikidesc = self._store_run_query(
"sparql/sfs_wikientries.sq", uri=baseuri)
self.log.debug('%s: New: Selected %d wiki comments (%.3f sec)',
basefile, len(wikidesc), time() - start)
# wikidesc = []
for row in wikidesc:
if not 'lagrum' in row:
lagrum = baseuri
else:
lagrum = row['lagrum']
if not lagrum in stuff:
stuff[lagrum] = {}
stuff[lagrum]['desc'] = row['desc']
self.log.debug('%s: Selected %d wiki comments (%.3f sec)',
basefile, len(wikidesc), time() - start)
# pprint(wikidesc)
# (4. eurlex.nu data (mapping CELEX ids to titles))
# (5. Propositionstitlar)
# 6. change entries for each section
# FIXME: we need to differentiate between additions, changes
# and deletions
start = time()
#changes = self._store_run_query("sparql/sfs_changes_orig.sq",uri=baseuri)
#self.log.debug('%s: Orig: Selected %d change annotations (%.3f sec)', basefile, len(changes), time()-start)
start = time()
changes = self._store_run_query("sparql/sfs_changes.sq", uri=baseuri)
self.log.debug('%s: New: Selected %d change annotations (%.3f sec)',
basefile, len(changes), time() - start)
# changes = []
for row in changes:
lagrum = row['lagrum']
if not lagrum in stuff:
stuff[lagrum] = {}
if not 'changes' in stuff[lagrum]:
stuff[lagrum]['changes'] = []
stuff[lagrum]['changes'].append({'uri': row['change'],
'id': row['id']})
# then, construct a single de-normalized rdf/xml dump, sorted
# by root/chapter/section/paragraph URI:s. We do this using
# raw XML, not RDFlib, to avoid normalizing the graph -- we
# need repetition in order to make the XSLT processing simple.
#
# The RDF dump looks something like:
#
# <rdf:RDF>
# <rdf:Description about="http://rinfo.lagrummet.se/publ/sfs/1998:204#P1">
# <rinfo:isLagrumFor>
# <rdf:Description about="http://rinfo.lagrummet.se/publ/dom/rh/2004:51">
# <dct:identifier>RH 2004:51</dct:identifier>
# <dct:description>Hemsida p\xe5 Internet. Fr\xe5ga om...</dct:description>
# </rdf:Description>
# </rinfo:isLagrumFor>
# <dct:description>Personuppgiftslagens syfte \xe4r att skydda...</dct:description>
# <rinfo:isChangedBy>
# <rdf:Description about="http://rinfo.lagrummet.se/publ/sfs/2003:104">
# <dct:identifier>SFS 2003:104</dct:identifier>
# <rinfo:proposition>
# <rdf:Description about="http://rinfo.lagrummet.se/publ/prop/2002/03:123">
# <dct:title>\xd6versyn av personuppgiftslagen</dct:title>
# <dct:identifier>Prop. 2002/03:123</dct:identifier>
# </rdf:Description>
# </rinfo:proposition>
# </rdf:Description>
# </rinfo:isChangedBy>
# </rdf:Description>
# </rdf:RDF>
start = time()
root_node = PET.Element("rdf:RDF")
for prefix in util.ns:
# we need this in order to make elementtree not produce
# stupid namespaces like "xmlns:ns0" when parsing an external
# string like we do below (the PET.fromstring call)
PET._namespace_map[util.ns[prefix]] = prefix
root_node.set("xmlns:" + prefix, util.ns[prefix])
for l in sorted(list(stuff.keys()), cmp=util.numcmp):
lagrum_node = PET.SubElement(root_node, "rdf:Description")
lagrum_node.set("rdf:about", l)
if 'rattsfall' in stuff[l]:
for r in stuff[l]['rattsfall']:
islagrumfor_node = PET.SubElement(
lagrum_node, "rinfo:isLagrumFor")
rattsfall_node = PET.SubElement(
islagrumfor_node, "rdf:Description")
rattsfall_node.set("rdf:about", r['uri'])
id_node = PET.SubElement(rattsfall_node, "dct:identifier")
id_node.text = r['id']
desc_node = PET.SubElement(
rattsfall_node, "dct:description")
desc_node.text = r['desc']
if 'inboundlinks' in stuff[l]:
inbound = stuff[l]['inboundlinks']
inboundlen = len(inbound)
prev_uri = None
for i in range(inboundlen):
if "#" in inbound[i]['uri']:
(uri, fragment) = inbound[i]['uri'].split("#")
else:
(uri, fragment) = (inbound[i]['uri'], None)
# 1) if the baseuri differs from the previous one,
# create a new dct:references node
if uri != prev_uri:
references_node = PET.Element("dct:references")
# 1.1) if the baseuri is the same as the uri
# for the law we're generating, place it first
if uri == baseuri:
# If the uri is the same as baseuri (the law
# we're generating), place it first.
lagrum_node.insert(0, references_node)
else:
lagrum_node.append(references_node)
# Find out the next uri safely
if (i + 1 < inboundlen):
next_uri = inbound[i + 1]['uri'].split("#")[0]
else:
next_uri = None
# If uri is the same as the next one OR uri is the
# same as baseuri, use relative form for creating
# dct:identifer
# print "uri: %s, next_uri: %s, baseuri: %s" % (uri[35:],next_uri[35:],baseuri[35:])
if (uri == next_uri) or (uri == baseuri):
form = "relative"
else:
form = "absolute"
inbound_node = PET.SubElement(
references_node, "rdf:Description")
inbound_node.set("rdf:about", inbound[i]['uri'])
id_node = PET.SubElement(inbound_node, "dct:identifier")
id_node.text = self.display_title(inbound[i]['uri'], form)
prev_uri = uri
if 'changes' in stuff[l]:
for r in stuff[l]['changes']:
ischanged_node = PET.SubElement(
lagrum_node, "rinfo:isChangedBy")
#rattsfall_node = PET.SubElement(islagrumfor_node, "rdf:Description")
#rattsfall_node.set("rdf:about",r['uri'])
id_node = PET.SubElement(ischanged_node, "rinfo:fsNummer")
id_node.text = r['id']
if 'desc' in stuff[l]:
desc_node = PET.SubElement(lagrum_node, "dct:description")
xhtmlstr = "<xht2:div xmlns:xht2='%s'>%s</xht2:div>" % (
util.ns['xht2'], stuff[l]['desc'])
xhtmlstr = xhtmlstr.replace(
' xmlns="http://www.w3.org/2002/06/xhtml2/"', '')
desc_node.append(PET.fromstring(xhtmlstr.encode('utf-8')))
util.indent_et(root_node)
tree = PET.ElementTree(root_node)
tmpfile = mktemp()
treestring = PET.tostring(root_node, encoding="utf-8").replace(
' xmlns:xht2="http://www.w3.org/2002/06/xhtml2/"', '', 1)
fp = open(tmpfile, "w")
fp.write(treestring)
fp.close()
#tree.write(tmpfile, encoding="utf-8")
util.replace_if_different(tmpfile, annotationfile)
os.utime(annotationfile, None)
self.log.debug(
'%s: Serialized annotation (%.3f sec)', basefile, time() - start)
    def Generate(self, basefile):
        """Transform the parsed XHTML2 file for *basefile* into browsable
        HTML via the sfs.xsl XSLT stylesheet.

        First (re)generates the annotation file (case law, inbound links,
        wiki comments, change entries) if any of its dependencies changed.
        Work that is already up to date is skipped, unless
        config.generateforce is set.
        """
        start = time()
        # basefiles are given as "1998:204" but stored on disk as "1998/204"
        basefile = basefile.replace(":", "/")
        infile = util.relpath(self._xmlFileName(basefile))
        outfile = util.relpath(self._htmlFileName(basefile))
        annotations = "%s/%s/intermediate/%s.ann.xml" % (
            self.config.datadir, self.alias, basefile)
        force = (self.config.generateforce is True)
        dependencies = self._load_deps(basefile)
        # the wiki commentary for this law is an implicit dependency (if
        # such a page exists)
        wiki_comments = "data/wiki/parsed/SFS/%s.xht2" % basefile
        if os.path.exists(wiki_comments):
            dependencies.append(wiki_comments)
        # regenerate the annotation file only when some dependency is
        # newer than it (or when forced)
        if not force and self._outfile_is_newer(dependencies, annotations):
            if os.path.exists(self._depsFileName(basefile)):
                self.log.debug("%s: All %s dependencies untouched in rel to %s" %
                               (basefile, len(dependencies), util.relpath(annotations)))
            else:
                self.log.debug("%s: Has no dependencies" % basefile)
        else:
            self.log.info("%s: Generating annotation file", basefile)
            start = time()
            self._generateAnnotations(annotations, basefile)
            # NOTE(review): workaround for a degrading triple store --
            # when annotation generation gets slow, the sesame webapp is
            # reloaded through tomcat's manager interface (credentials
            # are interpolated into a shell command; config values are
            # assumed trusted)
            if time() - start > 5:
                self.log.info("openrdf-sesame is getting slow, reloading")
                cmd = "curl -u %s:%s http://localhost:8080/manager/reload?path=/openrdf-sesame" % (self.config['tomcatuser'], self.config['tomcatpassword'])
                util.runcmd(cmd)
            else:
                sleep(0.5)  # let sesame catch its breath
        # if neither the parsed file nor the annotations are newer than
        # the generated HTML, there is nothing to do
        if not force and self._outfile_is_newer([infile, annotations], outfile):
            self.log.debug("%s: \xd6verhoppad", basefile)  # "skipped"
            return
        util.mkdir(os.path.dirname(outfile))
        #params = {'annotationfile':annotations}
        # FIXME: create a relative version of annotations, instead of
        # hardcoding self.config.datadir like below
        params = {'annotationfile':
                  '../data/sfs/intermediate/%s.ann.xml' % basefile}
        # FIXME: Use pkg_resources to get at sfs.xsl
        util.transform(__scriptdir__ + "/xsl/sfs.xsl",
                       infile,
                       outfile,
                       parameters=params,
                       validate=False)
        self.log.info(
            '%s: OK (%s, %.3f sec)', basefile, outfile, time() - start)
        return
def display_title(self, uri, form="absolute"):
parts = LegalURI.parse(uri)
res = ""
for (field, label) in (('chapter', 'kap.'),
('section', '§'),
('piece', 'st'),
('item', 'p')):
if field in parts and not (field == 'piece' and
parts[field] == '1' and
'item' not in parts):
res += "%s %s " % (parts[field], label)
if form == "absolute":
if parts['law'] not in self._document_name_cache:
baseuri = LegalURI.construct({'type': LegalRef.LAGRUM,
'law': parts['law']})
sq = """PREFIX dct:<http://purl.org/dc/terms/>
SELECT ?title WHERE {<%s> dct:title ?title }""" % baseuri
changes = self._store_select(sq)
if changes:
self._document_name_cache[parts[
'law']] = changes[0]['title']
else:
self._document_name_cache[parts[
'law']] = "SFS %s" % parts['law']
#print "Cache miss for %s (%s)" % (parts['law'],
# self._document_name_cache[parts['law']])
res += self._document_name_cache[parts['law']]
return res
elif form == "relative":
return res.strip()
else:
raise ValueError('unknown form %s' % form)
def CleanupAnnulled(self, basefile):
infile = self._xmlFileName(basefile)
outfile = self._htmlFileName(basefile)
if not os.path.exists(infile):
util.robust_remove(outfile)
@classmethod
def relate_all_setup(cls, config):
cls._build_mini_rdf()
    @classmethod
    def _build_mini_rdf(cls, config):
        """Serialize a minimal RDF/XML file with one
        rinfo:KonsolideradGrundforfattning triple per law text that has
        wiki commentary."""
        # the resulting file contains one triple for each law text
        # that has comments (should this be in Wiki.py instead?
        termdir = os.path.sep.join([config.datadir, 'wiki', 'parsed', 'SFS'])
        minixmlfile = os.path.sep.join(
            [config.datadir, cls.alias, 'parsed', 'rdf-mini.xml'])
        files = list(util.list_dirs(termdir, ".xht2"))
        parser = LegalRef(LegalRef.LAGRUM)
        # self.log.info("Making a mini graph")
        # NOTE(review): DCT is never used below -- candidate for removal
        DCT = Namespace(util.ns['dct'])
        mg = Graph()
        for key, value in list(util.ns.items()):
            mg.bind(key, Namespace(value))
        for f in files:
            # turn '.../1998/204.xht2' into the SFS number '1998:204'
            basefile = ":".join(os.path.split(os.path.splitext(
                os.sep.join(os.path.normpath(f).split(os.sep)[-2:]))[0]))
            # print "Finding out URI for %s" % basefile
            try:
                uri = parser.parse(basefile)[0].uri
            except AttributeError: # basefile is not interpretable as a SFS no
                continue
            mg.add((
                URIRef(uri), RDF.type, RPUBL['KonsolideradGrundforfattning']))
        # self.log.info("Serializing the minimal graph")
        f = open(minixmlfile, 'w')
        f.write(mg.serialize(format="pretty-xml"))
        f.close()
def _file_to_basefile(self, f):
"""Override of LegalSource._file_to_basefile, with special
handling of archived versions and two-part documents"""
# this transforms 'foo/bar/baz/HDO/1-01.doc' to 'HDO/1-01'
if '-' in f:
return None
basefile = "/".join(os.path.split(os.path.splitext(
os.sep.join(os.path.normpath(f).split(os.sep)[-2:]))[0]))
if basefile.endswith('_A') or basefile.endswith('_B'):
basefile = basefile[:-2]
return basefile
def _indexpages_predicates(self):
return [util.ns['dct'] + "title",
util.ns['rinfo'] + 'fsNummer',
util.ns['rdf'] + 'type',
util.ns['rinfo'] + 'KonsolideradGrundforfattning']
    def _build_indexpages(self, by_pred_obj, by_subj_pred):
        """Build the static 404 page plus per-year and per-initial-letter
        index pages over all consolidated statutes.

        by_pred_obj maps predicate -> object -> [subjects];
        by_subj_pred maps subject -> predicate -> value.
        """
        documents = defaultdict(lambda: defaultdict(list))
        pagetitles = {}
        pagelabels = {}
        fsnr_pred = util.ns['rinfo'] + 'fsNummer'
        title_pred = util.ns['dct'] + 'title'
        type_pred = util.ns['rdf'] + 'type'
        type_obj = util.ns['rinfo'] + 'KonsolideradGrundforfattning'
        year_lbl = 'Ordnade efter utgivnings\xe5r'
        title_lbl = 'Ordnade efter titel'
        # construct the 404 page - we should really do this in the
        # form of a xht2 page that gets transformed using static.xsl,
        # but it's tricky to get xslt to output a href attribute with
        # an embedded (SSI) comment.
        doc = '''<?xml version="1.0"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"><html xmlns="http://www.w3.org/1999/xhtml" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:rinfo="http://rinfo.lagrummet.se/taxo/2007/09/rinfo/pub#" xmlns:xsd="http://www.w3.org/2001/XMLSchema#" xmlns:rinfoex="http://lagen.nu/terms#" xml:lang="sv" lang="sv"><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /><title>F\xf6rfattningstext saknas | Lagen.nu</title><script type="text/javascript" src="/js/jquery-1.2.6.min.js"></script><script type="text/javascript" src="/js/jquery.treeview.min.js"></script><script type="text/javascript" src="/js/base.js"></script><link rel="shortcut icon" href="/img/favicon.ico" type="image/x-icon" /><link rel="stylesheet" href="/css/screen.css" media="screen" type="text/css" /><link rel="stylesheet" href="/css/print.css" media="print" type="text/css" /></head><body><div id="vinjett"><h1><a href="/">lagen.nu</a></h1><ul id="navigation"><li><a href="/nyheter/">Nyheter</a></li><li><a href="/index/">Lagar</a></li><li><a href="/dom/index/">Domar</a></li><li><a href="/om/">Om</a></li></ul><form method="get" action="http://www.google.com/custom"><p><span class="accelerator">S</span>\xf6k:<input type="text" name="q" id="q" size="40" maxlength="255" value="" accesskey="S" /><input type="hidden" name="cof" value="S:http://bself.log.tomtebo.org/;AH:center;AWFID:22ac01fa6655f6b6;" /><input type="hidden" name="domains" value="lagen.nu" /><input type="hidden" name="sitesearch" value="lagen.nu" checked="checked" /></p></form></div><div id="colmask" class="threecol"><div id="colmid"><div id="colleft"><div id="dokument">
        <h1>F\xf6rfattningstext saknas</h1>
        <p>Det verkar inte finnas n\xe5gon f\xf6rfattning med SFS-nummer
        <!--#echo var="REDIRECT_SFS" -->. Om den har funnits tidigare s\xe5
        kanske den har blivit upph\xe4vd?</p>
        <p>Om den har blivit upph\xe4vd kan den finnas i sin sista lydelse p\xe5
        Regeringskansliets r\xe4ttsdatabaser:
        <a href="http://62.95.69.15/cgi-bin/thw?${HTML}=sfst_lst&${OOHTML}=sfst_dok&${SNHTML}=sfst_err&${BASE}=SFST&${TRIPSHOW}=format%3DTHW&BET=<!--#echo var="REDIRECT_SFS" -->">S\xf6k efter SFS <!--#echo var="REDIRECT_SFS" --></a>.</p>
        </div><div id="kommentarer"></div><div id="referenser"></div></div></div></div><div id="sidfot"><b>Lagen.nu</b> \xe4r en privat webbplats. Informationen h\xe4r \xe4r inte officiell och kan vara felaktig | <a href="/om/ansvarsfriskrivning.html">Ansvarsfriskrivning</a> | <a href="/om/kontakt.html">Kontaktinformation</a></div><script type="text/javascript">var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www."); document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));</script><script type="text/javascript">var pageTracker = _gat._getTracker("UA-172287-1"); pageTracker._trackPageview();</script></body></html>'''
        outfile = "%s/%s/generated/notfound.shtml" % (
            self.config.datadir, self.alias)
        fp = codecs.open(outfile, "w", encoding='utf-8')
        fp.write(doc)
        fp.close()
        print(("wrote %s" % outfile))
        # list all subjects that are of rdf:type rinfo:KonsolideradGrundforfattning
        for subj in by_pred_obj[type_pred][type_obj]:
            fsnr = by_subj_pred[subj][fsnr_pred]
            title = by_subj_pred[subj][title_pred]
            # normalize the title for sorting: strip "Kungl. Maj:ts" and
            # leading document-type boilerplate like "Lag (1998:204) om"
            sorttitle = re.sub(r'Kungl\. Maj:ts ', '', title)
            sorttitle = re.sub(r'^(Lag|F\xf6rordning|Tillk\xe4nnagivande|[kK]ung\xf6relse) ?\([^\)]+\) ?(av|om|med|ang\xe5ende) ', '', sorttitle)
            year = fsnr.split(':')[0]
            letter = sorttitle[0].lower()
            pagetitles[year] = 'F\xf6rfattningar utgivna %s' % year
            pagelabels[year] = year
            documents[year_lbl][year].append({'uri': subj,
                                              'sortkey': fsnr,
                                              'title': title})
            if letter.isalpha():
                pagetitles[letter] = 'F\xf6rfattningar som b\xf6rjar p\xe5 "%s"' % letter.upper()
                pagelabels[letter] = letter.upper()
                documents[title_lbl][letter].append({'uri': subj,
                                                     'sortkey': sorttitle.lower(),
                                                     'title': sorttitle,
                                                     'leader': title.replace(sorttitle, '')})
        # FIXME: port the 'Nyckelbegrepp' code from 1.0
        # import the old etiketter data and make a tag cloud or something
        for category in list(documents.keys()):
            for pageid in list(documents[category].keys()):
                outfile = "%s/%s/generated/index/%s.html" % (
                    self.config.datadir, self.alias, pageid)
                title = pagetitles[pageid]
                if category == year_lbl:
                    # NOTE(review): docsorter=util.numcmp is a Python 2
                    # style cmp function -- confirm the target runtime
                    self._render_indexpage(outfile, title, documents, pagelabels, category, pageid, docsorter=util.numcmp)
                else:
                    self._render_indexpage(outfile, title, documents,
                                           pagelabels, category, pageid)
                    if pageid == 'a':  # make index.html
                        outfile = "%s/%s/generated/index/index.html" % (
                            self.config.datadir, self.alias)
                        self._render_indexpage(outfile, title, documents,
                                               pagelabels, category, pageid)
    # "2012:35 [1998:204, 1999:175]" -> (change SFS nr, affected base laws)
    re_message = re.compile(r'(\d+:\d+) \[([^\]]*)\]')
    # "{namespace}tag" -> (namespace incl. braces, local tag name)
    re_qname = re.compile(r'(\{.*\})(\w+)')
    # a parenthesized SFS number inside a title, e.g. " (1998:204)"
    re_sfsnr = re.compile(r'\s*(\(\d+:\d+\))')
    def _build_newspages(self, messages):
        """Build the "new & changed statutes" news pages (HTML + Atom)
        from (timestamp, message) pairs describing change entries.

        Three page sets are produced: all changes, laws only
        ("lagar"), and ordinances/other documents ("forordningar").
        """
        changes = {}
        all_entries = []
        lag_entries = []
        ovr_entries = []
        for (timestamp, message) in messages:
            m = self.re_message.match(message)
            change = m.group(1)
            # only the first message for each change number is used
            if change in changes:
                continue
            changes[change] = True
            bases = m.group(2).split(", ")
            basefile = "%s/%s/parsed/%s.xht2" % (
                self.config.datadir, self.alias, SFSnrToFilename(bases[0]))
            # print "opening %s" % basefile
            if not os.path.exists(basefile):
                # if the parsed file does not exist, the statute may have
                # been repealed _or_ something went wrong during parsing
                self.log.warning("File %s not found" % basefile)
                continue
            tree, ids = ET.XMLID(open(basefile).read())
            if (change != bases[0]) and (not 'L' + change in ids):
                self.log.warning(
                    "ID %s not found in %s" % ('L' + change, basefile))
                continue
            if change != bases[0]:
                # a change to an existing law: the title is carried in a
                # <dd property="dct:title"> inside the change entry
                for e in ids['L' + change].findall(".//{http://www.w3.org/2002/06/xhtml2/}dd"):
                    if 'property' in e.attrib and e.attrib['property'] == 'dct:title':
                        title = e.text
            else:
                # a new law: use the document title itself
                title = tree.find(
                    ".//{http://www.w3.org/2002/06/xhtml2/}title").text
            # use relative, non-rinfo uri:s here - since the atom
            # transform wont go through xslt and use uri.xslt
            uri = '/%s' % bases[0]
            for node in ids['L' + change]:
                m = self.re_qname.match(node.tag)
                if m.group(2) == 'dl':
                    content = self._element_to_string(node)
            # NOTE(review): `title` and `content` may be unbound here if
            # no matching dd/dl node was found -- relies on input shape
            entry = {'title': title,
                     'timestamp': timestamp,
                     'id': change,
                     'uri': uri,
                     'content': '<p><a href="%s">F\xf6rfattningstext</a></p>%s' % (uri, content)}
            all_entries.append(entry)
            # classify as law vs ordinance/other based on the title with
            # the SFS number stripped out
            basetitle = self.re_sfsnr.sub('', title)
            # print "%s: %s" % (change, basetitle)
            if (basetitle.startswith('Lag ') or
                (basetitle.endswith('lag') and not basetitle.startswith('F\xf6rordning')) or
                basetitle.endswith('balk')):
                lag_entries.append(entry)
            else:
                ovr_entries.append(entry)
        htmlfile = "%s/%s/generated/news/all.html" % (
            self.config.datadir, self.alias)
        atomfile = "%s/%s/generated/news/all.atom" % (
            self.config.datadir, self.alias)
        self._render_newspage(htmlfile, atomfile, 'Nya och \xe4ndrade f\xf6rfattningar', 'De senaste 30 dagarna', all_entries)
        htmlfile = "%s/%s/generated/news/lagar.html" % (
            self.config.datadir, self.alias)
        atomfile = "%s/%s/generated/news/lagar.atom" % (
            self.config.datadir, self.alias)
        self._render_newspage(htmlfile, atomfile, 'Nya och \xe4ndrade lagar',
                              'De senaste 30 dagarna', lag_entries)
        htmlfile = "%s/%s/generated/news/forordningar.html" % (
            self.config.datadir, self.alias)
        atomfile = "%s/%s/generated/news/forordningar.atom" % (
            self.config.datadir, self.alias)
        self._render_newspage(htmlfile, atomfile, 'Nya och \xe4ndrade f\xf6rordningar och \xf6vriga f\xf6rfattningar', 'De senaste 30 dagarna', ovr_entries)
def _element_to_string(self, e):
"""Creates a XHTML1 string from a elementtree.Element,
removing namespaces and rel/propery attributes"""
m = self.re_qname.match(e.tag)
tag = m.group(2)
if list(e.attrib.keys()):
attributestr = " " + " ".join([x + '="' + e.attrib[x].replace('"', '"') + '"' for x in list(e.attrib.keys()) if x not in ['rel', 'property']])
else:
attributestr = ""
childstr = ''
for child in e:
childstr += self._element_to_string(child)
text = ''
tail = ''
if e.text:
text = cgi.escape(e.text)
if e.tail:
tail = cgi.escape(e.tail)
return "<%s%s>%s%s%s</%s>" % (tag, attributestr, text, childstr, tail, tag)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Intermediate base class containing some small functionality useful
# for handling data sources of swedish law.
import os
from datetime import datetime, date
import difflib
import re
from rdflib import Literal, Namespace, URIRef, RDF, RDFS, Graph
from six import text_type as str
from ferenda import util
from ferenda import DocumentRepository, Describer
from ferenda.elements import Paragraph, Section
class Stycke(Paragraph):
    """A 'stycke' (paragraph/subsection) of swedish legal text."""
    pass
class Sektion(Section):
    """A 'sektion' (section) of a swedish legal document."""
    pass
class SwedishLegalSource(DocumentRepository):
    """Intermediate base class containing small functionality useful for
    handling data sources of swedish law: extra RDF namespaces, swedish
    ordinal/date parsing, authority-resource lookup and shared TOC
    configuration."""
    # standard prefixes plus two that carry explicit URIs (rpubl and
    # rinfoex are not in ferenda's built-in prefix table)
    namespaces = ['rdf', 'rdfs', 'xsd', 'dct', 'skos', 'foaf',
                  'xhv', 'owl', 'prov', 'bibo',
                  ('rpubl','http://rinfo.lagrummet.se/ns/2008/11/rinfo/publ#'),
                  ('rinfoex', 'http://lagen.nu/terms#')]
    # swedish ordinal words "first" .. "twelfth"
    swedish_ordinal_list = ('första', 'andra', 'tredje', 'fjärde',
                            'femte', 'sjätte', 'sjunde', 'åttonde',
                            'nionde', 'tionde', 'elfte', 'tolfte')
    # maps each ordinal word to its number: 'första' -> 1, ... -> 12
    swedish_ordinal_dict = dict(list(zip(
        swedish_ordinal_list, list(range(1, len(swedish_ordinal_list) + 1)))))
    def _swedish_ordinal(self, s):
        """Return the integer for a swedish ordinal word, or None if the
        word is not one of the twelve known ordinals."""
        sl = s.lower()
        if sl in self.swedish_ordinal_dict:
            return self.swedish_ordinal_dict[sl]
        return None
    def _load_resources(self, resource_path):
        """Load an N3 resource file and return a dict mapping each
        rdfs:label to its resource URI (both as text)."""
        # returns a mapping [resource label] => [resource uri]
        # resource_path is given relative to the current source code file
        if not resource_path.startswith(os.sep):
            resource_path = os.path.normpath(
                os.path.dirname(__file__) + os.sep + resource_path)
        graph = Graph()
        graph.load(resource_path, format='n3')
        d = {}
        for uri, label in graph.subject_objects(RDFS.label):
            d[str(label)] = str(uri)
        return d
    def lookup_resource(self, resource_label, cutoff=0.8, warn=True):
        """Given a text label refering to some kind of organization,
        person or other entity, eg. 'Justitiedepartementet Gransk',
        return a URI for that entity. The text label does not need to
        match exactly byte-for-byte, a fuzziness matching function
        returns any reasonably similar (adjusted by the cutoff
        parameter) entity.

        Raises KeyError when nothing is similar enough."""
        keys = []
        if not hasattr(self, 'org_resources'):
            self.org_resources = self._load_resources("../../../res/etc/authrec.n3")
        for (key, value) in list(self.org_resources.items()):
            # prefix match against every known label first
            if resource_label.lower().startswith(key.lower()):
                return URIRef(value)
            else:
                keys.append(key)
        # no prefix match: fall back to fuzzy matching over all labels
        fuzz = difflib.get_close_matches(resource_label, keys, 1, cutoff)
        if fuzz:
            if warn:
                self.log.warning("Assuming that '%s' should be '%s'?" %
                                 (resource_label, fuzz[0]))
            return self.lookup_resource(fuzz[0])
        else:
            self.log.warning("No good match for '%s'" % (resource_label))
            raise KeyError(resource_label)
    def lookup_label(self, resource):
        """Reverse of lookup_resource: return the label for a resource
        URI. Raises KeyError if the URI is unknown."""
        # NOTE(review): loads the resource file from a different relative
        # path than lookup_resource ("../res" vs "../../../res") --
        # confirm which one is correct
        if not hasattr(self, 'org_resources'):
            self.org_resources = self._load_resources("../res/etc/authrec.n3")
        for (key, value) in list(self.org_resources.items()):
            if resource == value:
                return key
        raise KeyError(resource)
    def sameas_uri(self, uri):
        """Translate a local document URI into the corresponding
        canonical rinfo.lagrummet.se URI."""
        # maps <http://lagen.nu/publ/dir/2012:35> to
        # <http://rinfo.lagrummet.se/publ/dir/2012:35>
        assert uri.startswith(self.config['url'])
        return uri.replace(self.config['url'], 'http://rinfo.lagrummet.se/')
    def parse_iso_date(self, datestr):
        """Parse a YYYY-MM-DD string into a datetime."""
        # only handles YYYY-MM-DD now. Look into dateutil or isodate
        # for more complete support of all ISO 8601 variants
        return datetime.strptime(datestr, "%Y-%m-%d")
    def parse_swedish_date(self, datestr):
        """Parse a swedish-language date such as "3 februari 2010", or a
        phrase like "vid utgången av december 1999" ("at the end of
        december 1999", meaning the last day of that month), into a
        datetime.date."""
        # assume strings on the form "3 februari 2010"
        months = {"januari": 1,
                  "februari": 2,
                  "mars": 3,
                  "april": 4,
                  "maj": 5,
                  "juni": 6,
                  "juli": 7,
                  "augusti": 8,
                  "september": 9,
                  "oktober": 10,
                  "november": 11,
                  "december": 12,
                  # "vid utgången av år 1999" = end of the year, which
                  # resolves to the last day of december
                  "år": 12}
        # strings on the form "vid utgången av december 1999"
        if datestr.startswith("vid utgången av"):
            import calendar
            (x, y, z, month, year) = datestr.split()
            month = months[month]
            year = int(year)
            # last day of the given month
            day = calendar.monthrange(year, month)[1]
        else:
            # assume strings on the form "3 februari 2010"
            (day, month, year) = datestr.split()
            day = int(day)
            month = months[month]
            year = int(year)
        return date(year, month, day)
    def infer_triples(self, d, basefile):
        """Ensure the Describer *d* carries dct:identifier,
        rpubl:arsutgava and rpubl:lopnummer triples, inferring a
        dct:identifier from basefile + self.rdf_type when it is missing
        or malformed. Raises ValueError for rdf_types without a known
        identifier prefix."""
        try:
            identifier = d.getvalue(self.ns['dct'].identifier)
            # if the identifier is incomplete, eg "2010/11:68" instead
            # of "Prop. 2010/11:68", the following triggers a
            # ValueError, which is handled the same as if no
            # identifier is available at all.
            (doctype, arsutgava, lopnummer) = re.split("[ :]", identifier)
        except (KeyError, ValueError):
            # Create one from basefile. First guess prefix
            if self.rdf_type == self.ns['rpubl'].Direktiv:
                prefix = "Dir. "
            elif self.rdf_type == self.ns['rpubl'].Utredningsbetankande:
                if d.getvalue(self.ns['rpubl'].utrSerie) == "http://rinfo.lagrummet.se/serie/utr/ds":
                    prefix = "Ds "
                else:
                    prefix = "SOU "
            elif self.rdf_type == self.ns['rpubl'].Proposition:
                prefix = "Prop. "
            elif self.rdf_type == self.ns['rpubl'].Forordningsmotiv:
                prefix = "Fm "
            else:
                raise ValueError("Cannot create dct:identifer for rdf_type %r" % self.rdf_type)
            identifier = "%s%s" % (prefix, basefile)
            self.log.warning(
                "%s: No dct:identifier, assuming %s" % (basefile, identifier))
            d.value(self.ns['dct'].identifier, identifier)
        self.log.debug("Identifier %s" % identifier)
        # split e.g. "Prop. 2010/11:68" into its year and running number
        (doctype, arsutgava, lopnummer) = re.split("[ :]", identifier)
        d.value(self.ns['rpubl'].arsutgava, arsutgava)
        d.value(self.ns['rpubl'].lopnummer, lopnummer)
    def toc_query(self):
        """SPARQL query fetching the fields the TOC pages need."""
        return """PREFIX dct:<http://purl.org/dc/terms/>
                  PREFIX rpubl:<http://rinfo.lagrummet.se/ns/2008/11/rinfo/publ#>
                  SELECT DISTINCT ?uri ?title ?identifier ?arsutgava ?lopnummer ?departement
                  FROM <%s>
                  WHERE {?uri dct:title ?title;
                             dct:identifier ?identifier;
                             rpubl:arsutgava ?arsutgava;
                             rpubl:lopnummer ?lopnummer;
                             rpubl:departement ?departement;
}""" % self.context()
    def toc_criteria(self):
        """The three TOC orderings: by year, by title and by ministry."""
        # NOTE(review): 'sorter': cmp uses the Python 2 builtin cmp --
        # confirm this module is not expected to run under Python 3 as-is
        return (
            {'predicate': self.ns['rpubl']['arsutgava'],
             'binding': 'arsutgava',
             'label': 'Efter årtal',
             'sorter': cmp,
             'pages': []},
            {'predicate': self.ns['dct']['title'],
             'binding': 'title',
             'label': 'Efter rubrik',
             'selector': lambda x: x[0].lower(),
             'sorter': cmp,
             'pages': []},
            {'predicate': self.ns['rpubl']['departement'],
             'binding': 'departement',
             'label': 'Efter departement',
             'selector': self.lookup_label,
             'sorter': cmp,
             'pages': []},
        )
    def toc_item(self, binding, row):
        """One TOC entry: the link URI plus an "identifier: title" label."""
        return {'uri': row['uri'],
                'label': row['identifier'] + ": " + row['title']}
| Python |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
import re
from . import Regeringen
# See SOU.py for discussion about possible other sources
class Ds(Regeringen):
    """Document source for 'Ds' (Departementsserien) reports published
    by the Government Offices (Regeringen)."""
    module_dir = "ds"
    # strict: requires the "Ds" prefix, e.g. "Ds 2009:41"
    re_basefile_strict = re.compile(r'Ds (\d{4}:\d+)')
    # lax: accepts a bare year:number, with or without the prefix
    re_basefile_lax = re.compile(r'(?:Ds|) ?(\d{4}:\d+)', re.IGNORECASE)
    def __init__(self, options):
        super(Ds, self).__init__(options)
        # self.DS is a document-type constant defined by Regeringen
        self.document_type = self.DS
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# A abstract base class for fetching and parsing regulations from
# various swedish government agencies. These PDF documents often have
# a similar structure both graphically and linguistically, enabling us
# to parse them in a generalized way. (Downloading them often requires
# special-case code, though.)
from __future__ import unicode_literals,print_function
import os
import re
import datetime
import logging
import codecs
from tempfile import mktemp
from xml.sax.saxutils import escape as xml_escape
import six
if six.PY3:
from urllib.parse import urlsplit, urlunsplit
else:
from urlparse import urlsplit, urlunsplit
from rdflib import Graph, URIRef, Literal
from bs4 import BeautifulSoup
import requests
from ferenda import DocumentRepository
from ferenda import TextReader
from ferenda.legalref import LegalRef
from ferenda import util
from . import SwedishLegalSource
class MyndFskr(SwedishLegalSource):
    """Abstract base class for fetching and parsing regulations (PDF
    documents) from various swedish government agencies."""
    source_encoding = "utf-8"
    downloaded_suffix = ".pdf"
    alias = 'myndfskr'
    # Very slightly adapted from documentrepository -- should we make
    # it easier to specify that the main downloaded resources need not
    # be .html files?
@classmethod
def list_basefiles_for(cls, funcname, base_dir):
if funcname == "parse_all":
directory = os.path.sep.join(
(base_dir, cls.alias, "downloaded"))
suffix = ".pdf"
elif funcname in ("generate_all", "relate_all", "news"):
directory = os.path.sep.join((base_dir, cls.alias, "parsed"))
suffix = ".xhtml"
for x in util.list_dirs(directory, suffix, reverse=True):
# There might be additional documents strewn about in the
# download directories, like "4-beslutspm_ny_frl_2011.pdf"
# (the FFFS downloader creates these). Don't include
# them.
if "-" in x or "_" in x:
continue
yield cls.basefile_from_path(x)
    def download_everything(self, usecache=False):
        """Simple default implementation that downloads all PDF files
        from self.start_url that look like regulation document
        numbers."""
        self.browser.open(self.start_url)
        # regex to search the link url, text or title for something
        # looking like a FS number
        re_fsnr = re.compile('(\d{4})[:/_-](\d+)(|\.\w+)$')
        for link in self.browser.links(url_regex='.[pP][dD][fF]$'):
            # `done` guards against downloading the same link twice when
            # several of its fields match the FS-number pattern
            done = False
            # print "Examining %s" % link
            attrs = dict(link.attrs)
            # candidate fields that may carry the FS number
            flds = [link.url, link.text]
            if 'title' in attrs:
                flds.append(attrs['title'])
            for fld in flds:
                if re_fsnr.search(fld) and not done:
                    m = re_fsnr.search(fld)
                    # Make sure we end up with "2011:4" rather than
                    # "2011:04"
                    basefile = "%s:%s" % (m.group(1), int(m.group(2)))
                    self.download_single(basefile, usecache, link.absolute_url)
                    done = True
def canonical_uri(self, basefile):
# The canonical URI for these documents cannot always be
# computed from the basefile. Find the primary subject of the
# distilled RDF graph instead.
if not os.path.exists(self.store.distilled_path(basefile)):
return None
g = Graph()
g.parse(self.store.distilled_path(basefile))
subjects = list(g.subject_objects(self.ns['rdf']['type']))
if subjects:
return str(subjects[0][0])
else:
self.log.warning(
"No canonical uri in %s" % (self.distilled_path(basefile)))
# fall back
return super(MyndFskr, self).canonical_uri(basefile)
    def textreader_from_basefile(self, basefile, encoding):
        """Convert the downloaded PDF for *basefile* to plain text with
        pdftotext and return a TextReader over the result.

        NOTE(review): uses self.downloaded_path(...) while neighbouring
        methods resolve paths through self.store -- confirm both map to
        the same layout.
        """
        infile = self.downloaded_path(basefile)
        tmpfile = self.store.path(basefile, 'intermediate', '.pdf')
        outfile = self.store.path(basefile, 'intermediate', '.txt')
        util.copy_if_different(infile, tmpfile)
        # pdftotext writes its output next to the input, swapping the
        # .pdf suffix for .txt -- i.e. exactly `outfile`
        util.runcmd("pdftotext %s" % tmpfile, require_success=True)
        util.robust_remove(tmpfile)
        return TextReader(outfile, encoding=encoding, linesep=TextReader.UNIX)
def rpubl_uri_transform(self, s):
# Inspired by
# http://code.activestate.com/recipes/81330-single-pass-multiple-replace/
table = {'å': 'aa',
'ä': 'ae',
'ö': 'oe'}
r = re.compile("|".join(list(table.keys())))
# return r.sub(lambda f: table[f.string[f.start():f.end()]], s.lower())
return r.sub(lambda m: table[m.group(0)], s.lower())
    def download_resource_lists(self, resource_url, graph_path):
        """Download the official resource lists from *resource_url*,
        follow each listed rpubl:Forfattningssamling to fetch its full
        description, and serialize the combined graph as RDF/XML to
        *graph_path*."""
        hdr = self._addheaders()
        hdr['Accept'] = 'application/rdf+xml'
        resp = requests.get(resource_url, headers=hdr)
        g = Graph()
        g.parse(data=resp.text, format="xml")
        # fetch the full description of each listed forfattningssamling
        # into the same graph
        for subj in g.subjects(self.ns['rdf'].type,
                               self.ns['rpubl'].Forfattningssamling):
            resp = requests.get(str(subj), headers=hdr)
            resp.encoding = "utf-8"
            g.parse(data=resp.text, format="xml")
        with open(graph_path,"wb") as fp:
            # NOTE(review): file is opened in binary mode -- assumes
            # g.serialize() returns bytes; confirm against the rdflib
            # version in use
            data = g.serialize(format="xml")
            fp.write(data)
    def parse_from_textreader(self, reader, basefile):
        """Build a document (metadata graph + plaintext body) from the
        pdftotext output in *reader*.

        Phases (mirroring the numbered comments below):
        1. load known resources (författningssamlingar, agencies),
        2. regex-scan pages front-to-back and back-to-front for metadata,
        3. clean matched strings into Literals/URIRefs and mint the URI,
        4. add the cleaned properties to the RDF graph,
        5. wrap each page in <pre> for the body.

        Returns the document object, or None when no dct:identifier
        (and hence no URI) could be determined.
        """
        tracelog = logging.getLogger("%s.tracelog" % self.alias)
        doc = self.make_document(basefile)
        g = doc.meta
        # 1.2: Load known entities and their URIs (we have to add some
        # that are not yet in the official resource lists
        resource_list_file = self.store.path('resourcelist','intermediate','.rdf')
        if not os.path.exists(resource_list_file):
            self.download_resource_lists("http://service.lagrummet.se/var/common",
                                         resource_list_file)
        resources = Graph()
        resources.parse(resource_list_file, format="xml")
        # 1.3: Define regexps for the data we search for.
        fwdtests = {'dct:issn': ['^ISSN (\d+\-\d+)$'],
                    'dct:title': ['((?:Föreskrifter|[\w ]+s (?:föreskrifter|allmänna råd)).*?)\n\n'],
                    'dct:identifier': ['^([A-ZÅÄÖ-]+FS\s\s?\d{4}:\d+)$'],
                    'rpubl:utkomFranTryck': ['Utkom från\strycket\s+den\s(\d+ \w+ \d{4})'],
                    'rpubl:omtryckAv': ['^(Omtryck)$'],
                    'rpubl:genomforDirektiv': ['Celex (3\d{2,4}\w\d{4})'],
                    'rpubl:beslutsdatum': ['(?:har beslutats|beslutade|beslutat) den (\d+ \w+ \d{4})'],
                    'rpubl:beslutadAv': ['\n([A-ZÅÄÖ][\w ]+?)\d? (?:meddelar|lämnar|föreskriver)',
                                         '\s(?:meddelar|föreskriver) ([A-ZÅÄÖ][\w ]+?)\d?\s'],
                    'rpubl:bemyndigande': [' ?(?:meddelar|föreskriver|Föreskrifterna meddelas|Föreskrifterna upphävs)\d?,? (?:följande |)med stöd av\s(.*?) ?(?:att|efter\ssamråd|dels|följande|i fråga om|och lämnar allmänna råd|och beslutar följande allmänna råd|\.\n)',
                                          '^Med stöd av (.*)\s(?:meddelar|föreskriver)']
                    }
        # 2: Find metadata properties
        # 2.1 Find some of the properties on the first page (or the
        # 2nd, or 3rd... continue past TOC pages, cover pages etc
        # until the "real" first page is found) NB: FFFS 2007:1 has
        # ten (10) TOC pages!
        pagecnt = 0
        for page in reader.getiterator(reader.readpage):
            # replace single newlines with spaces, but keep double
            # newlines
            # page = "\n\n".join([util.normalize_space(x) for x in page.split("\n\n")])
            pagecnt += 1
            # NOTE(review): props is re-initialized on every page, so
            # matches from earlier (e.g. TOC) pages are discarded; the
            # "if prop in props: continue" guard below can therefore
            # never trigger within one page -- confirm this is intended.
            props = {}
            for (prop, tests) in list(fwdtests.items()):
                if prop in props:
                    continue
                for test in tests:
                    m = re.search(
                        test, page, re.MULTILINE | re.DOTALL | re.UNICODE)
                    if m:
                        props[prop] = util.normalize_space(m.group(1))
            # Single required propery. If we find this, we're done
            if 'rpubl:beslutsdatum' in props:
                break
            self.log.warning("%s: Couldn't find required props on page %s" %
                             (basefile, pagecnt))
        # 2.2 Find some of the properties on the last 'real' page (not
        # counting appendicies)
        reader.seek(0)
        pagesrev = reversed(list(reader.getiterator(reader.readpage)))
        # The language used to expres these two properties differ
        # quite a lot, more than what is reasonable to express in a
        # single regex. We therefore define a set of possible
        # expressions and try them in turn.
        revtests = {'rpubl:ikrafttradandedatum':
                    ['(?:Denna författning|Dessa föreskrifter|Dessa allmänna råd|Dessa föreskrifter och allmänna råd)\d* träder i ?kraft den (\d+ \w+ \d{4})',
                     'Dessa föreskrifter träder i kraft, (?:.*), i övrigt den (\d+ \w+ \d{4})',
                     'ska(?:ll|)\supphöra att gälla (?:den |)(\d+ \w+ \d{4}|denna dag|vid utgången av \w+ \d{4})',
                     'träder i kraft den dag då författningen enligt uppgift på den (utkom från trycket)'],
                    'rpubl:upphaver':
                    ['träder i kraft den (?:\d+ \w+ \d{4}), då(.*)ska upphöra att gälla',
                     'ska(?:ll|)\supphöra att gälla vid utgången av \w+ \d{4}, nämligen(.*?)\n\n',
                     'att (.*) skall upphöra att gälla (denna dag|vid utgången av \w+ \d{4})']
                    }
        cnt = 0
        for page in pagesrev:
            cnt += 1
            # Normalize the whitespace in each paragraph so that a
            # linebreak in the middle of the natural language
            # expression doesn't break our regexes.
            page = "\n\n".join(
                [util.normalize_space(x) for x in page.split("\n\n")])
            for (prop, tests) in list(revtests.items()):
                if prop in props:
                    continue
                for test in tests:
                    # Not re.DOTALL -- we've normalized whitespace and
                    # don't want to match across paragraphs
                    m = re.search(test, page, re.MULTILINE | re.UNICODE)
                    if m:
                        props[prop] = util.normalize_space(m.group(1))
                        #print u"%s: '%s' resulted in match '%s' at page %s from end" % (prop,test,props[prop], cnt)
            # Single required propery. If we find this, we're done
            if 'rpubl:ikrafttradandedatum' in props:
                break
        # 3: Clean up data - converting strings to Literals or
        # URIRefs, find legal references, etc
        if 'dct:identifier' in props:
            # e.g. "FFFS 2011:42" -> ("FFFS", "2011", "42")
            (publication, year, ordinal) = re.split('[ :]',
                                                    props['dct:identifier'])
            # FIXME: Read resources graph instead
            fs = resources.value(predicate=self.ns['skos'].altLabel,
                                 object=Literal(publication,lang='sv'))
            props['rpubl:forfattningssamling'] = fs
            publ = resources.value(subject=fs,
                                   predicate=self.ns['dct'].publisher)
            props['dct:publisher'] = publ
            props['rpubl:arsutgava'] = Literal(
                year)  # conversion to int, date not needed
            props['rpubl:lopnummer'] = Literal(ordinal)
            props['dct:identifier'] = Literal(props['dct:identifier'])
            # Now we can mint the uri (should be done through LegalURI)
            uri = ("http://rinfo.lagrummet.se/publ/%s/%s:%s" %
                   (props['rpubl:forfattningssamling'].split('/')[-1],
                    props['rpubl:arsutgava'],
                    props['rpubl:lopnummer']))
            self.log.debug("URI: %s" % uri)
        else:
            self.log.error(
                "Couldn't find dct:identifier, cannot create URI, giving up")
            return None
        tracelog.info("Cleaning rpubl:beslutadAv")
        if 'rpubl:beslutadAv' in props:
            # Replace the free-text agency name with its URI, if known.
            agency = resources.value(predicate=self.ns['foaf'].name,
                                     object=Literal(props['rpubl:beslutadAv'],
                                                    lang="sv"))
            if agency:
                props['rpubl:beslutadAv'] = agency
            else:
                self.log.warning("Cannot find URI for rpubl:beslutadAv value %r" % props['rpubl:beslutadAv'])
                del props['rpubl:beslutadAv']
        tracelog.info("Cleaning dct:issn")
        if 'dct:issn' in props:
            props['dct:issn'] = Literal(props['dct:issn'])
        tracelog.info("Cleaning dct:title")
        # common false positive
        if 'dct:title' in props and 'denna f\xf6rfattning har beslutats den' in props['dct:title']:
            del props['dct:title']
        if 'dct:title' in props:
            tracelog.info("Inspecting dct:title %r" % props['dct:title'])
            # sometimes the title isn't separated with two newlines from the rest of the text
            if "\nbeslutade den " in props['dct:title']:
                props['dct:title'] = props[
                    'dct:title'].split("\nbeslutade den ")[0]
            props['dct:title'] = Literal(
                util.normalize_space(props['dct:title']), lang="sv")
            # A change ("ändring") regulation: link to the regulation it amends.
            if re.search('^(Föreskrifter|[\w ]+s föreskrifter) om ändring i ', props['dct:title'], re.UNICODE):
                tracelog.info("Finding rpubl:andrar in dct:title")
                orig = re.search(
                    '([A-ZÅÄÖ-]+FS \d{4}:\d+)', props['dct:title']).group(0)
                (publication, year, ordinal) = re.split('[ :]', orig)
                origuri = "http://rinfo.lagrummet.se/publ/%s/%s:%s" % (self.rpubl_uri_transform(publication),
                                                                      year, ordinal)
                props['rpubl:andrar'] = URIRef(origuri)
                if 'rpubl:omtryckAv' in props:
                    props['rpubl:omtryckAv'] = URIRef(origuri)
            # A repeal ("upphävande") regulation: record the full title for
            # extraction of the repealed identifiers below.
            if (re.search('^(Föreskrifter|[\w ]+s föreskrifter) om upphävande av', props['dct:title'], re.UNICODE)
                    and not 'rpubl:upphaver' in props):
                tracelog.info("Finding rpubl:upphaver in dct:title")
                props['rpubl:upphaver'] = six.text_type(
                    props['dct:title'])  # cleaned below
        tracelog.info("Cleaning date properties")
        for prop in ('rpubl:utkomFranTryck', 'rpubl:beslutsdatum', 'rpubl:ikrafttradandedatum'):
            if prop in props:
                # "denna dag"/"utkom från trycket" are relative phrasings
                # resolved against the corresponding absolute date.
                if (props[prop] == 'denna dag' and
                        prop == 'rpubl:ikrafttradandedatum'):
                    props[prop] = props['rpubl:beslutsdatum']
                elif (props[prop] == 'utkom från trycket' and
                        prop == 'rpubl:ikrafttradandedatum'):
                    props[prop] = props['rpubl:utkomFranTryck']
                else:
                    props[prop] = Literal(
                        self.parse_swedish_date(props[prop].lower()))
        tracelog.info("Cleaning rpubl:genomforDirektiv")
        if 'rpubl:genomforDirektiv' in props:
            props['rpubl:genomforDirektiv'] = URIRef("http://rinfo.lagrummet.se/ext/eur-lex/%s" %
                                                     props['rpubl:genomforDirektiv'])
        tracelog.info("Cleaning rpubl:bemyndigande")
        has_bemyndiganden = False
        if 'rpubl:bemyndigande' in props:
            # SimpleParse can't handle unicode endash sign, transform
            # into regular ascii hyphen
            props['rpubl:bemyndigande'] = props[
                'rpubl:bemyndigande'].replace('\u2013', '-')
            parser = LegalRef(LegalRef.LAGRUM)
            result = parser.parse(props['rpubl:bemyndigande'])
            bemyndigande_uris = [x.uri for x in result if hasattr(x, 'uri')]
            # some of these uris need to be filtered away due to
            # over-matching by parser.parse
            filtered_bemyndigande_uris = []
            for bem_uri in bemyndigande_uris:
                keep = True
                for compare in bemyndigande_uris:
                    # Drop a URI when a strictly longer (more specific)
                    # URI with the same prefix is also present.
                    if (len(compare) > len(bem_uri) and
                            compare.startswith(bem_uri)):
                        keep = False
                if keep:
                    filtered_bemyndigande_uris.append(bem_uri)
            for bem_uri in filtered_bemyndigande_uris:
                g.add((URIRef(
                    uri), self.ns['rpubl']['bemyndigande'], URIRef(bem_uri)))
                has_bemyndiganden = True
            del props['rpubl:bemyndigande']
        tracelog.info("Cleaning rpubl:upphaver")
        if 'rpubl:upphaver' in props:
            # Each "XXFS yyyy:nn" identifier in the text becomes one
            # rpubl:upphaver triple.
            for upph in re.findall('([A-ZÅÄÖ-]+FS \d{4}:\d+)', util.normalize_space(props['rpubl:upphaver'])):
                (publication, year, ordinal) = re.split('[ :]', upph)
                upphuri = "http://rinfo.lagrummet.se/publ/%s/%s:%s" % (publication.lower(),
                                                                      year, ordinal)
                g.add((URIRef(
                    uri), self.ns['rpubl']['upphaver'], URIRef(upphuri)))
            del props['rpubl:upphaver']
        tracelog.info("Deciding rdf:type")
        # "allmänna råd" without "föreskrifter" in the title means general
        # guidance rather than a binding regulation.
        if ('dct:title' in props and
                "allmänna råd" in props['dct:title'] and
                not "föreskrifter" in props['dct:title']):
            props['rdf:type'] = self.ns['rpubl']['AllmannaRad']
        else:
            props['rdf:type'] = self.ns['rpubl']['Myndighetsforeskrift']
        # 3.5: Check to see that we have all properties that we expect
        # (should maybe be done elsewhere later?)
        tracelog.info("Checking required properties")
        for prop in ('dct:identifier', 'dct:title', 'rpubl:arsutgava',
                     'dct:publisher', 'rpubl:beslutadAv', 'rpubl:beslutsdatum',
                     'rpubl:forfattningssamling', 'rpubl:ikrafttradandedatum',
                     'rpubl:lopnummer', 'rpubl:utkomFranTryck'):
            if not prop in props:
                self.log.warning("%s: Failed to find %s" % (basefile, prop))
        tracelog.info("Checking rpubl:bemyndigande")
        if props['rdf:type'] == self.ns['rpubl']['Myndighetsforeskrift']:
            if not has_bemyndiganden:
                self.log.warning(
                    "%s: Failed to find rpubl:bemyndigande" % (basefile))
        # 4: Add the cleaned data to a RDFLib Graph
        # (maybe we should do that as early as possible?)
        tracelog.info("Adding items to rdflib.Graph")
        for (prop, value) in list(props.items()):
            (prefix, term) = prop.split(":", 1)
            p = self.ns[prefix][term]
            if not (isinstance(value, URIRef) or isinstance(value, Literal)):
                self.log.warning("%s: %s is a %s, not a URIRef or Literal" %
                                 (basefile, prop, type(value)))
            g.add((URIRef(uri), p, value))
        # 5: Create data for the body, removing various control characters
        # TODO: Use pdftohtml to create a nice viewable HTML
        # version instead of this plaintext stuff
        reader.seek(0)
        body = []
        # A fairly involved way of filtering out all control
        # characters from a string
        import unicodedata
        if six.PY3:
            all_chars = (chr(i) for i in range(0x10000))
        else:
            all_chars = (unichr(i) for i in range(0x10000))
        control_chars = ''.join(
            c for c in all_chars if unicodedata.category(c) == 'Cc')
        # tab and newline are technically Control characters in
        # unicode, but we want to keep them.
        control_chars = control_chars.replace("\t", "").replace("\n", "")
        control_char_re = re.compile('[%s]' % re.escape(control_chars))
        for page in reader.getiterator(reader.readpage):
            text = xml_escape(control_char_re.sub('', page))
            body.append("<pre>%s</pre>\n\n" % text)
        # 5: Done!
        #
        doc.body = body
        doc.lang = 'sv'
        doc.uri = uri
        return doc
    def tabs(cls, primary=False):
        """Return the navigation tab(s) for this repository as a list of
        [label, url] pairs.

        NOTE(review): the first parameter is named ``cls`` but no
        @classmethod decorator is visible here -- confirm how this is
        invoked before changing it.
        """
        return [['Myndighetsföreskrifter', '/myndfskr/']]
class SJVFS(MyndFskr):
    """Jordbruksverkets författningssamling (SJVFS).

    Crawls the agency's index page, follows every listed sub-index (two
    levels deep) and downloads the PDF regulations found there.
    """
    alias = "sjvfs"
    start_url = "http://www.jordbruksverket.se/forfattningar/forfattningssamling.4.5aec661121e2613852800012537.html"

    def download_everything(self, usecache=False):
        """Fetch the start page and crawl index pages up to two levels deep."""
        self.browser.open(self.start_url)
        soup = BeautifulSoup(self.browser.response())
        main = soup.find("ul", "c112")
        extra = []
        for a in list(main.findAll("a")):
            url = urllib.parse.urljoin(self.start_url, a['href'])
            self.log.info("Fetching %s %s" % (a.text, url))
            extra.extend(self.download_indexpage(url, usecache=usecache))
        # Follow non-PDF links collected from the first level...
        extra2 = []
        for url in list(set(extra)):
            self.log.info("Extrafetching %s" % (url))
            extra2.extend(self.download_indexpage(url, usecache=usecache))
        # ...and one more level; anything deeper is ignored.
        for url in list(set(extra2)):
            self.log.info("Extra2fetching %s" % (url))
            self.download_indexpage(url, usecache=usecache)

    def download_indexpage(self, url, usecache=False):
        """Download all PDFs linked from *url*; return non-PDF links for
        further crawling."""
        try:
            self.browser.open(url)
        except URLError as e:
            self.log.error("Failed to fetch %s: %s" % (url, e))
            return []
        # Fixed: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit; keep the best-effort behaviour
        # but only for real errors.
        except Exception:
            self.log.error("General error w %s" % url)
            return []
        subsoup = BeautifulSoup(self.browser.response())
        submain = subsoup.find("div", "pagecontent")
        extrapages = []
        for a in submain.findAll("a"):
            if a['href'].endswith(".pdf") or a['href'].endswith(".PDF"):
                if re.search('\d{4}:\d+', a.text):
                    # Link text carries the identifier, e.g. "SJVFS 2011:4";
                    # a missing FS prefix defaults to sjvfs.
                    m = re.search('(\w+FS|) ?(\d{4}:\d+)', a.text)
                    fs = m.group(1).lower()
                    fsnr = m.group(2)
                    if not fs:
                        fs = "sjvfs"
                    basefile = "%s/%s" % (fs, fsnr)
                    suburl = urllib.parse.unquote(
                        urllib.parse.urljoin(url, a['href'])).encode('utf-8')
                    self.download_single(
                        basefile, usecache=usecache, url=suburl)
                # NOTE(review): "Besult" looks like a typo for "Beslut",
                # but it may mirror a typo on the scraped site -- confirm
                # against the live page before changing it.
                elif a.text == "Besult":
                    basefile = a.findParent(
                        "td").findPreviousSibling("td").find("a").text
                    self.log.debug(
                        "Will download beslut to %s (later)" % basefile)
                elif a.text == "Bilaga":
                    basefile = a.findParent(
                        "td").findPreviousSibling("td").find("a").text
                    self.log.debug(
                        "Will download bilaga to %s (later)" % basefile)
                elif a.text == "Rättelseblad":
                    basefile = a.findParent(
                        "td").findPreviousSibling("td").find("a").text
                    self.log.debug(
                        "Will download rättelseblad to %s (later)" % basefile)
                else:
                    self.log.debug("I don't know what to do with %s" % a.text)
            else:
                # Non-PDF link: hand it back to the caller for crawling.
                suburl = urllib.parse.urljoin(url, a['href'])
                extrapages.append(suburl)
        return extrapages
class DVFS(MyndFskr):
    """Domstolsverkets författningssamling (DVFS).

    NOTE(review): only the alias is defined -- no start_url or download
    logic is visible here; presumably a stub relying entirely on the
    MyndFskr base class. Confirm before use.
    """
    alias = "dvfs"
class FFFS(MyndFskr):
    """Finansinspektionens författningssamling (FFFS).

    The agency's index lists each regulation as a NUMMER/TYP/RUBRIK div
    triple; a snippet of those divs is saved next to the PDF so that
    download_single can later locate the actual document link.
    """
    alias = "fffs"
    start_url = "http://www.fi.se/Regler/FIs-forfattningar/Forteckning-FFFS/"
    document_url = "http://www.fi.se/Regler/FIs-forfattningar/Samtliga-forfattningar/%s/"

    def download_everything(self, usecache=False):
        """Scan the listing page and download every regulation found."""
        self.browser.open(self.start_url)
        soup = BeautifulSoup(self.browser.response())
        main = soup.find(id="mainarea")
        docs = []
        for numberlabel in main.findAll(text='NUMMER'):
            numberdiv = numberlabel.findParent('div').parent
            # The TYP and RUBRIK divs are expected as immediate siblings;
            # bail out on this entry if the page layout doesn't match.
            typediv = numberdiv.findNextSibling()
            if util.element_text(typediv.find('div', 'FFFSListAreaLeft')) != "TYP":
                self.log.error("Expected TYP in div, found %s" %
                               util.element_text(typediv))
                continue
            titlediv = typediv.findNextSibling()
            if util.element_text(titlediv.find('div', 'FFFSListAreaLeft')) != "RUBRIK":
                self.log.error("Expected RUBRIK in div, found %s" %
                               util.element_text(titlediv))
                continue
            number = util.element_text(
                numberdiv.find('div', 'FFFSListAreaRight'))
            # NOTE(review): mktemp() is deprecated/insecure (name race);
            # consider tempfile.NamedTemporaryFile if this is revisited.
            tmpfile = mktemp()
            snippetfile = self.downloaded_path(
                number).replace(".pdf", ".snippet.html")
            fp = codecs.open(tmpfile, "w", encoding="utf-8")
            fp.write(str(numberdiv))
            fp.write(str(typediv))
            fp.write(str(titlediv))
            fp.close()
            # Only replace the snippet when it changed, keeping mtimes stable.
            util.replace_if_different(tmpfile, snippetfile)
            self.download_single(number, usecache)

    def download_single(self, basefile, usecache=False):
        """Download one regulation, using the saved snippet to find the
        PDF link (directly, or via a separate description page)."""
        self.log.debug("%s: download_single..." % basefile)
        pdffile = self.downloaded_path(basefile)
        existed = os.path.exists(pdffile)
        if usecache and existed:
            self.log.debug("%s: already exists, not downloading" % basefile)
            return
        snippetfile = pdffile.replace(".pdf", ".snippet.html")
        descriptionfile = pdffile.replace(".pdf", ".html")
        soup = BeautifulSoup(open(snippetfile))
        # The document link sits in the div preceding the RUBRIK label.
        href = soup.find(text="RUBRIK").findParent(
            "div").findPreviousSibling().find('a')['href']
        url = urllib.parse.urljoin("http://www.fi.se/Regler/FIs-forfattningar/Forteckning-FFFS/", href)
        if href.endswith(".pdf"):
            if self.download_if_needed(url, pdffile):
                if existed:
                    self.log.info("%s: downloaded new version from %s" %
                                  (basefile, url))
                else:
                    self.log.info("%s: downloaded from %s" % (basefile, url))
        elif "/Samtliga-forfattningar/" in href:
            # Separate description page: fetch it and follow each link on
            # it according to its link text.
            self.log.debug("%s: Separate page" % basefile)
            self.download_if_needed(url, descriptionfile)
            soup = BeautifulSoup(open(descriptionfile))
            for link in soup.find("div", id="mainarea").findAll("a"):
                suburl = urllib.parse.urljoin(
                    url, link['href']).replace(" ", "%20")
                if link.text == 'Grundförfattning':
                    if self.download_if_needed(suburl, pdffile):
                        self.log.info("%s: downloaded main PDF" % basefile)
                elif link.text == 'Konsoliderad version':
                    conspdffile = pdffile.replace(".pdf", "_k.pdf")
                    if self.download_if_needed(suburl, conspdffile):
                        self.log.info(
                            "%s: downloaded consolidated PDF" % basefile)
                elif link.text == 'Ändringsförfattning':
                    self.log.info("Skipping change regulation")
                elif link['href'].endswith(".pdf"):
                    # Any other PDF (appendices etc.) is saved alongside
                    # the main file with its own filename suffix.
                    filename = link['href'].split("/")[-1]
                    otherpdffile = pdffile.replace(".pdf", "-" + filename)
                    if self.download_if_needed(suburl, otherpdffile):
                        self.log.info("%s: downloaded '%s' to %s" %
                                      (basefile, link.text, otherpdffile))
        else:
            self.log.warning("%s: No idea!" % basefile)
class ELSAKFS(MyndFskr):
    """Elsäkerhetsverkets författningssamling (ELSÄK-FS)."""
    alias = "elsakfs"  # real name is ELSÄK-FS, but avoid swedchars, uppercase and dashes
    uri_slug = "elsaek-fs"  # transliterated form -- presumably for URI minting; TODO confirm where it is read
    start_url = "http://www.elsakerhetsverket.se/sv/Lag-och-ratt/Foreskrifter/Elsakerhetsverkets-foreskrifter-listade-i-nummerordning/"
class NFS(MyndFskr):
    """Naturvårdsverkets författningssamling (NFS)."""
    # (The original comment here, "real name is ELSÄK-FS...", was
    # copy-pasted from the ELSAKFS class and did not apply to NFS.)
    alias = "nfs"
    start_url = "http://www.naturvardsverket.se/sv/Start/Lagar-och-styrning/Foreskrifter-och-allmanna-rad/Foreskrifter/"
class STAFS(MyndFskr):
    """Swedacs författningssamling (STAFS)."""
    alias = "stafs"
    # Matches identifiers like "STAFS 2011:4" (also with /, _ or - separators).
    re_identifier = re.compile('STAFS (\d{4})[:/_-](\d+)')
    start_url = "http://www.swedac.se/sv/Det-handlar-om-fortroende/Lagar-och-regler/Alla-foreskrifter-i-nummerordning/"

    def download_everything(self, usecache=False):
        self.browser.open(self.start_url)
        # NOTE(review): the [0:1] slice restricts the crawl to the very
        # first /STAFS/ link -- looks like a debugging leftover; confirm
        # before removing it.
        for link in list(self.browser.links(url_regex='/STAFS/'))[0:1]:
            basefile = re.search('\d{4}:\d+', link.text).group(0)
            self.download_single(basefile, usecache, link.absolute_url)

    def download_single(self, basefile, usecache=False, url=None):
        """Download a regulation page at *url*: every PDF linked there,
        PDFs one level further down, and finally the consolidated
        version (if any) under a basefile derived from the newest
        identifier seen."""
        self.log.info("%s: %s" % (basefile, url))
        self.browser.open(url)
        consolidated_link = None
        newest = None
        for link in self.browser.links(text_regex=self.re_identifier):
            # for link in self.browser.links():
            self.log.info(" %s: %s %s" % (basefile, link.text, link.url))
            if "konso" in link.text:
                consolidated_link = link
            else:
                m = self.re_identifier.search(link.text)
                assert m
                if link.url.endswith(".pdf"):
                    basefile = m.group(1) + ":" + m.group(2)
                    filename = self.downloaded_path(basefile)
                    self.log.info(" Downloading to %s" % filename)
                    self.download_if_needed(link.absolute_url, filename)
                    # NOTE(review): string comparison -- "2011:9" sorts
                    # after "2011:10", and comparing against the initial
                    # None raises TypeError on Python 3; confirm intended
                    # semantics before relying on "newest".
                    if basefile > newest:
                        self.log.debug(
                            "%s larger than %s" % (basefile, newest))
                        consolidated_basefile = basefile + \
                            "/konsoliderad/" + basefile
                        newest = basefile
                    else:
                        self.log.debug(
                            "%s not larger than %s" % (basefile, newest))
                else:
                    # not pdf - link to yet another pg
                    self.browser.follow_link(link)
                    for sublink in self.browser.links(text_regex=self.re_identifier):
                        self.log.info(" Sub %s: %s %s" %
                                      (basefile, sublink.text, sublink.url))
                        m = self.re_identifier.search(sublink.text)
                        assert m
                        if sublink.url.endswith(".pdf"):
                            subbasefile = m.group(1) + ":" + m.group(2)
                            subfilename = self.downloaded_path(subbasefile)
                            self.log.info(
                                " SubDownloading to %s" % subfilename)
                            self.download_if_needed(
                                sublink.absolute_url, subfilename)
                    self.browser.back()
        if consolidated_link:
            # NOTE(review): consolidated_basefile is only bound when a
            # direct PDF link was seen above -- a consolidated link with
            # no PDF sibling would raise NameError here.
            filename = self.downloaded_path(consolidated_basefile)
            self.log.info(" Downloading consd to %s" % filename)
            self.download_if_needed(consolidated_link.absolute_url, filename)
| Python |
# flake8: noqa
from rdflib import Namespace
RPUBL = Namespace('http://rinfo.lagrummet.se/ns/2008/11/rinfo/publ#')
from .swedishlegalsource import SwedishLegalSource
from .regeringen import Regeringen
from .riksdagen import Riksdagen
from .arn import ARN
from .direktiv import Direktiv
from .ds import Ds
from .dv import DV
from .jk import JK
from .jo import JO
from .kommitte import Kommitte
from .myndfskr import MyndFskr
from .propositioner import Propositioner
from .sfs import SFS
from .skvfs import SKVFS
from .sou import SOU
| Python |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
import sys
import os
import re
import datetime
from collections import deque, defaultdict
import xml.etree.cElementTree as ET
import xml.etree.ElementTree as PET
from pprint import pprint
from operator import itemgetter
import subprocess
from rdflib import Namespace, URIRef, Literal, RDF, BNode
from rdflib import Graph
from rdflib.collection import Collection
try:
from whoosh import analysis, fields, formats, query, qparser, scoring
from whoosh.filedb.filestore import RamStorage, FileStorage
whoosh_available = True
except ImportError:
whoosh_available = False
try:
import networkx
from networkx.algorithms.link_analysis.pagerank_alg import pagerank
from networkx.algorithms.link_analysis.hits_alg import hits
networkx_available = True
except ImportError:
networkx_available = False
try:
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import numpy.numarray as na
matplotlib_available = True
except ImportError:
matplotlib_available = False
from ferenda import TripleStore
from ferenda import DocumentRepository
from ferenda import util
from ferenda import legaluri
from ferenda.legalref import LegalRef, Link
from ferenda.elements import UnicodeElement, CompoundElement, OrdinalElement, serialize
__version__ = (1, 6)
__author__ = "Staffan Malmgren <staffan@tomtebo.org>"
# The general outline of a treaty is:
# <Body> C
# <Paragraph> C (unicode/Link) - starting and ending titles
# <Preamble> C
# <Paragraph> - the typographic term, aka "Stycke"
# <Part> CO - not present for TEU
# <Title> CO
# <Chapter> CO
# <Section> CO
# <Article> CO
# <Subarticle> CO
# <Paragraph> C
# <unicode>
# <Link>
# <UnorderedList leader="dash"> C
# <ListItem> C
# <OrderedList type="letter"> CO
class IDElement(object):
    """Mixin giving document elements a fragment id and RDFa attributes."""
    # Fragment identifier (e.g. "A7"); assigned by process_body.
    id = None
    # RDFa attribute dict ('id', 'about', 'typeof'); assigned by process_body.
    attrs = None
class Body(CompoundElement, IDElement):
    """Root container for a parsed treaty document."""
    pass
class Paragraph(CompoundElement, IDElement):
    """A typographic paragraph ("stycke") of text and/or links."""
    pass
class Preamble(CompoundElement, IDElement):
    """The treaty preamble, holding its introductory paragraphs."""
    pass
class Part(CompoundElement, IDElement, OrdinalElement):
    """A numbered PART of the treaty (not present in the TEU)."""
    pass
class Title(CompoundElement, IDElement, OrdinalElement):
    """A roman-numbered TITLE within a part (or at top level)."""
    pass
class Chapter(CompoundElement, IDElement, OrdinalElement):
    """A numbered CHAPTER within a title."""
    pass
class Section(CompoundElement, IDElement, OrdinalElement):
    """A numbered SECTION within a chapter."""
    pass
class Article(CompoundElement, IDElement, OrdinalElement):
    """A numbered treaty article; an addressable RDF resource."""
    fragment_label = "A"  # fragment ids look like "A7" (see process_body)
    rdftype = "eurlex:Article"
class Subarticle(CompoundElement, IDElement, OrdinalElement):
    """A numbered subdivision of an article; an addressable RDF resource."""
    fragment_label = "P"  # fragment ids look like "A7P2" (see process_body)
    rdftype = "eurlex:Subarticle"
class UnorderedList(CompoundElement, IDElement):
    """A dash-style bullet list of ListItems."""
    pass
class OrderedList(CompoundElement, IDElement, OrdinalElement):
    """A lettered or roman-numbered list of ListItems."""
    pass
class ListItem(CompoundElement, IDElement):
    """A single item of an ordered or unordered list; addressable."""
    fragment_label = "L"  # fragment ids use "L<n>" (see process_body)
    rdftype = "eurlex:ListItem"
# RDF namespaces used when building the metadata graph.
DCT = Namespace(util.ns['dct'])  # Dublin Core terms
XSD = Namespace(util.ns['xsd'])  # XML Schema datatypes
RINFOEX = Namespace("http://lagen.nu/terms#")
EX = Namespace(URIRef("http://www.example.org/"))
class EurlexTreaties(DocumentRepository):
    """Downloads and parses the consolidated EU treaties (TEU and TFEU)
    from the EUR-Lex HTML edition of OJ C 115/2008."""
    # overrides of superclass variables
    module_dir = "eut"  # European Union Treaties
    start_url = "http://eur-lex.europa.eu/LexUriServ/LexUriServ.do?uri=OJ:C:2008:115:0001:01:EN:HTML"
    document_url = "http://eur-lex.europa.eu/LexUriServ/LexUriServ.do?uri=OJ:C:2008:115:0001:01:EN:HTML#%s"
    source_encoding = "utf-8"
    # own class variables
    vocab_url = "http://lagen.nu/eurlex#"
################################################################
# Downloading
def download_everything(self, cache=False):
self.log.info("Hello")
self.download_single("teu")
self.download_single("tfeu")
################################################################
# Parsing
re_part = re.compile("PART (ONE|TWO|THREE|FOUR|FIVE|SIX|SEVEN)$").match
re_title = re.compile("TITLE ([IVX]+)$").match
re_chapter = re.compile("CHAPTER (\d+)$").match
re_section = re.compile("SECTION (\d+)$").match
re_article = re.compile("Article (\d+)$").match
re_subarticle = re.compile("^(\d+)\. ").search
re_unorderedliststart = re.compile("^- ").search
re_orderedliststart = re.compile("^\(\w\) ").search
re_romanliststart = re.compile("^\([ivx]+\) ").search
ordinal_list = ('ONE', 'TWO', 'THREE', 'FOUR', 'FIVE', 'SIX', 'SEVEN',
'EIGHT', 'NINE', 'TEN', 'ELEVEN', 'TWELVE')
ordinal_dict = dict(
list(zip(ordinal_list, list(range(1, len(ordinal_list) + 1)))))
# Example code from http://www.diveintopython.org/
roman_numeral_map = (('M', 1000),
('CM', 900),
('D', 500),
('CD', 400),
('C', 100),
('XC', 90),
('L', 50),
('XL', 40),
('X', 10),
('IX', 9),
('V', 5),
('IV', 4),
('I', 1))
def _from_roman(self, s):
"""convert Roman numeral to integer"""
result = 0
index = 0
for numeral, integer in self.roman_numeral_map:
while s[index:index + len(numeral)] == numeral:
result += integer
index += len(numeral)
return result
    def parse_from_soup(self, soup, basefile):
        """Parse one treaty ("teu" or "tfeu") from its EUR-Lex HTML soup.

        The treaty text sits between 50-dash separator lines; each <p>
        becomes one line fed to the recursive make_* builders. Returns a
        dict with the metadata graph, body, language and URI.

        NOTE(review): startnode is only assigned for "teu"/"tfeu"; any
        other basefile raises NameError below.
        """
        g = Graph()
        g.bind('dct', self.ns['dct'])
        self.log.info("%s: Parsing" % basefile)
        if basefile == "teu":
            # FIXME: Use a better base URI?
            uri = 'http://lagen.nu/ext/celex/12008M'
            # The TEU text starts after the second 50-dash separator.
            startnode = soup.findAll(text="-" * 50)[1].parent
            g.add((URIRef(uri), DCT['title'], Literal(
                "Treaty on European Union", lang="en")))
        elif basefile == "tfeu":
            uri = 'http://lagen.nu/ext/celex/12008E'
            # The TFEU text starts after the third 50-dash separator.
            startnode = soup.findAll(text="-" * 50)[2].parent
            g.add((URIRef(uri), DCT['title'], Literal("Treaty on the Functioning of the European Union", lang="en")))
        lines = deque()
        for p in startnode.findNextSiblings("p"):
            if p.string == "-" * 50:
                self.log.info("found the end")
                break
            else:
                if p.string:
                    lines.append(str(p.string))
        self.log.info("%s: Found %d lines" % (basefile, len(lines)))
        body = self.make_body(lines)
        # Assign fragment ids / RDFa attributes throughout the tree.
        self.process_body(body, '', uri)
        # print serialize(body)
        return {'meta': g,
                'body': body,
                'lang': 'en',
                'uri': uri}
    # To make Paragraph and our other stuff available to Genshi
    def get_globals(self):
        """Expose this module's globals (element classes etc.) to templates."""
        return globals()
    def make_body(self, lines):
        """Top of the recursive-descent builder: consume *lines* (a deque)
        and return the document Body.

        Each make_* helper peeks via popleft/appendleft: a line that
        belongs to a different structural level is pushed back so the
        appropriate builder can consume it.
        """
        b = Body()
        while lines:
            line = lines.popleft()
            if line == "PREAMBLE":
                b.append(self.make_preamble(lines))
            elif self.re_title(line):
                lines.appendleft(line)
                b.append(self.make_title(lines))
            elif self.re_part(line):
                lines.appendleft(line)
                b.append(self.make_part(lines))
            else:
                # Anything unrecognized at top level (e.g. the starting
                # and ending titles) becomes a plain Paragraph.
                b.append(Paragraph([line]))
                # print type(b[-1])
        return b
    def make_preamble(self, lines):
        """Consume paragraphs into a Preamble until a PART or TITLE
        heading is encountered (which is pushed back for the caller)."""
        p = Preamble(title="PREAMBLE")
        while lines:
            line = lines.popleft()
            if (self.re_part(line) or self.re_title(line)):
                lines.appendleft(line)
                return p
            else:
                p.append(Paragraph([line]))
        # Only reached if the document ends inside the preamble.
        self.log.warn("make_preamble ran out of lines!")
        return p
    def make_part(self, lines):
        """Build a Part from its heading line + title line, then consume
        nested Titles and Articles until the next PART heading."""
        partnumber = lines.popleft()
        # "PART ONE" etc. -> numeric ordinal via the spelled-out map.
        ordinal = self.ordinal_dict[self.re_part(partnumber).group(1)]
        parttitle = lines.popleft()
        p = Part(ordinal=ordinal, ordinaltitle=partnumber, title=parttitle)
        while lines:
            line = lines.popleft()
            if (self.re_part(line)):
                # Next part begins: push back and finish this one.
                lines.appendleft(line)
                return p
            elif (self.re_title(line)):
                lines.appendleft(line)
                p.append(self.make_title(lines))
            elif (self.re_article(line)):
                # print "make_part: %s matches article" % line
                lines.appendleft(line)
                p.append(self.make_article(lines))
            else:
                p.append(Paragraph([line]))
                self.log.warn(
                    "make_part appended naked Paragraph '%s...'" % line[:25])
        return p
    def make_title(self, lines):
        """Build a Title ("TITLE IV" + its caption), consuming nested
        Chapters and Articles until the next PART/TITLE heading."""
        titlenumber = lines.popleft()
        # Roman numeral ("IV") -> integer ordinal.
        ordinal = self._from_roman(self.re_title(titlenumber).group(1))
        titletitle = lines.popleft()
        t = Title(ordinal=ordinal, ordinaltitle=titlenumber, title=titletitle)
        while lines:
            line = lines.popleft()
            if (self.re_part(line) or self.re_title(line)):
                # A same-or-higher level heading ends this title.
                lines.appendleft(line)
                return t
            elif (self.re_chapter(line)):
                lines.appendleft(line)
                t.append(self.make_chapter(lines))
            elif (self.re_article(line)):
                # print "make_title: %s matches article" % line
                lines.appendleft(line)
                t.append(self.make_article(lines))
            else:
                t.append(Paragraph([line]))
                self.log.warn(
                    "make_title appended naked Paragraph '%s...'" % line[:25])
        return t
    def make_chapter(self, lines):
        """Build a Chapter, consuming nested Sections and Articles until
        the next PART/TITLE/CHAPTER heading."""
        chapternumber = lines.popleft()
        ordinal = int(self.re_chapter(chapternumber).group(1))
        chaptertitle = lines.popleft()
        c = Chapter(
            ordinal=ordinal, ordinaltitle=chapternumber, title=chaptertitle)
        while lines:
            line = lines.popleft()
            if (self.re_part(line) or
                    self.re_title(line) or
                    self.re_chapter(line)):
                # Same-or-higher level heading: push back and finish.
                lines.appendleft(line)
                return c
            elif (self.re_section(line)):
                lines.appendleft(line)
                c.append(self.make_section(lines))
            elif (self.re_article(line)):
                # print "make_chapter: %s matches article" % line
                lines.appendleft(line)
                c.append(self.make_article(lines))
            else:
                c.append(Paragraph([line]))
                self.log.warn("make_chapter appended naked Paragraph '%s...'" %
                              line[:25])
        return c
    def make_section(self, lines):
        """Build a Section, consuming nested Articles until the next
        PART/TITLE/CHAPTER/SECTION heading."""
        sectionnumber = lines.popleft()
        ordinal = int(self.re_section(sectionnumber).group(1))
        sectiontitle = lines.popleft()
        s = Section(
            ordinal=ordinal, ordinaltitle=sectionnumber, title=sectiontitle)
        while lines:
            line = lines.popleft()
            if (self.re_part(line) or
                    self.re_title(line) or
                    self.re_chapter(line) or
                    self.re_section(line)):
                # Same-or-higher level heading: push back and finish.
                lines.appendleft(line)
                return s
            elif (self.re_article(line)):
                # print "make_section: %s matches article" % line
                lines.appendleft(line)
                s.append(self.make_article(lines))
            else:
                s.append(Paragraph([line]))
                self.log.warn("make_section appended naked Paragraph '%s...'" %
                              line[:25])
        return s
    def make_article(self, lines):
        """Build an Article, optionally capturing an "(ex Article ...)"
        cross-reference line, then consuming subarticles, lists and
        paragraphs until any higher-level heading."""
        articlenumber = lines.popleft()
        ordinal = int(self.re_article(articlenumber).group(1))
        self.log.info("Making article: %s" % ordinal)
        # The renumbered treaties carry an "(ex Article N TEU)" line
        # right after the heading; push it back if it isn't one.
        exarticlenumber = lines.popleft()
        if not exarticlenumber.startswith("(ex Article"):
            lines.appendleft(exarticlenumber)
            a = Article(ordinal=ordinal, ordinaltitle=articlenumber)
        else:
            a = Article(ordinal=ordinal, ordinaltitle=articlenumber,
                        exarticlenumber=exarticlenumber)
        while lines:
            line = lines.popleft()
            if (self.re_part(line) or
                    self.re_title(line) or
                    self.re_chapter(line) or
                    self.re_section(line) or
                    self.re_article(line)):
                # Any structural heading ends the article.
                lines.appendleft(line)
                return a
            elif (self.re_subarticle(line)):
                lines.appendleft(line)
                a.append(self.make_subarticle(lines))
            elif (self.re_unorderedliststart(line)):
                lines.appendleft(line)
                a.append(self.make_unordered_list(lines, "dash"))
            elif (self.re_orderedliststart(line)):
                lines.appendleft(line)
                a.append(self.make_ordered_list(lines, "lower-alpha"))
            else:
                # print "Appending %s" % line[:40]
                a.append(Paragraph([line]))
        return a
    def make_subarticle(self, lines):
        """Build a Subarticle ("1. ...", "2. ..."), consuming lists and
        paragraphs until a heading or a differently-numbered subarticle."""
        line = lines.popleft()
        subarticlenum = int(self.re_subarticle(line).group(1))
        # self.log.info("Making subarticle %d: %s" % (subarticlenum, line[:30]))
        s = Subarticle(ordinal=subarticlenum)
        # The numbered line itself is re-consumed by the loop below.
        lines.appendleft(line)
        while lines:
            line = lines.popleft()
            if (self.re_part(line) or
                    self.re_title(line) or
                    self.re_chapter(line) or
                    self.re_section(line) or
                    self.re_article(line)):
                lines.appendleft(line)
                return s
            elif (self.re_subarticle(line) and
                    int(self.re_subarticle(line).group(1)) != subarticlenum):
                # Next subarticle begins: push back and finish.
                lines.appendleft(line)
                return s
            elif (self.re_unorderedliststart(line)):
                lines.appendleft(line)
                s.append(self.make_unordered_list(lines, "dash"))
            elif (self.re_orderedliststart(line)):
                lines.appendleft(line)
                s.append(self.make_ordered_list(lines, "lower-alpha"))
            else:
                # this is OK
                s.append(Paragraph([line]))
        return s
def make_unordered_list(self, lines, style):
ul = UnorderedList(style=style)
while lines:
line = lines.popleft()
if not self.re_unorderedliststart(line):
lines.appendleft(line)
return ul
else:
ul.append(ListItem([line]))
return ul
    def make_ordered_list(self, lines, style):
        """Collect "(a)"/"(i)" items into an OrderedList of *style*
        ("lower-alpha" or "lower-roman").

        A roman marker inside an alpha list starts a nested roman
        sublist (recursion); an alpha marker inside a roman list pops
        back up to the enclosing alpha list. Any other line ends the
        list and is pushed back for the caller.
        """
        ol = OrderedList(style=style)
        while lines:
            line = lines.popleft()
            # try romanliststart before orderedliststart -- (i) matches
            # both, but is likely the former
            if self.re_romanliststart(line):
                # print "make_ordered_list: re_romanliststart: %s" % line[:40]
                if style == "lower-roman":
                    ol.append(ListItem([line]))
                else:
                    # Roman item inside an alpha list: recurse into a
                    # nested roman sublist.
                    lines.appendleft(line)
                    ol.append(self.make_ordered_list(lines, "lower-roman"))
            elif self.re_orderedliststart(line):
                # print "make_ordered_list: re_orderedliststart: %s" % line[:40]
                if style == "lower-alpha":
                    ol.append(ListItem([line]))
                else:  # we were in a roman-style sublist, so we should pop up
                    lines.appendleft(line)
                    return ol
            else:
                # print "make_ordered_list: done: %s" % line[:40]
                lines.appendleft(line)
                return ol
        return ol
# Post-process the document tree in a recursive fashion in order to:
#
# Find addressable units (resources that should have unique URI:s,
# e.g. articles and subarticles) and construct IDs for them, like
# "A7", "A25(b)(ii)" (or A25S1P2N2 or...?)
#
# How should we handle Articles themselves -- they have individual
# CELEX numbers and therefore URIs (but subarticles don't)?
    def process_body(self, element, prefix, baseuri):
        """Recursively walk the parsed document tree, assigning fragment ids
        and RDFa attributes (id/about/typeof) to every child carrying a
        ``fragment_label`` attribute, i.e. addressable resources such as
        articles and subarticles.

        *prefix* is the accumulated fragment id of the enclosing elements
        (e.g. "A25"), *baseuri* the corresponding URI prefix."""
        if isinstance(element, str):
            # Plain text nodes carry no substructure.
            return
        # print "Starting with " + str(type(element))
        # Per-child-type ordinal counters, used as a fallback when a child
        # carries no explicit 'ordinal' attribute.
        counters = defaultdict(int)
        for p in element:
            counters[type(p)] += 1
            # print "handling " + str(type(p))
            if hasattr(p, 'fragment_label'): # this is an addressable resource
                elementtype = p.fragment_label
                if hasattr(p, 'ordinal'):
                    elementordinal = p.ordinal
                else:
                    elementordinal = counters[type(p)]
                fragment = "%s%s%s" % (prefix, elementtype, elementordinal)
                # Articles ("A") get zero-padded CELEX-style URIs; other
                # element types append "<type><ordinal>" to the base URI.
                if elementtype == "A":
                    uri = "%s%03d" % (baseuri, elementordinal)
                else:
                    uri = "%s%s%s" % (baseuri, elementtype, elementordinal)
                p.id = fragment
                p.attrs = {'id': p.id,
                           'about': uri,
                           'typeof': p.rdftype}
                if elementtype == "A":
                    # Children of an article hang off its URI fragment;
                    # this only affects the recursive call below, not the
                    # 'about' attribute already set.
                    uri += "#"
            else:
                fragment = prefix
                uri = baseuri
            self.process_body(p, fragment, uri)
################################################################
# Generating
    def prep_annotation_file(self, basefile):
        """Build an annotation file for *basefile* containing, for every
        article in the parsed document, three RDF collections: the baseline
        ranked set, the gold standard set and an (unrestricted) PageRank
        ranked set. Returns the path produced by graph_to_annotation_file.

        NOTE(review): baseline_map and pagerank_map are computed but never
        used here (the calls do emit AP log output as a side effect)."""
        goldstandard = self.eval_get_goldstandard(basefile)
        baseline_set = self.eval_get_ranked_set_baseline(basefile)
        baseline_map = self.eval_calc_map(
            self.eval_calc_aps(baseline_set, goldstandard))
        self.log.info("Calculating ranked set (pagerank, unrestricted)")
        pagerank_set = self.eval_get_ranked_set(basefile, "pagerank",
                                                age_compensation=False,
                                                restrict_cited=False)
        pagerank_map = self.eval_calc_map(
            self.eval_calc_aps(pagerank_set, goldstandard))
        sets = [{'label':'Baseline',
                 'data':baseline_set},
                {'label':'Gold standard',
                 'data':goldstandard},
                {'label':'PageRank',
                 'data': pagerank_set}]
        g = Graph()
        g.bind('dct', self.ns['dct'])
        g.bind('rinfoex', self.ns['rinfoex'])
        # Collect the article ordinals present in the parsed XHTML document.
        XHT_NS = "{http://www.w3.org/1999/xhtml}"
        tree = ET.parse(self.parsed_path(basefile))
        els = tree.findall("//" + XHT_NS + "div")
        articles = []
        for el in els:
            if 'typeof' in el.attrib and el.attrib['typeof'] == "eurlex:Article":
                # id is of the form "A<ordinal>"; strip the leading "A".
                article = str(el.attrib['id'][1:])
                articles.append(article)
        for article in articles:
            self.log.info("Results for article %s" % article)
            articlenode = URIRef(
                "http://lagen.nu/ext/celex/12008E%03d" % int(article))
            resultsetcollectionnode = BNode()
            g.add((resultsetcollectionnode, RDF.type, RDF.List))
            rc = Collection(g, resultsetcollectionnode)
            g.add((articlenode, DCT["relation"], resultsetcollectionnode))
            # One RelatedContentCollection per result set, each holding an
            # RDF list of (reference, title) result nodes.
            for s in sets:
                resultsetnode = BNode()
                listnode = BNode()
                rc.append(resultsetnode)
                g.add((resultsetnode, RDF.type, RINFOEX[
                    "RelatedContentCollection"]))
                g.add((resultsetnode, DCT["title"], Literal(s["label"])))
                g.add((resultsetnode, DCT["hasPart"], listnode))
                c = Collection(g, listnode)
                g.add((listnode, RDF.type, RDF.List))
                if article in s['data']:
                    print((" Set %s" % s['label']))
                    for result in s['data'][article]:
                        resnode = BNode()
                        g.add((resnode, DCT["references"], Literal(result[0])))
                        g.add((resnode, DCT["title"], Literal(result[1])))
                        c.append(resnode)
                        print((" %s" % result[1]))
        return self.graph_to_annotation_file(g, basefile)
    def graph_to_image(self, graph, imageformat, filename):
        """Render an RDF *graph* as an image in *imageformat* at *filename*,
        via pydot/graphviz.

        Node labels are derived by slicing each term's repr(); the slice
        offsets assume particular rdflib repr formats for BNode/Literal/
        URIRef -- NOTE(review): these offsets look rdflib-version-dependent,
        verify against the installed rdflib."""
        import pydot
        import rdflib
        dot = pydot.Dot()
        # dot.progs = {"dot": "c:/Program Files/Graphviz2.26.3/bin/dot.exe"}
        # code from rdflib.util.graph_to_dot, but adjusted to handle unicode
        nodes = {}
        for s, o in graph.subject_objects():
            for i in s, o:
                if i not in list(nodes.keys()):
                    if isinstance(i, rdflib.BNode):
                        nodes[i] = repr(i)[7:]
                    elif isinstance(i, rdflib.Literal):
                        nodes[i] = repr(i)[16:-1]
                    elif isinstance(i, rdflib.URIRef):
                        nodes[i] = repr(i)[22:-2]
        # One graphviz edge per triple; the predicate becomes the edge label.
        for s, p, o in graph.triples((None, None, None)):
            dot.add_edge(pydot.Edge(nodes[s], nodes[o], label=repr(p)[22:-2]))
        self.log.debug("Writing %s format to %s" % (imageformat, filename))
        util.ensure_dir(filename)
        dot.write(path=filename, prog="dot", format=imageformat)
        self.log.debug("Wrote %s" % filename)
################################################################
# Analyzing
top_articles = []
graph_filetype = "png"
    # Returns a list of Article URIs (despite the name of the old comment
    # here, this is a plain list, not an iterator/generator).
    def _articles(self, basefile):
        """Return the article URIs to process/evaluate (currently a
        hardcoded list of the articles we have gold standard sets for)."""
        # Those articles we have gold standard sets for now
        self.top_articles = ['http://lagen.nu/ext/celex/12008E263',
                             'http://lagen.nu/ext/celex/12008E101',
                             'http://lagen.nu/ext/celex/12008E267',
                             'http://lagen.nu/ext/celex/12008E107',
                             'http://lagen.nu/ext/celex/12008E108',
                             'http://lagen.nu/ext/celex/12008E296',
                             'http://lagen.nu/ext/celex/12008E258',
                             'http://lagen.nu/ext/celex/12008E045',
                             'http://lagen.nu/ext/celex/12008E288',
                             'http://lagen.nu/ext/celex/12008E034',
                             ]
        # For evaluation, only return the 20 top cited articles (which
        # analyze_article_citations incidentally compute for us). For
        # full-scale generation, use commented-out code below.
        # NOTE(review): since top_articles is unconditionally assigned
        # above, this branch is currently dead code.
        if not self.top_articles:
            self.top_articles = self.analyze_article_citations(quiet=True)
        return self.top_articles
        # For full-scale processing, return all articles present in e.g. TFEU:
        # XHT_NS = "{http://www.w3.org/1999/xhtml}"
        #tree = ET.parse(self.parsed_path(basefile))
        #els = tree.findall("//"+XHT_NS+"div")
        #for el in els:
        #    if 'typeof' in el.attrib and el.attrib['typeof'] == "eurlex:Article":
        #        yield el.attrib['about']
# returns a RDFLib.Graph
def _sameas(self):
sameas = Graph()
sameas_rdf = util.relpath(
os.path.dirname(__file__) + "/../res/eut/sameas.n3")
sameas.load(sameas_rdf, format="n3")
return sameas
def _query_cases(self, article, sameas):
pred = util.ns['owl'] + "sameAs"
q = ""
if article:
q += "{ ?subj eurlex:cites <%s> }\n" % article
for equiv in sameas.objects(URIRef(article), URIRef(pred)):
q += " UNION { ?subj eurlex:cites <%s> }\n" % equiv
return """
PREFIX eurlex:<http://lagen.nu/eurlex#>
PREFIX dct:<http://purl.org/dc/terms/>
SELECT DISTINCT ?subj WHERE {
?subj ?pred ?obj .
%s
FILTER (regex(str(?subj), "^http://lagen.nu/ext/celex/6"))
}
""" % (q)
# Returns a python list of dicts
def _query_cites(self, article, sameas, restrict_citing, restrict_cited, year=None):
if not year:
year = datetime.datetime.today().year
pred = util.ns['owl'] + "sameAs"
q = ""
if restrict_citing:
q += "{ ?subj eurlex:cites <%s> }\n" % article
for equiv in sameas.objects(URIRef(article), URIRef(pred)):
q += " UNION { ?subj eurlex:cites <%s> }\n" % equiv
if restrict_cited:
if q:
q += ".\n"
q = "{?obj eurlex:cites <%s>}\n" % article
for equiv in sameas.objects(URIRef(article), URIRef(pred)):
q += " UNION { ?obj eurlex:cites <%s> }\n" % equiv
return """
PREFIX eurlex:<http://lagen.nu/eurlex#>
PREFIX dct:<http://purl.org/dc/terms/>
SELECT DISTINCT ?subj ?pred ?obj ?celexnum WHERE {
?subj ?pred ?obj .
?subj eurlex:celexnum ?celexnum.
%s
FILTER (regex(str(?obj), "^http://lagen.nu/ext/celex/6") &&
?pred = eurlex:cites &&
str(?celexnum) < str("6%s"@en))
}
""" % (q, year)
    def temp_analyze(self):
        """Ad-hoc/scratch analysis: fetch the entire (unrestricted) citation
        graph up to 2012, normalize it, and build an in-degree mapping.

        NOTE(review): the computed ``degree`` dict is discarded -- this
        method looks unfinished and currently has no observable result
        beyond the printed query and debug logging."""
        store = TripleStore(
            self.config['triplestore'], self.config['repository'])
        # sq = self._query_cites('http://lagen.nu/ext/celex/12008E045',self._sameas(),False, True, 2012)
        sq = self._query_cites(None, self._sameas(), False, False, 2012)
        print(sq)
        cites = store.select(sq, format="python")
        self.log.debug(
            " Citation graph contains %s citations" % (len(cites)))
        # remove duplicate citations, self-citations and pinpoints
        # in citations
        citedict = {}
        for cite in cites:
            # print repr(cite)
            if "-" in cite['obj']:
                # Strip the pinpoint suffix so the citation refers to the
                # base case document.
                cite['obj'] = cite['obj'].split("-")[0]
            if (cite['subj'] != cite['obj']):
                citedict[(cite['subj'], cite['obj'])] = True
        self.log.debug(
            " Normalized graph contains %s citations" % len(citedict))
        # Map every case to the list of cases citing it.
        degree = {}
        for citing, cited in list(citedict.keys()):
            if citing not in degree:
                degree[citing] = []
            if cited not in degree:
                degree[cited] = []
            degree[cited].append(citing)
        return
def analyze(self):
articles = self.analyze_article_citations(num_of_articles=10)
# articles = self._articles('tfeu')
self.analyze_baseline_queries(articles)
self.analyze_citation_graphs(articles)
    def analyze_article_citations(self, num_of_articles=20, quiet=False):
        """Print (unless *quiet*) and return a list of the
        *num_of_articles* most important TFEU treaty articles, as
        determined by the number of citing cases. The returned list is
        ordered most-cited first."""
        # Create a mapping of article equivalencies, eg Art 28 TEC == Art 34 TFEU
        sameas = self._sameas()
        equivs = {}
        pred = util.ns['owl'] + "sameAs"
        for (s, o) in sameas.subject_objects(URIRef(pred)):
            equivs[str(o)] = str(s)
        self.log.debug(
            "Defined %s equivalent article references" % len(equivs))
        # Select unique articles citings
        store = TripleStore(
            self.config['triplestore'], self.config['repository'])
        sq = """PREFIX eurlex:<http://lagen.nu/eurlex#>
SELECT DISTINCT ?case ?article WHERE {
    ?case eurlex:cites ?article .
    FILTER (regex(str(?article), "^http://lagen.nu/ext/celex/1"))
}"""
        cites = store.select(sq, format="python")
        citationcount = {}
        unmapped = {}
        self.log.debug("Going through %s unique citations" % len(cites))
        for cite in cites:
            # Strip pinpoint references ("-" suffix) to the base article.
            article = cite['article'].split("-")[0]
            if "12008M" in article:
                # TEU (12008M) articles are kept and counted as-is.
                pass
            elif article in equivs:
                # Map older-treaty articles to their TFEU equivalent.
                article = equivs[article]
            else:
                # No TFEU equivalent known: count as unmapped and skip.
                if article in unmapped:
                    unmapped[article] += 1
                else:
                    unmapped[article] = 1
                article = None
            # Keep track of the number of citing cases
            if article:
                if article in citationcount:
                    citationcount[article] += 1
                else:
                    citationcount[article] = 1
        # Report the most common cites to older treaty articles that
        # we have no equivalents for in TFEU
        # sorted_unmapped = sorted(unmapped.iteritems(), key=itemgetter(1))[-num_of_articles:]
        #if not quiet:
        #    print "UNMAPPED:"
        #    pprint(sorted_unmapped)
        # Report and return the most cited articles
        sorted_citationcount = sorted(iter(list(
            citationcount.items())), key=itemgetter(1))[-num_of_articles:]
        if not quiet:
            print("CITATION COUNTS:")
            pprint(sorted_citationcount)
        return [x[0] for x in reversed(sorted_citationcount)]
    def analyze_baseline_queries(self, analyzed_articles, num_of_keyterms=5):
        """For each article in *analyzed_articles*, find its
        *num_of_keyterms* most distinctive terms (via a temporary in-memory
        whoosh index over the parsed treaty text) and write them out both
        as a LaTeX table and as an n3 file of ir:keyterm triples (later
        consumed by eval_get_ranked_set_baseline)."""
        basefile = "tfeu"
        # Helper from http://effbot.org/zone/element-lib.htm
        def flatten(elem, include_tail=0):
            # Recursively concatenate all text content of an element tree.
            text = elem.text or ""
            for e in elem:
                text += flatten(e, 1)
            if include_tail and elem.tail:
                text += elem.tail
            return text
        # step 1: Create a temporary whoosh index in order to find out
        # the most significant words for each article
        #ana = analysis.StandardAnalyzer()
        ana = analysis.StemmingAnalyzer()
        # vectorformat = formats.Frequency(ana)
        schema = fields.Schema(article=fields.ID(unique=True),
                               content=fields.TEXT(analyzer=ana,
                                                   stored=True))
        st = RamStorage()
        tmpidx = st.create_index(schema)
        w = tmpidx.writer()
        XHT_NS = "{http://www.w3.org/1999/xhtml}"
        tree = ET.parse(self.parsed_path(basefile))
        els = tree.findall("//" + XHT_NS + "div")
        articles = []
        # Index the flattened text of every article div in the treaty.
        for el in els:
            if 'typeof' in el.attrib and el.attrib['typeof'] == "eurlex:Article":
                text = util.normalize_space(flatten(el))
                article = str(el.attrib['about'])
                articles.append(article)
                w.update_document(article=article, content=text)
        w.commit()
        self.log.info("Indexed %d articles" % len(articles))
        # Step 2: Open the large whoosh index containing the text of
        # all cases. Then, for each article, use the 5 most distinctive terms
        # (filtering away numbers) to create a query against that index
        tempsearch = tmpidx.searcher()
        g = Graph()
        g.bind('celex', 'http://lagen.nu/ext/celex/')
        g.bind('ir', 'http://lagen.nu/informationretrieval#')
        IR = Namespace('http://lagen.nu/informationretrieval#')
        # celex:12008E264 ir:keyterm "blahonga"@en.
        outfile = self.generic_path("keyterms", "analyzed", ".tex")
        util.ensure_dir(outfile)
        fp = open(outfile, "w")
        fp.write("""
\\begin{tabular}{r|%s}
  \\hline
  \\textbf{Art.} & \\multicolumn{%s}{l}{\\textbf{Terms}} \\\\
  \\hline
""" % ("l" * num_of_keyterms, num_of_keyterms))
        for article in analyzed_articles:
            fp.write(str(int(article.split("E")[1])))
            r = tempsearch.search(query.Term("article", article))
            # Ask for one extra term so we can drop purely numeric ones.
            terms = r.key_terms("content", numterms=num_of_keyterms + 1)
            terms = [t[0] for t in terms if not t[0].isdigit(
            )][:num_of_keyterms]
            for term in terms:
                fp.write(" & " + term)
                g.add((
                    URIRef(article), IR["keyterm"], Literal(term, lang="en")))
            self.log.debug("Article %s:%r" % (article, terms))
            fp.write("\\\\\n")
        fp.write("""
  \\hline
\\end{tabular}
""")
        fp.close()
        # Also persist the keyterms as RDF for later machine consumption.
        outfile = self.generic_path("keyterms", "analyzed", ".n3")
        util.ensure_dir(outfile)
        fp = open(outfile, "w")
        fp.write(g.serialize(format="n3"))
        fp.close()
    def analyze_citation_graphs(self, articles=None):
        """For each article in *articles* (plus None, meaning the entire
        citation graph), build the normalized case-citation network and
        produce a graphviz dump, a degree-distribution plot and a
        citation/age plot; finally plot the combined per-article degree
        distributions."""
        # Basic setup
        # articles = self._articles('tfeu')[-1:]
        if not articles:
            articles = [None]
        if None not in articles:
            # Always also analyze the full, unrestricted citation graph.
            articles.append(None)
        this_year = datetime.datetime.today().year
        store = TripleStore(
            self.config['triplestore'], self.config['repository'])
        sameas = self._sameas()
        distributions = []
        # For each article (and also for no article = the entire citation graph)
        for article in articles:
            # Get a list of all eligble cases (needed for proper degree distribution)
            sq = self._query_cases(article, sameas)
            # print sq
            cases = {}
            caserows = store.select(sq, format="python")
            for r in caserows:
                cases[r['subj']] = 0
            self.log.info(
                "Creating graphs for %s (%s cases)" % (article, len(cases)))
            # Step 1. SPARQL the graph on the form ?citing ?cited
            # (optionally restricting on citing a particular article)
            if article:
                sq = self._query_cites(
                    article, sameas, True, False, this_year + 1)
            else:
                sq = self._query_cites(
                    None, sameas, False, False, this_year + 1)
            cites = store.select(sq, format="python")
            self.log.debug(
                " Citation graph contains %s citations" % (len(cites)))
            # remove duplicate citations, self-citations and pinpoints
            # in citations
            citedict = {}
            missingcases = {}
            for cite in cites:
                # print repr(cite)
                if "-" in cite['obj']:
                    cite['obj'] = cite['obj'].split("-")[0]
                if not cite['obj'] in cases:
                    # print "Case %s (cited in %s) does not exist!\n" % (cite['obj'], cite['subj'])
                    missingcases[cite['obj']] = True
                    continue
                if (cite['subj'] != cite['obj']):
                    citedict[(cite['subj'], cite['obj'])] = True
            self.log.debug(" Normalized graph contains %s citations (%s cited cases not found)" % (len(citedict), len(missingcases)))
            # pprint(missingcases.keys()[:10])
            # Step 2. Dotify the list (maybe the direction of arrows from
            # cited to citing can improve results?) to create a citation
            # graph
            self.analyse_citegraph_graphviz(list(citedict.keys()), article)
            # Step 3. Create a degree distribution plot
            degree, distribution = self.analyze_citegraph_degree_distribution(
                cases, list(citedict.keys()), article)
            if article:
                distributions.append([article, distribution])
            # Step 4. Create a citation/age scatterplot (or rather hexbin)
            self.analyze_citegraph_citation_age_plot(
                list(citedict.keys()), degree, distribution, article)
        # Step 5. Create a combined degree distribution graph of the
        # distinct citation networks. Also add the degree distribution
        # of gold standard cases
        self.analyze_citegraph_combined_degree_distribution(distributions)
def analyse_citegraph_graphviz(self, cites, article, generate_graph=False):
"""Create a dot file (that can later be processed with dot or gephi)"""
from time import time
filetype = self.graph_filetype
if article:
filename = "citegraph_%s" % article.split("/")[-1]
else:
filename = "citegraph_all"
dot_filename = self.generic_path(filename, "analyzed", ".dot")
self.log.debug(" Writing graphwiz citation graph for %s" % article)
fp = open(dot_filename, "w")
fp.write("""digraph G {
graph [
];
""")
cnt = 0
for citing, cited in cites:
cnt += 1
citing = citing.split("/")[-1]
cited = cited.split("/")[-1]
try:
fp.write(" \"%s\" -> \"%s\" ;\n" % (citing, cited))
except:
pass
fp.write("}")
fp.close()
if generate_graph:
graph_filename = self.generic_path(
dot_filename, "analyzed", "." + filetype)
engine = "dot"
start = time()
cmdline = "%s -T%s -o%s tmp.dot" % (
engine, filetype, graph_filename)
self.log.debug("Running %s" % cmdline)
p = subprocess.Popen(cmdline, shell=True)
ret = p.wait()
self.log.info("Graph %s created in %.3f sec" % (
graph_filename, time() - start))
def analyze_citegraph_degree_distribution(self, cases, cites, article):
self.log.debug(" Writing degree distribution graph")
degree = cases
# self.log.debug(" %s cases, first elements %r" % (len(cases),cases.values()[:5]))
this_year = datetime.datetime.today().year
maxcites = 40
maxage = this_year - 1954
for citing, cited in cites:
if citing not in degree:
degree[citing] = 0
if cited not in degree:
degree[cited] = 0
degree[cited] += 1
distribution = [0] * (max(degree.values()) + 1)
for value in list(degree.values()):
distribution[value] += 1
fig = plt.figure()
fig.set_size_inches(8, 4)
ax = plt.subplot(111)
ax.set_ylabel('Number of cases being cited <x> times')
ax.set_xlabel('Number of citing cases (max %s)' % maxcites)
ax.set_title('Degree distribution of case citations')
filetype = self.graph_filetype
if article:
filename = "degree_distribution_%s" % (article.split("/")[-1])
else:
filename = "degree_distribution_all"
filename = self.generic_path(filename, "analyzed", "." + filetype)
plt.plot(distribution[:maxcites])
plt.savefig(filename)
plt.close()
self.log.debug(" Created %s" % filename)
return (degree, distribution)
def analyze_citegraph_combined_degree_distribution(self, distributions):
self.log.debug(" Writing combined degree distribution graph")
this_year = datetime.datetime.today().year
maxcites = 40
maxnumber = 1000
maxage = this_year - 1954
fig = plt.figure()
fig.set_size_inches(8, 4)
ax = plt.subplot(111)
ax.set_ylabel('Number of cases being cited <x> times')
ax.set_xlabel('Number of citing cases (max %s)' % maxcites)
ax.set_title('Degree distribution of case citations concering specific articles')
filetype = self.graph_filetype
filename = "degree_distribution_combined"
filename = self.generic_path(filename, "analyzed", "." + filetype)
styles = []
for i in range(1, 5):
for j in (['-', '--', '-.', ':']):
#for j in (['-','-','-','-','-']):
styles.append((i, j))
cnt = 0
for (article, distribution) in distributions:
label = article.split("/")[-1].split("E")[1]
self.log.debug(
" Plotting %s %r" % (label, distribution[:4]))
if label.isdigit():
label = "Art. %s" % int(label)
# label += " (%s uncited)" % distribution[0]
lw, ls = styles[cnt]
plt.plot(distribution[:maxcites], label=label,
linestyle=ls, linewidth=lw)
# plt.axis([0,maxcites,0,maxnumber])
plt.legend(loc='best',
markerscale=4,
prop={'size': 'x-small'},
ncol=int(len(distributions) / 6) + 1)
plt.savefig(filename)
plt.close()
self.log.debug(" Created %s" % filename)
    def analyze_citegraph_citation_age_plot(self, cites, degree, distribution, article):
        """Plot how citation counts relate to case age (as a hexbin),
        together with a bar chart of the number of citing cases per age.

        The decision year is read from the case URI: after the 26-char
        "http://lagen.nu/ext/celex/" prefix, chars 27:31 are the four-digit
        year of the 6YYYY... celex number."""
        self.log.debug(" Writing citation age plot")
        this_year = datetime.datetime.today().year
        maxcites = 40
        maxage = this_year - 1954  # oldest cases in the data are from 1954
        cited_by_age = []
        citations = []
        for case in sorted(degree.keys()):
            try:
                year = int(case[27:31])
                caseage = this_year - year
                if year < 1954:
                    continue
            except ValueError:
                # some malformed URIs/Celexnos
                continue
            if degree[case] <= maxcites:
                cited_by_age.append(caseage)
                citations.append(degree[case])
        # Histogram of citing cases per age bucket.
        cases_by_age = [0] * (maxage + 1)
        for citing, cited in cites:
            year = int(citing[27:31])
            caseage = this_year - year
            if year < 1954:
                continue
            if caseage < 0:
                continue
            cases_by_age[caseage] += 1
        fig = plt.figure()
        fig.set_size_inches(8, 5)
        plt.axis([0, maxage, 0, maxcites])
        ax = plt.subplot(211)
        plt.hexbin(cited_by_age, citations, gridsize=maxcites,
                   bins='log', cmap=cm.hot_r)
        #plt.scatter(age,citations)
        ax.set_title("Distribution of citations by age")
        ax.set_ylabel("# of citations")
        #cb = plt.colorbar()
        #cb.set_label('log(# of cases with # of citations)')
        ax = plt.subplot(212)
        ax.set_title("Distribution of cases by age")
        plt.axis([0, maxage, 0, max(cases_by_age)])
        plt.bar(na.array(list(range(len(cases_by_age)))) + 0.5, cases_by_age)
        filetype = self.graph_filetype
        if article:
            filename = "citation_age_plot_%s" % (article.split("/")[-1])
        else:
            filename = "citation_age_plot_all"
        filename = self.generic_path(filename, "analyzed", "." + filetype)
        plt.savefig(filename)
        plt.close()
        self.log.debug(" Created %s" % filename)
################################################################
# Evaluation
def evaluate(self):
result_cache = self.generic_path("result_cache", "eval", ".py")
if os.path.exists(result_cache):
#if False:
self.log.info("Using result cache in %s" % result_cache)
sets = eval(open(result_cache).read())
else:
sets = (
('baseline', self.eval_get_ranked_set_baseline('tfeu')),
('indegree_uncomp_unrestr', self.eval_get_ranked_set(
'tfeu', 'indegree', False, False)),
('indegree_uncomp_restr', self.eval_get_ranked_set(
'tfeu', 'indegree', False, True)),
('indegree_comp_unrestr', self.eval_get_ranked_set(
'tfeu', 'indegree', True, False)),
('indegree_comp_restr',
self.eval_get_ranked_set('tfeu', 'indegree', True, True)),
('hits_uncomp_unrestr',
self.eval_get_ranked_set('tfeu', 'hits', False, False)),
('hits_uncomp_restr',
self.eval_get_ranked_set('tfeu', 'hits', False, True)),
('hits_comp_unrestr',
self.eval_get_ranked_set('tfeu', 'hits', True, False)),
('hits_comp_restr',
self.eval_get_ranked_set('tfeu', 'hits', True, True)),
('pagerank_uncomp_unrestr', self.eval_get_ranked_set(
'tfeu', 'pagerank', False, False)),
('pagerank_uncomp_restr', self.eval_get_ranked_set(
'tfeu', 'pagerank', False, True)),
('pagerank_comp_unrestr', self.eval_get_ranked_set(
'tfeu', 'pagerank', True, False)),
('pagerank_comp_restr',
self.eval_get_ranked_set('tfeu', 'pagerank', True, True)),
)
util.ensure_dir(result_cache)
fp = open(result_cache, "w")
pprint(sets, fp)
fp.close()
aps_cache = self.generic_path("aps_cache", "eval", ".py")
if os.path.exists(aps_cache):
# if False:
self.log.info("Using avg precision cache in %s" % aps_cache)
avg_precisions = eval(open(aps_cache).read())
else:
goldstandard = self.eval_get_goldstandard('tfeu')
avg_precisions = []
f = self.eval_calc_aps
for label, rankedset in sets:
aps = self.eval_calc_aps(rankedset, goldstandard)
avg_precisions.append((label, aps))
fp = open(aps_cache, "w")
pprint(avg_precisions, fp)
fp.close()
self.eval_aps_table(avg_precisions)
if len(avg_precisions) > 5:
maps = []
for label, aps in avg_precisions:
maps.append(self.eval_calc_map(aps))
maps.sort(reverse=True)
thresh = maps[5]
else:
thresh = 0.0
top_avg_precisions = []
for label, aps in avg_precisions:
map_ = self.eval_calc_map(aps)
self.log.info("%25s: MAP %s" % (label, map_))
if (map_ > thresh) or (label == 'baseline'):
top_avg_precisions.append(
("%s: MAP %.3f" % (label, map_), aps))
self.eval_aps_chart(top_avg_precisions)
def eval_calc_aps(self, rankedset, goldstandard):
"""Calculate a set of average precisions for the given set of
result sets for some information needs, compared to the gold
standard for those information needs.
Both rankedset and goldstandard are dicts with lists as values."""
aps = []
for infoneed in list(goldstandard.keys()):
relevants = goldstandard[infoneed]
if relevants:
#self.log.debug(" Calculating AP for %s" % infoneed)
pass
else:
self.log.debug(
" No AP for %s: no gold standard" % (infoneed))
continue
ranking = rankedset[infoneed]
precisions = []
# for each relevant doc in the gold standard, check at
# what position in the ranking the doc occurrs. Check the
# precision of the ranking up to and including that position
for relevant in relevants:
try:
place = ranking.index(relevant)
relevant_cnt = 0
for r in ranking[:place + 1]:
if r in relevants:
relevant_cnt += 1
precision = float(relevant_cnt) / float(place + 1)
#self.log.debug(" Relevant result %s found at %s (relevant_cnt %s), precision %s" % (relevant.split("/")[-1], place+1,relevant_cnt,precision))
except ValueError:
#self.log.debug(" Relevant result %s not found, precision: 0" % relevant.split("/")[-1])
precision = 0
precisions.append(precision)
ap = sum(precisions) / float(len(precisions))
self.log.info(" AP for %s: %s" % (infoneed, ap))
aps.append(ap)
return aps
def eval_calc_map(self, average_precision_set):
return sum(average_precision_set) / float(len(average_precision_set))
_graph_cache = {}
    def eval_get_ranked_set(self, basefile, algorithm="pagerank",
                            age_compensation=False, restrict_cited=True):
        """Create, for every article, a ranked list of case URIs by running
        a link-analysis algorithm over the case citation graph. Returns a
        dict mapping article URI -> list of case URIs, best match first.

        * algorithm: can be "indegree", "hits" or "pagerank".
        * age_compensation: create one graph per year and average to
          compensate for newer cases (that have had less time to gain
          citations)
        * restrict_cited: Use only such citations that exist between
          two cases that both cite the same TFEU article (othewise,
          use all citations from all cases that cite the TFEU
          article, regardless of whether the cited case also cites
          the same TFEU article)"""
        sameas = self._sameas()
        store = TripleStore(
            self.config['triplestore'], self.config['repository'])
        res = {}
        self.log.debug("Creating ranked set (%s,age_compensation=%s,restrict_cited=%s)" %
                       (algorithm, age_compensation, restrict_cited))
        for article in self._articles(basefile):
            article_celex = article.split("/")[-1]
            self.log.debug(" Creating ranking for %s" % (article_celex))
            this_year = datetime.datetime.today().year
            if age_compensation:
                years = list(range(1954, this_year + 1))
                # years = range(this_year-3,this_year) # testing
            else:
                years = list(range(this_year, this_year + 1))
            result_by_years = []
            for year in years:
                restrict_citing = True # always performs better
                if (article, year, restrict_cited) in self._graph_cache:
                    #self.log.debug("Resuing cached graph (%s) for %s in %s" %
                    #               (restrict_cited, article_celex,year))
                    graph = self._graph_cache[(article, year, restrict_cited)]
                else:
                    #self.log.debug("Calculating graph for %s in %s" %
                    #               (article_celex,year))
                    sq = self._query_cites(article, sameas, restrict_citing,
                                           restrict_cited, year)
                    links = store.select(sq, format="python")
                    graph = self.eval_build_nx_graph(links)
                    self._graph_cache[(article, year, restrict_cited)] = graph
                self.log.debug(" Citegraph for %s in %s has %s edges, %s nodes" %
                               (article_celex, year, len(graph.edges()),
                                len(graph.nodes())))
                if len(graph.nodes()) == 0:
                    continue
                ranked = self.eval_rank_graph(graph, algorithm)
                result_by_years.append({})
                for result, score in ranked:
                    result_by_years[-1][result] = score
            # NOTE(review): if every year's graph was empty, `ranked` (and
            # `compensated_ranking`) are unbound below -- verify that this
            # cannot happen for the articles actually processed.
            if age_compensation:
                compensated_ranking = {}
                for d, score in ranked:  # the last result set
                    # cut out the year part of the URI
                    celex = d.split("/")[-1]
                    try:
                        age = this_year + 1 - int(
                            celex[1:5])  # cases decided this year has age 1
                        # scores = [0,0,0 ... 3,4,8,22]
                        scores = [result_by_year[d]
                                  for result_by_year
                                  in result_by_years
                                  if d in result_by_year]
                        avg_score = sum(scores) / float(age)
                        #self.log.debug("Result %s (age %s, avg score %s) %r" %
                        #               (d,age,avg_score,scores))
                        compensated_ranking[d] = avg_score
                    except ValueError:
                        continue
            # return just a list of results, no scores
            if age_compensation:
                res[article] = [result for result in sorted(compensated_ranking, key=compensated_ranking.__getitem__, reverse=True)]
            else:
                res[article] = [result[0] for result in ranked]
        return res
def eval_build_nx_graph(self, links):
#self.log.debug("Building graph with %s links" % len(links))
nxgraph = networkx.DiGraph()
for link in links:
if "-" in link['obj']:
nxgraph.add_edge(link['subj'], link['obj'].split("-")[0])
else:
nxgraph.add_edge(link['subj'], link['obj'])
#self.log.debug("Graph has %s nodes" % len (nxgraph.nodes()))
return nxgraph
def eval_rank_graph(self, graph, algorithm="pagerank"):
# should return a list of tuples (result,score) sorted in
# reversed order (ie highest score first)
if algorithm == "pagerank":
ranked = pagerank(graph)
elif algorithm == "hits":
ranked = hits(graph, max_iter=10000)[1] # 0: hubs, 1: authorities
elif algorithm == "indegree":
ranked = graph.in_degree()
else:
self.log.error(
"Unknown ranking algorithm %s specified" % algorithm)
sortedrank = sorted(
iter(list(ranked.items())), key=itemgetter(1), reverse=True)
return sortedrank
    # computes a ranked set for each baseline using a naive search
    # (using the most significant words of each article) and the
    # standard BM25F ranking function
    def eval_get_ranked_set_baseline(self, basefile):
        """Compute a baseline ranked set for each article by querying the
        full-text case index with the article's saved keyterms, ranked by
        whoosh's BM25F scoring. Returns a dict mapping article URI -> list
        of case URIs, best match first."""
        # Step 1: Read the saved keyterms for a subset of articles
        # (created by analyze_baseline_queries)
        g = Graph()
        g.parse(self.generic_path("keyterms", "analyzed", ".n3"), format="n3")
        articles = {}
        for (s, p, o) in g:
            if not str(s) in articles:
                articles[str(s)] = []
            articles[str(s)].append(str(o))
        # Step 2: Open the large whoosh index containing the text of
        # all cases. Then, create a query for each article based on
        # the keyterms.
        connector = query.Or
        indexdir = os.path.sep.join([self.config['datadir'], 'ecj', 'index'])
        storage = FileStorage(indexdir)
        idx = storage.open_index()
        searcher = idx.searcher(weighting=scoring.BM25F())
        res = {}
        # for article in sorted(articles.keys()):
        for article in self._articles(basefile):
            # NOTE(review): raises KeyError for articles without saved
            # keyterms, and IndexError below if a query yields no hits.
            terms = articles[article]
            rankedset = []
            #parser = qparser.QueryParser("content", idx.schema)
            #q = parser.parse(connector.join(terms))
            q = query.And([
                # query.Term("articles", article),
                connector([query.Term("content", x) for x in terms])
            ])
            # print q
            # self.log.debug("Article %s: %s", article, " or ".join(terms))
            results = searcher.search(q, limit=None)
            resultidx = 0
            # self.log.info("Keyterms for result: %r" % results.key_terms("content", docs=10, numterms=10))
            for result in results:
                reslbl = "%s (%s)" % (
                    result['basefile'], results.score(resultidx))
                rankedset.append([result['basefile'], reslbl])
                # self.log.debug(u"\t%s: %2.2d" % (result['title'], results.score(resultidx)))
                resultidx += 1
            self.log.info("Created baseline ranked set for %s: Top result %s (of %s)" %
                          (article.split("/")[-1], rankedset[0][0], len(rankedset)))
            # return just a list of URIs, no scoring information. But the
            # full URI isnt available in the whoosh db, so we recreate it.
            res[article] = ["http://lagen.nu/ext/celex/%s" % x[
                0] for x in rankedset]
        return res
    def eval_get_goldstandard(self, basefile):
        """Load the manual relevance judgments (ir:isRelevantFor triples in
        res/eut/goldstandard.n3) and return a dict mapping article URI ->
        sorted list of relevant case URIs. Each judged case is also looked
        up in the triple store; a warning is logged if it is missing."""
        goldstandard = Graph()
        goldstandard_rdf = util.relpath(
            os.path.dirname(__file__) + "/../res/eut/goldstandard.n3")
        goldstandard.load(goldstandard_rdf, format="n3")
        pred = util.ns['ir'] + 'isRelevantFor'
        res = {}
        store = TripleStore(
            self.config['triplestore'], self.config['repository'])
        # Query template to verify that a judged case exists in the store.
        sq_templ = """PREFIX eurlex:<http://lagen.nu/eurlex#>
SELECT ?party ?casenum ?celexnum WHERE {
    <%s> eurlex:party ?party ;
         eurlex:casenum ?casenum ;
         eurlex:celexnum ?celexnum .
}"""
        self.log.debug(
            "Loading gold standard relevance judgments for %s" % basefile)
        for article in self._articles(basefile):
            res[article] = []
            for o in goldstandard.objects(URIRef(article), URIRef(pred)):
                res[article].append(str(o))
                # Make sure the case exists and is the case we're looking for
                sq = sq_templ % str(o)
                parties = store.select(sq, format="python")
                if parties:
                    pass
                    #self.log.debug(" %s: %s (%s)" %
                    #               (parties[0]['celexnum'],
                    #                parties[0]['casenum'],
                    #                " v ".join([x['party'] for x in parties])))
                else:
                    self.log.warning("Can't find %s in triple store!" % o)
            self.log.debug(" Gold standard for %s: %s relevant docs" %
                           (article, len(res[article])))
            res[article].sort()
        return res
    def eval_aps_chart(self, avg_precisions):
        """Plot per-article average precision for every configuration in
        *avg_precisions* (a list of (label, [ap, ...]) pairs) and save the
        chart under eval/ as average_precision.<graph_filetype>."""
        # Create a chart in PDF format and an equivalent table
        import matplotlib.pyplot as plt
        import numpy as np
        # create linestyle/width array:
        styles = []
        for i in range(1, 5):
            # for j in (['-','--','-.',':']):
            for j in (['-', '-', '-', '-', '-']):
                styles.append((i, j))
        fig = plt.figure()
        fig.set_size_inches(8, 4)
        ax = plt.subplot(111)
        # One x position per information need (article).
        width = len(avg_precisions[0][1])
        plt.axis([0, width - 1, 0, 0.3])
        ind = np.arange(width)
        ax.set_ylabel('Average precision')
        ax.set_title('Average precision for information needs')
        xticklabels = ["Art %d" % int(x.split("E")[-1])
                       for x in self._articles('tfeu')]
        ax.set_xticks(ind)
        ax.set_xticklabels(xticklabels)
        cnt = 0
        for label, aps in avg_precisions:
            # print "%s: %r" % (label, aps)
            lw, ls = styles[cnt]
            plt.plot(ind, aps, label=label, linestyle=ls, linewidth=lw)
            cnt += 1
        plt.legend(loc='best',
                   markerscale=4,
                   prop={'size': 'x-small'},
                   ncol=int(len(avg_precisions) / 8) + 1)
        filetype = self.graph_filetype
        filename = "average_precision"
        filename = self.generic_path(filename, "eval", "." + filetype)
        plt.savefig(filename)
        self.log.info("Created average precision chart as %s" % filename)
        plt.close()
        #plt.show()
def eval_aps_table(self, avg_precisions):
filetype = "tex"
filename = "average_precision"
filename = self.generic_path(filename, "eval", "." + filetype)
articles = [x.split("E")[-1] for x in self._articles('tfeu')]
tblformat = "{r|" + "l|" * len(articles) + "l}"
tblhead = "".join(["& \\textbf{%s} " % x for x in articles])
fp = open(filename, "w")
fp.write("""\\begin{tabular}%s
\\hline
\\textbf{Conf} %s & \\textbf{MAP}\\\\
\\hline
""" % (tblformat, tblhead))
for label, aps in avg_precisions:
if label == "baseline":
label = "base"
else:
label = "".join([x[0].upper() for x in label.split("_")])
fp.write("%s & " % label)
for ap in aps:
fp.write("%.3f & " % ap)
fp.write("%.3f \\\\ \n" % self.eval_calc_map(aps))
fp.write("""\\hline
\\end{tabular}
""")
self.log.info("Created average precision table as %s" % filename)
@classmethod
def tabs(cls, primary=False):
    """Return the navigation tabs contributed by this docrepo: a
    single 'EU law' tab pointing at /eu/.

    :param primary: unused; kept for interface compatibility
    :returns: (label, url) pairs, each as a two-element list
    :rtype: list
    """
    label, url = 'EU law', '/eu/'
    return [[label, url]]
if __name__ == "__main__":
    # Script entry point: delegate to the docrepo's run() (presumably
    # the standard ferenda command-line driver -- defined outside this
    # excerpt).
    EurlexTreaties.run()
| Python |
# flake8: noqa
from .eurlexcaselaw import EurlexCaselaw
from .eurlextreaties import EurlexTreaties
| Python |
# flake8: noqa
from .keyword import Keyword
from .skeleton import Skeleton
from .wiki import Wiki
| Python |
# flake8: noqa
from .citationparser import CitationParser
from .uriformatter import URIFormatter
from .describer import Describer
from .layeredconfig import LayeredConfig
from .pdfreader import PDFReader
from .textreader import TextReader
from .wordreader import WordReader
from .triplestore import TripleStore
from .fulltextindex import FulltextIndex
from .devel import Devel
from .fsmparser import FSMParser
from .tocpageset import TocPageset
from .tocpage import TocPage
from .toccriteria import TocCriteria
from .newscriteria import NewsCriteria
from .document import Document
from .documententry import DocumentEntry
from .documentstore import DocumentStore
from .documentrepository import DocumentRepository
from .pdfdocumentrepository import PDFDocumentRepository
from .compositerepository import CompositeRepository
__version__='0.1-dev'
| Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rdflib.extras.describer import Describer as OrigDescriber
from rdflib import URIRef, Literal, RDF, Graph
class Describer(OrigDescriber):
    """Extends the utility class
    :py:class:`rdflib.extras.describer.Describer` so that it reads
    values and references as well.

    :param graph: The graph to read from and write to
    :type graph: :py:class:`~rdflib.graph.Graph`
    :param about: the current subject to use
    :type about: string or :py:class:`~rdflib.term.Identifier`
    :param base: Base URI for any relative URIs used with :py:meth:`~ferenda.Describer.about`, :py:meth:`~ferenda.Describer.rel` or :py:meth:`~ferenda.Describer.rev`,
    :type base: string
    """

    def getvalues(self, p):
        """Get a list (possibly empty) of all literal values for the
        given property and the current subject. Values will be
        converted to plain literals, i.e. not
        :py:class:`rdflib.term.Literal` objects.

        :param p: The property of the sought literal.
        :type p: :py:class:`rdflib.term.URIRef`
        :returns: a list of matching literals
        :rtype: list of strings (or other appropriate python type if the
                literal has a datatype)
        """
        return [x.toPython() for x in self.graph.objects(self._current(), p)
                if isinstance(x, Literal)]

    def getrels(self, p):
        """Get a list (possibly empty) of all URIs for the
        given property and the current subject. Values will be
        converted to strings, i.e. not
        :py:class:`rdflib.term.URIRef` objects.

        :param p: The property of the sought URI.
        :type p: :py:class:`rdflib.term.URIRef`
        :returns: The matching URIs
        :rtype: list of strings
        """
        return [str(x) for x in self.graph.objects(self._current(), p)
                if isinstance(x, URIRef)]

    def getrdftype(self):
        """Get the `rdf:type` of the current subject.

        :returns: The URI of the current subjects's rdf:type.
        :rtype: string
        """
        return self.getrel(RDF.type)

    def getvalue(self, p):
        """Get a single literal value for the given property and the
        current subject. If the graph contains zero or more than one
        such literal, a :py:class:`KeyError` will be raised.

        :param p: The property of the sought literal.
        :type p: :py:class:`rdflib.term.URIRef`
        :returns: The sought literal
        :rtype: string (or other appropriate python type if the literal has
                a datatype)
        """
        values = list(self.getvalues(p))
        if len(values) == 0:
            raise KeyError("No objects for predicate %s" % p)
        elif len(values) > 1:
            # FIX: corrected "predicatee" typo in the error message
            raise KeyError("More than one object for predicate %s" % p)
        return values[0]

    def getrel(self, p):
        """Get a single URI for the given property and the current
        subject. If the graph contains zero or more than one such URI,
        a :py:class:`KeyError` will be raised.

        :param p: The property of the sought literal.
        :type p: :py:class:`rdflib.term.URIRef`
        :returns: The sought URI
        :rtype: string
        """
        refs = list(self.getrels(p))
        # BUGFIX: these messages used string concatenation
        # ("... %s" + p) instead of %-interpolation, producing
        # malformed messages like "No objects for predicate %s<uri>"
        if len(refs) == 0:
            raise KeyError("No objects for predicate %s" % p)
        elif len(refs) > 1:
            raise KeyError("More than one object for predicate %s" % p)
        return refs[0]
| Python |
# -*- coding: utf-8 -*-
"""Utility functions for running various ferenda tasks from the
command line, including registering classes in the configuration
file. If you're using the :py:class:`~ferenda.DocumentRepository` API
directly in your code, you'll probably only need
:py:func:`makeresources`, :py:func:`frontpage` and possibly
:py:func:`setup_logger`. If you're using the ``ferenda-build.py``
tool, you don't need to directly call any of these methods --
``ferenda-build.py`` calls :py:func:`run`, which calls everything
else, for you.
"""
from __future__ import unicode_literals
# system
import os
import time
import stat
import subprocess
import sys
import inspect
import logging
import json
import mimetypes
from ast import literal_eval
from datetime import datetime
import xml.etree.cElementTree as ET
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import six # technically third-party, but needed for cross-version
# system imports
if six.PY3:
from urllib.parse import urlsplit
else:
from urlparse import urlsplit
from wsgiref.simple_server import make_server
from wsgiref.util import FileWrapper
# from pprint import pprint
from six.moves import configparser
input = six.moves.input
# 3rd party
import pkg_resources
import requests
import requests.exceptions
# my modules
from ferenda import DocumentRepository
from ferenda import DocumentStore
from ferenda import TripleStore
from ferenda import util
from ferenda import errors
from ferenda import LayeredConfig
# NOTE: This is part of the published API and must be callable in
# scenarios without configfile or logger.
def makeresources(repos,
                  resourcedir="data/rsrc",
                  combine=False,
                  cssfiles=[],
                  jsfiles=[],
                  sitename="MySite",
                  sitedescription="Just another Ferenda site"):
    """Creates the web assets/resources needed for the web app
    (concatenated and minified js/css files, resources.xml used by
    most XSLT stylesheets, etc).

    :param repos: The repositories to create resources for
    :type repos: list (or other iterable) of docrepo objects
    :param combine: whether to combine and compact/minify CSS and JS files
    :type combine: bool
    :param resourcedir: where to put generated/copied resources
    :type resourcedir: string (directory name)
    :param cssfiles: CSS files specified in the main config (repo-level
                     css files are collected from each repo's config)
    :type cssfiles: list
    :param jsfiles: JS files specified in the main config
    :type jsfiles: list
    :param sitename: site name, written into resources.xml
    :type sitename: str
    :param sitedescription: site description, written into resources.xml
    :type sitedescription: str
    :returns: All created/copied css, js and resources.xml files
    :rtype: dict of lists
    """
    # NOTE: even though the returned dict of lists of paths should use the appropriate path
    # separator for the current platform (/ on unix, \ on windows), the resources.xml always
    # uses the / separator regardless of platform.
    log = setup_logger()
    res = {}
    # tracks files already handled, so that a file listed both in the
    # main config and by one or more docrepos is only processed once
    processed_files = []

    # Create minfied combined.css file
    cssbuffer = six.BytesIO()
    cssurls = []
    cssdir = resourcedir + os.sep + "css"
    # 1. Process all css files specified in the main config
    for cssfile in cssfiles:
        if cssfile in processed_files:
            continue
        cssurls.append(_process_file(
            cssfile, cssbuffer, cssdir, "ferenda.ini", combine))
        processed_files.append(cssfile)
    # 2. Visit each enabled class and see if it specifies additional
    # css files to read
    for inst in repos:
        if not hasattr(inst, 'config'):
            continue
        for cssfile in inst.config.cssfiles:
            if cssfile in processed_files:
                continue
            # FIXME: CSS file path should be interpreted
            # relative to the module source code file instead
            # of cwd
            cssurls.append(_process_file(
                cssfile, cssbuffer, cssdir, inst.alias, combine))
            processed_files.append(cssfile)
    # _process_file returns None for missing files (and always when
    # combining), so filter those out
    cssurls = list(filter(None, cssurls))
    if combine:
        # 3. Minify the result using cssmin
        css = cssbuffer.getvalue().decode('utf-8')
        log.debug("Read %s files, CSS is now %s bytes" % (len(
            cssfiles), len(css)))
        from .thirdparty import cssmin
        css = cssmin.cssmin(css)
        log.debug("After minifying, CSS is now %s bytes" % (len(css)))
        outcssfile = cssdir + os.sep + 'combined.css'
        util.writefile(outcssfile, css)
        res['css'] = [_filepath_to_urlpath(outcssfile, 2)]
    else:
        res['css'] = cssurls

    # Create data/rsrc/js/combined.js in a similar way but use slimit to
    # compress the result
    jsbuffer = six.BytesIO()
    jsurls = []
    jsdir = resourcedir + os.sep + "js"
    for jsfile in jsfiles:
        if jsfile in processed_files:
            continue
        jsurls.append(_process_file(
            jsfile, jsbuffer, jsdir, "ferenda.ini", combine))
        processed_files.append(jsfile)
    for inst in repos:
        if not hasattr(inst, 'config'):
            continue
        for jsfile in inst.config.jsfiles:
            if jsfile in processed_files:
                continue
            jsurls.append(_process_file(
                jsfile, jsbuffer, jsdir, inst.alias, combine))
            processed_files.append(jsfile)
    jsurls = list(filter(None, jsurls))
    if combine:
        js = jsbuffer.getvalue().decode('utf-8')
        log.debug("Read %s files, JS is now %s bytes" % (len(jsfiles),
                                                         len(js)))
        # slimit provides better perf, but isn't py3 compatible
        # import slimit
        # js = slimit.minify(
        #     jsbuffer.getvalue(), mangle=True, mangle_toplevel=True)
        import jsmin
        js = jsmin.jsmin(js)
        log.debug("After compression, JS is now %s bytes" % (len(js)))
        outjsfile = jsdir + os.sep + 'combined.js'
        util.writefile(outjsfile, js)
        res['js'] = [_filepath_to_urlpath(outjsfile, 2)]
    else:
        res['js'] = jsurls

    # TODO: Populate data/rsrc/img/ from files found in config['imgdir'] and
    # module.imagedir (putting each module's imagedir in a separate
    # subdir, eg EurlexTreaties.imagedir = res/eut/img results in
    # res/eut/img/foo.png being placed in data/rsrc/img/eut/foo.png)

    # Finally, create a resources.xml file containing refs to the css and js
    # files (and also favicon?) that base5.xsl can include.
    # FIXME: Do this in LXML instead
    root = ET.Element("configuration")
    sitename_el = ET.SubElement(root, "sitename")
    sitename_el.text = sitename
    sitedescription_el = ET.SubElement(root, "sitedescription")
    sitedescription_el.text = sitedescription
    tabs = ET.SubElement(
        ET.SubElement(ET.SubElement(root, "tabs"), "nav"), "ul")
    # collect the unique (label, url) tabs contributed by all repos
    sitetabs = []
    for inst in repos:
        if hasattr(inst, 'tabs'):
            for tab in inst.tabs():
                if not tab in sitetabs:
                    (label, url) = tab
                    alias = inst.alias
                    log.debug("Adding tab %(label)s (%(url)s) from docrepo %(alias)s" % locals())
                    sitetabs.append(tab)
    for tab in sitetabs:
        link = ET.SubElement(ET.SubElement(tabs, "li"), "a")
        link.text = tab[0]
        link.attrib['href'] = tab[1]
    stylesheets = ET.SubElement(root, "stylesheets")
    log.debug("Adding %s stylesheets to resources.xml" % len(res['css']))
    for f in res['css']:
        stylesheet = ET.SubElement(stylesheets, "link")
        stylesheet.attrib['rel'] = "stylesheet"
        stylesheet.attrib['href'] = f
    log.debug("Adding %s javascripts to resources.xml" % len(res['js']))
    javascripts = ET.SubElement(root, "javascripts")
    for f in res['js']:
        javascript = ET.SubElement(javascripts, "script")
        javascript.attrib['src'] = f
        # non-empty text keeps ElementTree from serializing a
        # self-closed <script/> tag
        javascript.text = " "
    util.indent_node(root)
    tree = ET.ElementTree(root)
    outxmlfile = resourcedir + os.sep + "resources.xml"
    util.ensure_dir(outxmlfile)
    tree.write(outxmlfile, encoding="utf-8")
    log.debug("Wrote %s" % outxmlfile)
    # NOTE: If DocumentRepository.generate feels like it, it may
    # create a temporary copy of resources.xml with js/css paths
    # modified to be relative to the generated file (which may be 2-3
    # directories deep) instead of the document root, in order to
    # support static HTML file generation with arbitrarily deep
    # directory structure.
    res['xml'] = [_filepath_to_urlpath(outxmlfile, 1)]
    # normalize path separators on windows (see NOTE above about
    # resources.xml always using /)
    if os.sep == "\\":
        for part in res:
            res[part] = [x.replace('/', os.sep) for x in res[part]]
    return res
def _process_file(filename, buf, destdir, origin="", combine=False):
    """
    Helper function to concatenate or copy CSS/JS (optionally
    processing them with e.g. Scss) or other files to correct place
    under the web root directory.

    :param filename: The name (relative to the ferenda package) of the file
    :param buf: A buffer into which the contents of the file is written (if combine == True)
    :param destdir: The directory into which the file will be copied (unless combine == True)
    :param origin: The source of the configuration that specifies this files
    :param combine: Whether to combine all files into a single one
    :returns: The URL path of the resulting file, relative to the web root (or None if combine == True)
    :rtype: str
    """
    # suffixes that need preprocessing before serving, and the
    # transform (plus resulting suffix) to apply
    mapping = {'.scss': {'transform': _transform_scss,
                         'suffix': '.css'}
               }
    log = setup_logger()
    # FIXME: extend this through a load-path mechanism?
    if os.path.exists(filename):
        log.debug("Process file found %s as a file relative to %s" % (filename, os.getcwd()))
        fp = open(filename, "rb")
    elif pkg_resources.resource_exists('ferenda', filename):
        log.debug("Found %s as a resource" % filename)
        fp = pkg_resources.resource_stream('ferenda', filename)
    else:
        log.warning("file %(filename)s (specified in %(origin)s) doesn't exist" % locals())
        return None
    (base, ext) = os.path.splitext(filename)
    if ext in mapping:
        outfile = base + mapping[ext]['suffix']
        mapping[ext]['transform'](filename, outfile)
        # BUGFIX: read from the transformed output, not the original
        # source -- the old code kept the handle to the untransformed
        # file, so e.g. raw Scss ended up in the combined buffer
        fp.close()
        fp = open(outfile, "rb")
        filename = outfile
    if combine:
        log.debug("combining %s into buffer" % filename)
        buf.write(fp.read())
        fp.close()
        return None
    else:
        log.debug("writing %s out to %s" % (filename, destdir))
        outfile = destdir + os.sep + os.path.basename(filename)
        util.ensure_dir(outfile)
        with open(outfile, "wb") as fp2:
            fp2.write(fp.read())
        fp.close()
        return _filepath_to_urlpath(outfile, 2)
def _transform_scss(infile, outfile):
    """Compile the Scss source *infile* into plain CSS in *outfile*."""
    print(("Transforming %s to %s" % (infile, outfile)))
    from scss import Scss
    compiled = Scss().compile(util.readfile(infile))
    util.writefile(outfile, compiled)
def frontpage(repos,
              path="data/index.html",
              stylesheet="res/xsl/frontpage.xsl",
              sitename="MySite"):
    """Create a suitable frontpage.

    :param repos: The repositories to list on the frontpage
    :type repos: list (or other iterable) of docrepo objects
    :param path: the filename to create.
    :type path: str
    :param stylesheet: the XSLT stylesheet used to transform the
                       intermediate XHTML document into the final page
    :type stylesheet: str
    :param sitename: used as the page title
    :type sitename: str
    """
    log = setup_logger()
    with util.logtime(log.info,
                      "frontpage: wrote %(path)s (%(elapsed).3f sec)",
                      {'path': path}):
        blocks = ""
        # TODO: if any of the repos has inst.config.primaryfrontpage =
        # True, then all other repos should provide their
        # .frontpage_content() into that repos .frontpage impl (and this
        # method should not have any xhtml template like below).
        for inst in repos:
            # each repo contributes one <div> of content
            content = inst.frontpage_content()
            blocks += "<div id='%s'>%s</div>" % (inst.alias, content)
            log.debug("frontpage: repo %s provided %s chars of content" % (inst.alias, len(content)))
        vars = {'title': sitename,
                'blocks': blocks}
        # assemble an intermediate XHTML document which is then run
        # through the XSLT stylesheet to produce the final page
        xhtml = """<?xml version='1.0' encoding='utf-8'?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML+RDFa 1.0//EN" "http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(title)s</title>
</head>
<body>
%(blocks)s
</body>
</html>""" % vars
        xhtml_path = os.path.splitext(path)[0] + ".xhtml"
        with open(xhtml_path, "w") as fp:
            fp.write(xhtml)
        # transform via the first repo's XSLT machinery
        xsltdir = repos[0].setup_transform_templates(os.path.dirname(stylesheet))
        params = repos[0].get_transform_configuration(xsltdir, xhtml_path)
        repos[0].transform_html(stylesheet, xhtml_path, path, params)
    return True
def runserver(repos,
              port=8000,
              documentroot="data",  # relative to cwd
              apiendpoint="/api/",
              searchendpoint="/search/"):
    """Starts up a internal webserver and runs the WSGI app (see
    :py:func:`make_wsgi_app`) using all the specified document
    repositories. Runs forever (or until interrupted by keyboard).

    :param repos: Object instances for the repositories that should be served
                  over HTTP
    :type repos: list
    :param port: The port to use
    :type port: int
    :param documentroot: The root document, used to locate files not directly
                         handled by any repository
    :type documentroot: str
    :param apiendpoint: The part of the URI space handled by the API
                        functionality
    :type apiendpoint: str
    :param searchendpoint: The part of the URI space handled by the search
                           functionality
    :type searchendpoint: str
    """
    print("Serving wsgi app at http://localhost:%s/" % port)
    # build the WSGI app from explicit kwargs (no ini file) and serve it
    app = make_wsgi_app(None,
                        port=port,
                        documentroot=documentroot,
                        apiendpoint=apiendpoint,
                        searchendpoint=searchendpoint,
                        repos=repos)
    make_server('', port, app).serve_forever()
def make_wsgi_app(inifile=None, **kwargs):
    """Creates a callable object that can act as a WSGI application by
    mod_wsgi, gunicorn, the built-in webserver, or any other
    WSGI-compliant webserver.

    :param inifile: The full path to a ``ferenda.ini`` configuration file
    :type inifile: str
    :param \*\*kwargs: Configuration values for the wsgi app (must
                       include ``documentroot``, ``apiendpoint`` and
                       ``searchendpoint``). Only used if ``inifile``
                       is not provided.
    :returns: A WSGI application
    :rtype: callable
    """
    if inifile:
        assert os.path.exists(inifile), "INI file %s doesn't exist (relative to %s)" % (inifile, os.getcwd())
        args = _setup_runserver_args(_load_config(inifile), inifile)
    else:
        # sanity check: is documentroot, searchendpoint and
        # apiendpoint defined?
        args = kwargs

    def app(environ, start_response):
        # dispatch on URL path prefix: search, then api, then the
        # static-file fallback for everything else
        path = environ['PATH_INFO']
        if path.startswith(args['searchendpoint']):
            handler = _wsgi_search
        elif path.startswith(args['apiendpoint']):
            handler = _wsgi_api
        else:
            handler = _wsgi_static
        return handler(environ, start_response, args)

    return app
def _wsgi_search(environ, start_response, args):
    """WSGI method, called by the wsgi app for requests that matches
    ``searchendpoint``."""
    # placeholder implementation: render the environ and args dicts as
    # HTML lists rather than performing an actual search
    def htmlify(d):
        items = "\n".join("<li>%s: %s" % (key, d[key]) for key in d.keys())
        return "<ul>" + items + "</ul>"
    msg = """<h1>Search endpoint</h1>Environ:%s Args:%s""" % (htmlify(environ),
                                                              htmlify(args))
    data = msg.encode('utf-8')
    headers = [("Content-Type", "text/html; charset=utf-8"),
               ("Content-Length", str(len(data)))]
    start_response("200 OK", headers)
    return iter([data])
def _wsgi_api(environ, start_response, args):
    """WSGI method, called by the wsgi app for requests that matches
    ``apiendpoint``."""
    # placeholder implementation: dump the WSGI environ as pretty JSON
    payload = {str(key): str(environ[key]) for key in environ.keys()}
    data = json.dumps(payload, indent=4).encode('utf-8')
    headers = [("Content-Type", "application/json"),
               ("Content-Length", str(len(data)))]
    start_response("200 OK", headers)
    return iter([data])
def _wsgi_static(environ, start_response, args):
    """WSGI method, called by the wsgi app for all other requests not handled
    by :py:func:`~ferenda.Manager.search` or :py:func:`~ferenda.Manager.api`"""
    fullpath = args['documentroot'] + environ['PATH_INFO']
    # we should start by asking all repos "do you handle this path"?
    # default impl is to say yes if 1st seg == self.alias and the rest
    # can be treated as basefile yielding a existing generated file.
    # a yes answer contains a FileWrapper around the repo-selected
    # file and optionally length (but not status, always 200, or
    # mimetype, always text/html). None means no.
    fp = None
    for repo in args['repos']:
        (fp, length, mimetype) = repo.http_handle(environ)  # and args?
        if fp:
            status = "200 OK"
            iterdata = FileWrapper(fp)
            break
    # no repo claimed the path: fall back to serving straight from the
    # filesystem under documentroot
    if not fp:
        if os.path.isdir(fullpath):
            # directory request: serve its index.html instead
            fullpath = fullpath + "index.html"
        if os.path.exists(fullpath):
            # guess Content-Type from the extension, default text/plain
            ext = os.path.splitext(fullpath)[1]
            if not mimetypes.inited:
                mimetypes.init()
            mimetype = mimetypes.types_map.get(ext, 'text/plain')
            status = "200 OK"
            length = os.path.getsize(fullpath)
            fp = open(fullpath, "rb")
            iterdata = FileWrapper(fp)
        else:
            # nothing matched: synthesize a minimal 404 page
            msg = "<h1>404</h1>The path %s not found at %s" % (environ['PATH_INFO'],
                                                               fullpath)
            mimetype = "text/html"
            status = "404 Not Found"
            length = len(msg.encode('utf-8'))
            fp = six.BytesIO(msg.encode('utf-8'))
            iterdata = FileWrapper(fp)
    start_response(status, [
        ("Content-type", mimetype),
        ("Content-length", str(length))
    ])
    return iterdata
    # FIXME: How can we make sure fp.close() is called, regardless of
    # whether it's a real fileobject or a BytesIO object?
# Maps symbolic log level names (as used in config files and on the
# command line) to the numeric constants in the logging module.
loglevels = {'DEBUG': logging.DEBUG,
             'INFO': logging.INFO,
             'WARNING': logging.WARNING,
             'ERROR': logging.ERROR,
             'CRITICAL': logging.CRITICAL}
def setup_logger(level='INFO', filename=None):
    """Sets up the logging facilities and creates the module-global log
    object as a root logger.

    :param level: 'DEBUG','INFO','WARNING','ERROR' or 'CRITICAL', or a
                  numeric level from the logging module
    :type level: str or int
    :param filename: The name of the file to log to. If None, log to stdout
    :type filename: str
    """
    # normalize a symbolic level name to its numeric value (BUGFIX:
    # the converted value was previously computed but never used)
    if not isinstance(level, int):
        level = loglevels[level]
    l = logging.getLogger()  # get the root logger
    # If any handler is already attached (e.g. from an earlier call),
    # leave the existing configuration untouched. (BUGFIX: previously a
    # FileHandler could be created -- opening the file -- and then
    # discarded by this early return.)
    if l.handlers:
        return l
    if filename:
        h = logging.FileHandler(filename)
    else:
        h = logging.StreamHandler()
    h.setLevel(level)
    h.setFormatter(
        logging.Formatter(
            "%(asctime)s %(name)s %(levelname)s %(message)s",
            datefmt="%H:%M:%S"))
    l.addHandler(h)
    l.setLevel(level)
    # turn of some library loggers we're not interested in
    for logname in ['requests.packages.urllib3.connectionpool',
                    'rdflib.plugins.sleepycat']:
        logging.getLogger(logname).propagate = False
    return l
def run(argv):
    """Runs a particular action for either a particular class or all
    enabled classes.

    :param argv: a ``sys.argv``-style list of strings specifying the class
                 to load, the action to run, and additional
                 parameters. The first parameter is either the name of
                 the class-or-alias, or the special value "all",
                 meaning all registered classes in turn. The second
                 parameter is the action to run, or the special value
                 "all" to run all actions in correct order. Remaining
                 parameters are either configuration parameters (if
                 prefixed with ``--``, e.g. ``--loglevel=INFO``, or
                 positional arguments to the specified action).
    """
    config = _load_config(_find_config_file(), argv)
    # if logfile is set to True, autogenerate logfile name from
    # current datetime. Otherwise assume logfile is set to the desired
    # file name of the log
    log = setup_logger(level=config.loglevel, filename=None)
    if config.logfile:
        if isinstance(config.logfile, bool):
            logfile = "%s/logs/%s.log" % (config.datadir, datetime.now().strftime("%Y%m%d-%H%M%S"))
        else:
            logfile = config.logfile
        util.ensure_dir(logfile)
        setup_logger(level=config.loglevel, filename=logfile)
    enabled = _enabled_classes()  # reads only ferenda.ini using configparser rather than layeredconfig
    # returns {'ferenda.sources.docrepo.DocRepo':'base',...}
    # NOTE(review): the comment above claims classname->alias, but
    # _enabled_classes builds alias->classname; the tuple unpack in the
    # "all"-classes loop below uses the opposite naming -- verify.
    enabled_aliases = dict(reversed(item) for item in enabled.items())
    if len(argv) < 1:
        _print_usage()  # also lists enabled modules
    else:
        # _filter_argv("ecj", "parse", "62008J0034", "--force=True", "--frobnicate")
        # -> ("ecj", "parse", ["62008J0034"])
        # _filter_argv("ecj", "--frobnicate") -> ("ecj", None, [])
        (classname, action, args) = _filter_argv(argv)
        if action == 'enable':
            try:
                return enable(classname)
            except (ImportError, ValueError) as e:
                log.error(six.text_type(e))
                return None
        elif action == 'runserver':
            args = _setup_runserver_args(config, _find_config_file())
            return runserver(**args)  # Note: the actual runserver method never returns
        elif action == 'makeresources':
            repoclasses = _classes_from_classname(enabled, classname)
            args = _setup_makeresources_args(config)
            repos = []
            for cls in repoclasses:
                inst = _instantiate_class(cls, _find_config_file(), argv)
                repos.append(inst)
            return makeresources(repos, **args)
        elif action == 'frontpage':
            repoclasses = _classes_from_classname(enabled, classname)
            args = _setup_frontpage_args(config, argv)
            return frontpage(**args)
        elif action == 'all':
            # run every action, in dependency order, by recursing into
            # run() with a synthesized argv for each action/class combo
            classnames = _setup_classnames(enabled, classname)
            results = OrderedDict()
            for action in ("download",
                           "parse", "relate", "makeresources",
                           "generate", "toc", "news", "frontpage"):
                if action in ("makeresources", "frontpage"):
                    # these are site-global actions, run once for "all"
                    argscopy = list(args)
                    argscopy.extend(_filter_argv_options(argv))
                    argscopy.insert(0, action)
                    argscopy.insert(0, "all")
                    results[action] = run(argscopy)
                else:
                    # per-class actions, run once per enabled class
                    results[action] = OrderedDict()
                    for classname in classnames:
                        alias = enabled_aliases[classname]
                        argscopy = list(args)
                        argscopy.extend(_filter_argv_options(argv))
                        # these actions process every basefile by default
                        if (action in ("parse", "relate", "generate") and
                                "--all" not in argscopy):
                            argscopy.append("--all")
                        argscopy.insert(0, action)
                        argscopy.insert(0, classname)
                        results[action][alias] = run(argscopy)
            return results
        else:
            if classname == "all":
                # run the single given action for every enabled class
                ret = []
                for classname, alias in enabled.items():
                    argv_copy = list(argv)
                    argv_copy[0] = alias
                    ret.append(_run_class(enabled, argv_copy))
                return ret
            else:
                return _run_class(enabled, argv)
def enable(classname):
    """Registers a class by creating a section for it in the
    configuration file (``ferenda.ini``). Returns the short-form
    alias for the class.

    >>> enable("ferenda.DocumentRepository") == 'base'
    True
    >>> os.unlink("ferenda.ini")

    :param classname: The fully qualified name of the class
    :type classname: str
    :returns: The short-form alias for the class
    :rtype: str
    """
    cls = _load_class(classname)  # eg ferenda.DocumentRepository
    # throws error if unsuccessful
    cfg = configparser.ConfigParser()
    configfilename = _find_config_file(create=True)
    cfg.read([configfilename])
    alias = cls.alias
    # ROBUSTNESS: re-enabling an already-enabled class should update
    # its section, not crash with DuplicateSectionError
    if not cfg.has_section(alias):
        cfg.add_section(alias)
    cfg.set(alias, "class", classname)
    with open(configfilename, "w") as fp:
        cfg.write(fp)
    log = setup_logger()
    log.info("Enabled class %s (alias '%s')" % (classname, alias))
    return alias
def setup():
    """Creates a project, complete with configuration file and
    ferenda-build tool. Takes no parameters, but expects ``sys.argv``
    to contain the path to the project being created.

    Checks to see that all required python modules and command line
    utilities are present. Also checks which triple store(s) are
    available and selects the best one (in order of preference:
    Sesame, Fuseki, RDFLib+Sleepycat, RDFLib+SQLite).

    .. note::

       The ``ferenda-setup`` script that gets installed with ferenda is
       a tiny wrapper around this function.
    """
    if len(sys.argv) < 2:
        print(("Usage: %s [project-directory]" % sys.argv[0]))
        return False
    projdir = sys.argv[1]
    if os.path.exists(projdir):
        print(("Project directory %s already exists" % projdir))
        return False
    sitename = os.path.basename(projdir)
    # warn (but allow the user to proceed) if required modules or
    # command line utilities are missing
    ok = _preflight_check()
    if not ok:
        print("There were some errors when checking your environment. Proceed anyway? (y/N)")
        answer = input()
        if answer != "y":
            sys.exit(1)
    # The template ini file needs values for triple store
    # configuration. Find out the best triple store we can use.
    storetype, storelocation, storerepository = _select_triplestore(sitename)
    if not storetype:
        print("Cannot find a useable triple store. Proceed anyway? (y/N)")
        answer = input()
        if answer != "y":
            sys.exit(1)
    if not os.path.exists(projdir):
        os.makedirs(projdir)
    # step 1: create buildscript
    buildscript = projdir + os.sep + "ferenda-build.py"
    util.resource_extract('res/scripts/ferenda-build.py', buildscript)
    # make the extracted build script executable for the owner
    mode = os.stat(buildscript)[stat.ST_MODE]
    os.chmod(buildscript, mode | stat.S_IXUSR)
    # step 2: create config file -- the template is interpolated with
    # local variables (sitename, storetype, storelocation, ...)
    configfile = projdir + os.sep + "ferenda.ini"
    util.resource_extract('res/scripts/ferenda.template.ini', configfile,
                          locals())
    print("Project created in %s" % projdir)
    # step 3: create WSGI app
    wsgifile = projdir + os.sep + "wsgi.py"
    util.resource_extract('res/scripts/wsgi.py', wsgifile)
def _load_config(filename, argv=[]):
    """Loads general configuration information from ``filename`` (which
    should be a full path to a ferenda.ini file) and/or command line
    arguments into a :py:class:`~ferenda.LayeredConfig` instance. It
    contains a built-in dict of default configuration values which can be
    overridden by the config file or command line arguments."""
    # FIXME: Expand on this list of defaults? Note that it only
    # pertains to global configuration, not docrepo configuration
    # (those have the get_default_options() method).
    defaults = {'loglevel': 'DEBUG',
                'logfile': True,
                'datadir': 'data',
                'combineresources': False,
                # css and js files must be specified in config file or
                # by docrepos -- there should be no default list of
                # files (because we have no way of removing items)
                #'cssfiles': ['res/css/normalize.css',
                #             'res/css/main.css'],
                #'jsfiles': ['res/js/jquery-1.9.0.js',
                #            'res/js/modernizr-2.6.2-respond-1.1.0.min.js'],
                'sitename': 'MySite',
                'sitedescription': 'Just another Ferenda site'}
    # layering order: defaults < config file < command line
    return LayeredConfig(defaults, filename, argv, cascade=True)
def _classes_from_classname(enabled, classname):
    """Given a classname or alias, returns a list of class objects.

    :param enabled: The currently enabled repo classes, as returned by
                    :py:func:`~ferenda.Manager._enabled_classes`
    :type enabled: dict
    :param classname: A classname (eg ``'ferenda.DocumentRepository'``) or
                      alias (eg ``'base'``). The special value ``'all'``
                      expands to all enabled classes.
    :returns: Class objects
    :rtype: list
    """
    # resolve alias/"all" to fully qualified names, then load each class
    return [_load_class(cn) for cn in _setup_classnames(enabled, classname)]
def _setup_makeresources_args(config):
    """Given a config object, returns a dict with some of those configuration options, but suitable as arguments for :py:func:`ferenda.Manager.makeresources`.

    :param config: An initialized config object with data from a ferenda.ini file
    :type config: ferenda.LayeredConfig
    :returns: A subset of the same configuration options
    :rtype: dict
    """
    # our config file stores the cssfiles and jsfiles parameters as string
    def getlist(cfg, key):
        if not hasattr(cfg, key):
            return []
        value = getattr(cfg, key)
        if isinstance(value, six.text_type):
            # a python-literal string like "['a.css', 'b.css']"
            return literal_eval(value)
        return value

    return {'resourcedir': config.datadir + os.sep + 'rsrc',
            'combine': config.combineresources,
            'cssfiles': getlist(config, 'cssfiles'),
            'jsfiles': getlist(config, 'jsfiles'),
            'sitename': config.sitename,
            'sitedescription': config.sitedescription}
def _setup_classnames(enabled, classname):
    """Converts an alias (as enabled in a ferenda.ini file) to a fully
    qualified class name. If the special alias "all" is used, return
    the class names of all enabled repositories.

    Note: a list is always returned, even when the classname ``'all'``
    is not used. If a fully qualified classname is provided, a list
    with the same string is returned.

    :param enabled: The currently enabled repo classes, as returned by
                    :py:func:`~ferenda.Manager._enabled_classes`
    :type enabled: dict
    :param classname: A classname (eg ``'ferenda.DocumentRepository'``) or
                      alias (eg ``'base'``). The special value ``'all'``
                      expands to all enabled classes.
    :returns: Class names (as strings)
    :rtype: list
    """
    # "w3c" => ['ferenda.sources.tech.W3Standards']
    # "all" => ['ferenda.sources.tech.W3Standards', 'ferenda.sources.tech.RFC']
    if classname == "all":
        # the Devel pseudo-repo is excluded from "all"
        return [cls for cls in enabled.values() if cls != 'ferenda.Devel']
    # translate an alias to its classname; pass through anything else
    return [enabled.get(classname, classname)]
def _run_class(enabled, argv):
    """Runs a particular action for a particular class.

    :param enabled: The currently enabled repo classes, as returned by
                    :py:func:`~ferenda.Manager._enabled_classes`
    :type enabled: dict
    :param argv: An argv-style list of strings, see run (but note
                 that that function replaces ``all`` with every
                 registered class in turn and then calls this method
                 with the same argv.
    :type argv: list

    If the parameter ``--all`` is given (e.g. ``['myrepo', 'parse',
    '--all']``), the specified command is run once for every available
    file for that action (as determined by each class' implementation
    of :py:meth:`~ferenda.DocumentRepository.list_basefiles_for`).
    """
    log = setup_logger()
    (alias, command, args) = _filter_argv(argv)
    with util.logtime(log.info, "%(alias)s %(command)s finished in %(elapsed).3f sec",
                      {'alias': alias,
                       'command': command}):
        # reverse mapping (classname -> alias), so that either form
        # may be given on the command line
        _enabled_classes = dict(reversed(item) for item in enabled.items())
        if alias not in enabled and alias not in _enabled_classes:
            log.error("Class-or-alias %s not enabled" % alias)
            return
        # strip the class-or-alias token so only options/args remain
        if alias in argv:
            argv.remove(alias)
        if alias in _enabled_classes:  # ie a fully qualified classname was used
            classname = alias
        else:
            classname = enabled[alias]
        cls = _load_class(classname)
        inst = _instantiate_class(cls, argv=argv)
        try:
            clbl = getattr(inst, command)
            assert(callable(clbl))
        except:  # action was None or not a callable thing
            if command:
                log.error("%s is not a valid command for %s" % (command, classname))
            else:
                log.error("No command given for %s" % classname)
            _print_class_usage(cls)
            return
        if hasattr(inst.config, 'all') and inst.config.all == True:
            # --all was given: run the command for every basefile
            res = []
            # semi-magic handling
            ret = cls.setup(command, inst.config)
            if ret == False:
                log.info("%s %s: Nothing to do!" % (alias, command))
            else:
                # TODO: use multiprocessing.pool.map or celery for
                # task queue handling
                for basefile in inst.list_basefiles_for(command):
                    try:
                        res.append(clbl(basefile))
                    except Exception as e:
                        # log and record the failure, but keep going
                        # with the remaining basefiles
                        log.error("%s of %s failed: %s" % (command, basefile, e))
                        res.append(sys.exc_info())
                cls.teardown(command, inst.config)
        else:
            res = clbl(*args)
    return res
def _instantiate_class(cls, configfile="ferenda.ini", argv=None):
    """Given a class object, instantiate that class and make sure the
    instance is properly configured given its own defaults, a
    config file, and command line parameters.

    :param cls: The repository class to instantiate
    :type cls: class
    :param configfile: Path to a ferenda.ini-style configuration file
    :type configfile: str
    :param argv: An argv-style list of command line options (or None)
    :type argv: list
    :returns: a configured instance of ``cls``
    """
    # BUGFIX: the previous default argv=[] was a mutable default
    # argument, shared between every call that omitted argv. Use a
    # None sentinel and create a fresh list per call instead.
    if argv is None:
        argv = []
    inst = cls()  # no options -- we re-set .config and .store explicitly
    globalcfg = LayeredConfig({cls.alias: inst.get_default_options()},
                              configfile,
                              argv, cascade=True)
    # only hand the instance its own section of the layered config
    classcfg = getattr(globalcfg, cls.alias)
    inst.config = classcfg
    inst.store = inst.documentstore_class(classcfg.datadir + os.sep + inst.alias,
                                          downloaded_suffix=inst.downloaded_suffix,
                                          storage_policy=inst.storage_policy)
    # FIXME: this is a quick hack for controlling trace loggers for
    # ferenda.sources.legal.se.SFS. Must think abt how to generalize
    # this.
    if hasattr(inst, 'trace'):
        for tracelog in inst.trace:
            try:
                loglevel = getattr(inst.config.trace, tracelog)
                log = logging.getLogger(inst.alias + "." + tracelog)
                log.setLevel(loglevels.get(loglevel, 'DEBUG'))
            except AttributeError:
                # no level configured for this trace logger: mute it
                logging.getLogger(inst.alias + "." + tracelog).propagate = False
    return inst
def _enabled_classes(inifile=None):
"""Returns a mapping (alias -> classname) for all registered classes.
>>> enable("ferenda.DocumentRepository") == 'base'
True
>>> _enabled_classes() == {'base': 'ferenda.DocumentRepository'}
True
>>> os.unlink("ferenda.ini")
:param inifile: The full path to a ferenda.ini file. If None, attempts
to find ini file using
:py:func:`ferenda.Manager._find_config_file`
:type inifile: str
:returns: A mapping between alias and classname for all registered classes.
:rtype: dict
"""
cfg = configparser.ConfigParser()
if not inifile:
inifile = _find_config_file()
cfg.read([inifile])
enabled = OrderedDict()
for section in cfg.sections():
if cfg.has_option(section, "class"):
enabled[section] = cfg.get(section, "class")
return enabled
def _print_usage():
    """Prints out general usage information for the ``ferenda-build.py`` tool."""
    # general info, enabled classes
    executable = sys.argv[0]
    print("""Usage: %(executable)s [class-or-alias] [action] <arguments> <options>
e.g. '%(executable)s ferenda.sources.EurlexCaselaw enable'
     '%(executable)s ecj parse 62008J0042'
     '%(executable)s all generate'""" % locals())
    enabled = _list_enabled_classes()
    if enabled:
        print("Available modules:")
        for (alias, desc) in enabled.items():
            # BUGFIX: '%' binds tighter than ','; the original
            # '"%s: %s" % alias, desc' formatted with only one value
            # and raised TypeError. Both values must be in a tuple.
            print(" * %s: %s" % (alias, desc))
def _list_enabled_classes():
    """Returns a mapping (alias -> description) for all registered classes.

    The description is taken from the first line of each registered
    class' docstring.

    :returns: a mapping (alias -> description) for all registered classes
    :rtype: dict
    """
    descriptions = OrderedDict()
    for alias, classname in _enabled_classes().items():
        # first docstring line doubles as the short description
        firstline = _load_class(classname).__doc__.split("\n")[0]
        descriptions[alias] = firstline
    return descriptions
def _print_class_usage(cls):
    """Given a class object, print out which actions are defined for that class.

    :param cls: The class object to print usage information for
    :type cls: class
    """
    actions = _list_class_usage(cls)
    if actions:
        print("Valid actions are:")
    else:
        print("No valid actions in this class (%s). Did you forget the @action decorator?" % cls.__name__)
    for action, desc in actions.items():
        # BUGFIX: '%' binds tighter than ','; the original
        # '"%s: %s" % action, desc' passed a single value to a two-slot
        # format string and raised TypeError at runtime.
        print(" * %s: %s" % (action, desc))
def _list_class_usage(cls):
"""Given a class object, list the defined actions (with descriptions)
for that class.
>>> _list_class_usage(DocumentRepository) == {
... 'download':'Downloads all documents from a remote web service.',
... 'parse':'Parse downloaded documents into structured XML and RDF.',
... 'generate':'Generate a browser-ready HTML file from structured XML and RDF.'}
True
Note: Descriptions are taken from the first line of the action
methods' docstring.
:param cls: The class to list usage for.
:type cls: class
:return: a mapping of (action -> description) for a specified class.
:rtype: dict
"""
res = OrderedDict()
for attrname in dir(cls):
attr = getattr(cls, attrname)
if hasattr(attr, "runnable"):
res[attr.__name__] = attr.__doc__.split("\n")[0]
return res
def _filter_argv(args):
"""Given a command line, extract a tuple containing the
class-or-alias to use, the command to run, and the positional
arguments for that command. Strip away all --options.
:param args: A sys.argv style command line argument list.
:type args: list
:returns: (class-or-alias, command, [positional-arguments])
:rtype: tuple
"""
alias = None
command = None
commandargs = []
if len(args) > 0 and not args[0].startswith("--"):
alias = args[0]
if len(args) > 1 and not args[1].startswith("--"):
command = args[1]
if len(args) > 2:
for arg in args[2:]:
if not arg.startswith("--"):
commandargs.append(arg)
return (alias, command, commandargs)
def _filter_argv_options(args):
options=[]
for arg in args:
if arg.startswith("--"):
options.append(arg)
return options
def _load_class(classname):
    """Given a classname, imports and returns the corresponding class object.

    :param classname: A fully qualified class name
    :type classname: str
    :returns: Corresponding class object
    :rtype: class
    """
    if "." not in classname:
        raise ValueError("Classname '%s' should be the fully qualified name of a class (i.e. 'modulename.%s')" % (classname, classname))
    (modulename, localclassname) = classname.rsplit(".", 1)
    # NOTE: Don't remove this line! (or make sure testManager works after you do)
    log = setup_logger()
    __import__(modulename)
    # __import__ returns the topmost package, ie if one attempts to
    # import "ferenda.sources.SKVFS" it returns ferenda. The lowermost
    # module is looked up in sys.modules instead.
    module = sys.modules[modulename]
    members = dict(inspect.getmembers(module, inspect.isclass))
    if localclassname in members:
        return members[localclassname]
    raise ImportError("No class named '%s'" % classname)
def _find_config_file(path=None, create=False):
"""
:returns: the full path to the configuration ini file
"""
if not path:
path = os.getcwd()
inipath = path + os.sep + "ferenda.ini"
if not create and not os.path.exists(inipath):
raise errors.ConfigurationError("Config file %s not found (relative to %s)" % (inipath, os.getcwd()))
return inipath
def _setup_runserver_args(config, inifilename):
    """Given a config object, returns a dict with some of those
    configuration options, but suitable as arguments for
    :py:func:`ferenda.Manager.runserver`.

    :param config: An initialized config object with data from a ferenda.ini
                   file
    :type config: ferenda.LayeredConfig
    :param inifilename: path to the ferenda.ini file the config came from
    :type inifilename: str
    :returns: A subset of the same configuration options
    :rtype: dict
    """
    port = urlsplit(config.url).port or 80
    documentroot = os.path.join(os.path.dirname(inifilename), config.datadir)
    # create an instance of every enabled repo, each configured from
    # its own section of the config object
    repos = []
    for cls in _classes_from_classname(_enabled_classes(inifilename), 'all'):
        section = getattr(config, cls.alias)
        kwargs = dict((key, getattr(section, key)) for key in section)
        repos.append(cls(**kwargs))
    return {'port': port,
            'documentroot': documentroot,
            'apiendpoint': config.apiendpoint,
            'searchendpoint': config.searchendpoint,
            'repos': repos}
def _setup_frontpage_args(config, argv):
    """Build the keyword arguments needed by the frontpage action:
    site name, output path and one configured instance per enabled repo.
    """
    # FIXME: This way of instantiating repo classes should maybe be
    # used by _setup_makeresources_args as well?
    #
    # FIXME: why do we pass a config object when we re-read
    # ferenda.ini at least twice (_enabled_classes and
    # _instantiate_class) ?!
    # _enabled_classes reads only ferenda.ini using configparser
    # rather than layeredconfig
    enabled = _enabled_classes()
    repos = [_instantiate_class(cls, _find_config_file(), argv)
             for cls in _classes_from_classname(enabled, classname="all")]
    return {'sitename': config.sitename,
            'path': config.datadir + "/index.html",
            'repos': repos}
def _filepath_to_urlpath(path,keep_segments=2):
"""
:param path: the full or relative filepath to transform into a urlpath
:param keep_segments: the number of directory segments to keep (the ending filename is always kept)
"""
# data/repo/rsrc/js/main.js, 3 -> repo/rsrc/js/main.js
# /var/folders/tmp4q6b1g/rsrc/resources.xml, 1 -> rsrc/resources.xml
urlpath = os.sep.join(path.split(os.sep)[-(keep_segments+1):])
# print("_filepath_to_urlpath (%s): %s -> %s" % (keep_segments, path, urlpath))
return urlpath.replace(os.sep, "/")
def _preflight_check():
    """Perform a check of needed modules and binaries.

    Checks the running Python version, a list of required third-party
    modules (with minimum versions) and a list of external binaries,
    printing a status line for each.

    :returns: True if every required dependency is satisfied
    :rtype: bool
    """
    def versiontuple(versionstring):
        # Best-effort parse of a dotted version string into a tuple of
        # ints so comparisons are numeric: string comparison would get
        # e.g. "10.0" < "4.0" wrong. Stops at the first non-numeric
        # component (e.g. "1.2rc1" -> (1,)).
        parts = []
        for part in versionstring.split("."):
            if part.isdigit():
                parts.append(int(part))
            else:
                break
        return tuple(parts)

    pythonver = (2, 6, 0)
    # Module, min ver, required
    modules = (
        ('bs4', '4.2.0', True),
        # ('lxml', '3.2.0', True), # has no top level __version__ property
        ('rdflib', '4.0', True),
        ('html5lib', '0.95', True),
        ('rdflib_sqlalchemy', '0.2', True),
        ('requests', '1.2.0', True),
        ('six', '1.2.0', True),
        ('jsmin', '2.0.2', True),
        ('whoosh', '2.4.1', True),
        ('pyparsing', '1.5.7', True))
    binaries = (('patch', '-v'),
                ('pdftotext', '-v'),
                ('pdftohtml', '-v'))
    # 1: Check python ver
    success = True
    if sys.version_info < pythonver:
        # BUGFIX: pythonver holds ints; str.join requires strings, so
        # map(str, ...) is needed to avoid a TypeError here.
        print("ERROR: ferenda requires Python %s or higher, you have %s" %
              (".".join(map(str, pythonver)), sys.version.split()[0]))
        success = False
    else:
        print("Python version %s OK" % sys.version.split()[0])
    # 2: Check modules -- TODO: Do we really need to do this?
    for (mod, ver, required) in modules:
        try:
            m = __import__(mod)
            version = getattr(m, '__version__', None)
            if isinstance(version, tuple):
                version = ".".join([str(x) for x in version])
            if not hasattr(m, '__version__'):
                print("WARNING: Module %s has no version information, it might be older than required" % mod)
            # BUGFIX: compare versions numerically, not as strings
            elif versiontuple(version) < versiontuple(ver):
                if required:
                    print("ERROR: Module %s has version %s, need %s" %
                          (mod, version, ver))
                    success = False
                else:
                    print("WARNING: Module %s has version %s, would like to have %s" %
                          (mod, version, ver))
            else:
                print("Module %s OK" % mod)
        except ImportError:
            if required:
                print("ERROR: Missing module %s" % mod)
                success = False
            else:
                print("WARNING: Missing (non-essential) module %s" % mod)
    # 3: Check binaries
    for (cmd, arg) in binaries:
        try:
            ret = subprocess.call([cmd, arg],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
            if ret == 127:
                # 127 is the shell convention for "command not found".
                # BUGFIX: the original format string had no % cmd arg,
                # printing a literal '%s'.
                print("ERROR: Binary %s failed to execute" % cmd)
                success = False
            else:
                print("Binary %s OK" % cmd)
        except OSError as e:
            print("ERROR: Binary %s failed: %s" % (cmd, e))
            success = False
    return success
def _select_triplestore(sitename):
    """Probe possible triplestore backends and return configuration for
    the first one that responds.

    Tries, in order: a Sesame HTTP server, a Fuseki HTTP server, a
    Sleepycat-backed RDFLib store, and a SQLite-backed RDFLib store.

    :param sitename: The site name, used as repository id for backends
                     that support named repositories
    :type sitename: str
    :returns: a (storetype, location, repository) tuple, or
              (None, None, None) if no usable triplestore was found
    :rtype: tuple
    """
    # Try triplestores in order: Sesame, Fuseki, Sleepycat, SQLite,
    # and return configuration for the first triplestore that works.
    # 1. Sesame
    try:
        # FERENDA_TRIPLESTORE_LOCATION overrides the default server URL
        triplestore = os.environ.get('FERENDA_TRIPLESTORE_LOCATION',
                                     'http://localhost:8080/openrdf-sesame')
        # /protocol is a cheap endpoint to check that the server is up
        resp = requests.get(triplestore + '/protocol')
        resp.raise_for_status()
        workbench = triplestore.replace('openrdf-sesame', 'openrdf-workbench')
        print("Sesame server responding at %s (%s)" % (triplestore, resp.text))
        # TODO: It is possible, if you put the exactly right triples
        # in the SYSTEM repository, to create a new repo
        # programmatically.
        print("""You still need to create a repository at %(workbench)s ->
New repository. The following settings are recommended:
Type: Native Java store
ID: %(sitename)s
Title: Ferenda repository for %(sitename)s
Triple indexes: spoc,posc,cspo,opsc,psoc
""" % locals())
        return('SESAME', triplestore, sitename)
    except (requests.exceptions.HTTPError,
            requests.exceptions.ConnectionError) as e:
        print("... Sesame not available at %s: %s" % (triplestore, e))
        pass
    # 2. Fuseki
    try:
        triplestore = os.environ.get('FERENDA_TRIPLESTORE_LOCATION',
                                     'http://localhost:3030/ds')
        resp = requests.get(triplestore + "/data?default")
        resp.raise_for_status()
        print("Fuseki server responding at %s" % triplestore)
        # TODO: Find out how to create a new datastore in Fuseki
        # programatically so we can use
        # http://localhost:3030/$SITENAME instead
        return('FUSEKI', triplestore, 'ds')
    except (requests.exceptions.HTTPError,
            requests.exceptions.ConnectionError) as e:
        print("... Fuseki not available at %s: %s" % (triplestore, e))
        pass
    # 3. RDFLib + Sleepycat
    try:
        # instantiating the store is the probe; failure raises ImportError
        t = TripleStore("test.db", "ferenda", storetype=TripleStore.SLEEPYCAT)
        # No boom?
        print("Sleepycat-backed RDFLib triplestore seems to work")
        return ('SLEEPYCAT', 'data/ferenda.db', 'ferenda')
    except ImportError as e:
        print("...Sleepycat not available: %s" % e)
    # 4. RDFLib + SQLite
    try:
        t = TripleStore("test.sqlite", "ferenda", storetype=TripleStore.SQLITE)
        print("SQLite-backed RDFLib triplestore seems to work")
        return ('SQLITE', 'data/ferenda.sqlite', 'ferenda')
    except ImportError as e:
        print("...SQLite not available: %s" % e)
    # nothing worked: callers get a (None, None, None) sentinel
    print("No usable triplestores, the actions 'relate', 'generate' and 'toc' won't work")
    return (None, None, None)
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.